"""LangGraph store backed by Amazon Bedrock AgentCore Memory.

Messages written through :meth:`BedrockAgentCoreMemoryStore.put` are saved as
AgentCore *events*; the AgentCore service asynchronously distills those events
into long-term memory records, which can later be listed or semantically
searched through :meth:`BedrockAgentCoreMemoryStore.search`.
"""

import logging
from collections.abc import Iterable
from datetime import datetime
from typing import Any, Literal, NamedTuple, Union

from bedrock_agentcore.memory.client import MemoryClient
from bedrock_agentcore.memory.constants import MessageRole
from langchain_core.messages import BaseMessage
from langgraph.store.base import (
    BaseStore,
    Item,
    SearchItem,
    TTLConfig,
)

logger = logging.getLogger(__name__)


class AgentCoreRetrieveOp(NamedTuple):
    """Operation: semantic search over memory records in a namespace."""

    namespace: tuple[str, ...]  # (actor_id, session_id)
    query: str
    top_k: int = 10
    memory_strategy_id: str | None = None


class AgentCoreListOp(NamedTuple):
    """Operation: plain (non-semantic) listing of memory records in a namespace."""

    namespace: tuple[str, ...]  # (actor_id, session_id)
    max_results: int = 100


class AgentCoreStoreOp(NamedTuple):
    """Operation to store a message event."""

    namespace: tuple[str, ...]  # (actor_id, session_id)
    key: str  # event identifier
    message: BaseMessage
    event_timestamp: datetime | None = None


class AgentCoreDeleteOp(NamedTuple):
    """Operation to delete a memory record."""

    memory_record_id: str


class AgentCoreGetOp(NamedTuple):
    """Operation to get a memory record."""

    memory_record_id: str


AgentCoreOp = Union[
    AgentCoreRetrieveOp,
    AgentCoreListOp,
    AgentCoreStoreOp,
    AgentCoreDeleteOp,
    AgentCoreGetOp,
]
AgentCoreResult = Union[
    Item, list[Item], list[SearchItem], list[tuple[str, ...]], None
]


class NotProvided:
    """Sentinel type that distinguishes "argument not passed" from ``None``."""


NOT_PROVIDED = NotProvided()
NamespacePath = tuple[str, ...]


class BedrockAgentCoreMemoryStore(BaseStore):
    """Bedrock AgentCore Memory Store support for storage of chat messages and retrieval of long term memories

    !!! example "Examples"
        Storing conversation messages:
            memory_client = MemoryClient(region="us-west-2")
            store = BedrockAgentCoreMemoryStore(memory_client)

    Stores enable persistence and memory that can be shared across threads,
    scoped to user IDs, assistant IDs, or other arbitrary namespaces.

    Note:
        This implementation depends on Amazon Bedrock AgentCore Memory to store and process
        messages then later retrieve the processed memories through semantic search. An example
        would be saving a conversation and then processing async user preferences for later
        search in a user preferences namespace.
    """

    # TTL is managed by the AgentCore service, not by this store.
    supports_ttl: bool = False
    ttl_config: TTLConfig | None = None

    __slots__ = ("memory_client", "memory_id")

    def __init__(self, *, memory_id: str, memory_client: MemoryClient) -> None:
        # Bedrock AgentCore Memory Client
        self.memory_client: MemoryClient = memory_client
        self.memory_id = memory_id

    def batch(self, ops: Iterable[AgentCoreOp]) -> list[AgentCoreResult]:
        """Execute a batch of AgentCore operations synchronously.

        Dispatches each op to the matching private handler; store/delete ops
        contribute ``None`` to the result list, read ops contribute their data.
        """
        results: list[AgentCoreResult] = []

        for op in ops:
            if isinstance(op, AgentCoreRetrieveOp):
                results.append(self._retrieve_memories(op))
            elif isinstance(op, AgentCoreListOp):
                results.append(self._list_memory_records(op))
            elif isinstance(op, AgentCoreGetOp):
                results.append(self._get_memory_record(op))
            elif isinstance(op, AgentCoreStoreOp):
                self._store_message(op)
                results.append(None)
            elif isinstance(op, AgentCoreDeleteOp):
                self._delete_memory_record(op)
                results.append(None)
            else:
                raise ValueError(f"Unknown AgentCore operation type: {type(op)}")

        return results

    async def abatch(self, ops: Iterable[AgentCoreOp]) -> list[AgentCoreResult]:
        """Execute a batch of AgentCore operations asynchronously."""
        raise NotImplementedError(
            "The Bedrock AgentCore Memory client does not yet support async operations"
        )

    def get(
        self,
        namespace: tuple[str, ...],
        key: str,
        *,
        refresh_ttl: bool | None = None,
    ) -> Item | None:
        """Retrieve a single memory item.

        Args:
            namespace: (actor_id, session_id) indicating where the memory is stored
            key: Unique identifier for the memory event
            refresh_ttl: Not applicable for Bedrock AgentCore Memory

        Returns:
            Item with the individual record information retrieved, or None when
            the service response contains no record. Service-side errors (e.g.
            an unknown record id) propagate to the caller.
        """
        op = AgentCoreGetOp(memory_record_id=key)
        return self.batch([op])[0]

    def search(
        self,
        namespace_prefix: tuple[str, ...],
        /,
        *,
        query: str | None = None,
        filter: dict[str, Any] | None = None,
        limit: int = 10,
        offset: int = 0,
        refresh_ttl: bool | None = None,
        # AgentCore-specific parameters
        memory_strategy_id: str | None = None,
        top_k: int | None = None,
    ) -> list[SearchItem]:
        """Search for items within a namespace prefix.

        Args:
            namespace_prefix: the namespace tuple of which to search (actor_id, session_id)
            query: the query to search for in Bedrock AgentCore memory
            filter: Not supported by Bedrock AgentCore Memory (will be ignored)
            limit: Maximum number of items to return (used as top_k if not specified)
            offset: Not supported by Bedrock AgentCore Memory (will be ignored)
            refresh_ttl: Not applicable for Bedrock AgentCore Memory
            memory_strategy_id (optional): strategy ID to search for
            top_k (optional): the maximum number of top-scoring memory records to return

        Returns:
            List of items matching the search criteria.

        ???+ example "Examples"
            Basic listing of long term memories (no semantic search):
            ```python
            # List memory records in a namespace
            results = store.search(("user-1", "session-1"))
            ```

            Basic semantic searching for long term memories:
            ```python
            # Search for user preferences for a certain query
            results = store.search(
                ("user-1", "session-1"),
                query="favorite coffeeshops and past orders"
            )
            ```
        """
        if query:
            # Use semantic search
            op: AgentCoreOp = AgentCoreRetrieveOp(
                namespace=namespace_prefix,
                query=query,
                top_k=top_k or limit,
                memory_strategy_id=memory_strategy_id,
            )
        else:
            # Use list operation
            op = AgentCoreListOp(namespace=namespace_prefix, max_results=limit)

        return self.batch([op])[0] or []

    def put(
        self,
        namespace: tuple[str, ...],
        key: str,
        value: dict[str, Any],
        index: Literal[False] | list[str] | None = None,
        *,
        ttl: float | None | NotProvided = NOT_PROVIDED,
    ) -> None:
        """Store or update a message event in Bedrock AgentCore Memory

        Args:
            namespace: a tuple with actor id and session id as the arguments
                Example: ("actorId", "sessionId")
            key: the event identifier for the memory
            value: The message data containing a "message" key with a BaseMessage object
            index: Not supported - indexing is handled automatically by Bedrock AgentCore
            ttl: Not supported - TTL is handled by Bedrock AgentCore service

        Note:
            Async processing of messages in Bedrock AgentCore such as summarization or user
            preference abstraction happens automatically in the service. Each message that
            is saved here is then processed later.

        ???+ example "Examples"
            Store a message.
            ```python
            from langchain_core.messages import HumanMessage
            store.put(("user-1","session-1"), "123", {"message": HumanMessage("My favorite pirate is Blackbeard")})
            ```
        """
        self._validate_namespace(namespace)

        if index is not None and index is not False:
            raise NotImplementedError(
                "Custom indexing is handled by the Bedrock AgentCore service itself."
            )

        if not isinstance(ttl, NotProvided) and ttl is not None:
            raise NotImplementedError(
                "TTL is handled by the Bedrock AgentCore service itself."
            )

        message = value.get("message")
        if message is None:
            raise ValueError("Value must contain a 'message' key with a BaseMessage object")

        if not isinstance(message, BaseMessage):
            raise ValueError("The 'message' value must be a BaseMessage instance")

        op = AgentCoreStoreOp(
            namespace=namespace,
            key=str(key),
            message=message,
            event_timestamp=None,
        )

        self.batch([op])

    def delete(self, namespace: tuple[str, ...], key: str) -> None:
        """Delete an item.

        Args:
            namespace: tuple with (actor_id, session_id)
            key: the event_id of the memory to delete
        """
        op = AgentCoreDeleteOp(memory_record_id=key)
        self.batch([op])

    def list_namespaces(
        self,
        *,
        prefix: NamespacePath | None = None,
        suffix: NamespacePath | None = None,
        max_depth: int | None = None,
        limit: int = 100,
        offset: int = 0,
    ) -> list[tuple[str, ...]]:
        """List and filter namespaces in the store"""
        raise NotImplementedError(
            "Listing namespaces is not yet implemented for Bedrock AgentCore APIs"
        )

    async def aget(
        self,
        namespace: tuple[str, ...],
        key: str,
        *,
        refresh_ttl: bool | None = None,
    ) -> Item | None:
        """Asynchronously retrieve a single memory item.

        Args:
            namespace: (actor_id, session_id) indicating where the memory is stored
            key: Unique identifier for the memory event
            refresh_ttl: Not applicable for Bedrock AgentCore Memory

        Raises:
            NotImplementedError: always — the Bedrock AgentCore Memory client
                does not yet support async operations. Use :meth:`get` instead.
        """
        raise NotImplementedError(
            "The Bedrock AgentCore Memory client does not yet support async operations"
        )

    async def asearch(
        self,
        namespace_prefix: tuple[str, ...],
        /,
        *,
        query: str | None = None,
        filter: dict[str, Any] | None = None,
        limit: int = 10,
        offset: int = 0,
        refresh_ttl: bool | None = None,
        # AgentCore-specific parameters
        memory_strategy_id: str | None = None,
        top_k: int | None = None,
    ) -> list[SearchItem]:
        """Asynchronously search for memories within a namespace prefix using Bedrock AgentCore."""
        raise NotImplementedError(
            "The Bedrock AgentCore Memory client does not yet support async operations"
        )

    async def aput(
        self,
        namespace: tuple[str, ...],
        key: str,
        value: dict[str, Any],
        index: Literal[False] | list[str] | None = None,
        *,
        ttl: float | None | NotProvided = NOT_PROVIDED,
    ) -> None:
        """Asynchronously store a message event in Bedrock AgentCore Memory."""
        raise NotImplementedError(
            "The Bedrock AgentCore Memory client does not yet support async operations"
        )

    async def adelete(self, namespace: tuple[str, ...], key: str) -> None:
        """Asynchronously delete a memory event."""
        raise NotImplementedError(
            "The Bedrock AgentCore Memory client does not yet support async operations"
        )

    async def alist_namespaces(
        self,
        *,
        prefix: NamespacePath | None = None,
        suffix: NamespacePath | None = None,
        max_depth: int | None = None,
        limit: int = 100,
        offset: int = 0,
    ) -> list[tuple[str, ...]]:
        """List and filter namespaces in the store asynchronously."""
        raise NotImplementedError(
            "Listing namespaces is not yet implemented for Bedrock AgentCore APIs"
        )

    def _retrieve_memories(self, op: AgentCoreRetrieveOp) -> list[SearchItem]:
        """Retrieve memories using semantic search.

        Best-effort: service failures are logged and yield an empty result
        rather than raising, so search degrades gracefully.
        """
        namespace_str = self._convert_namespace_tuple_to_str(op.namespace)

        try:
            retrieve_params: dict[str, Any] = {
                "memory_id": self.memory_id,
                "namespace": namespace_str,
                "query": op.query,
                "top_k": op.top_k,
            }

            # Add memory_strategy_id if provided
            if op.memory_strategy_id is not None:
                retrieve_params["memory_strategy_id"] = op.memory_strategy_id

            memories = self.memory_client.retrieve_memories(**retrieve_params)
            return self._convert_memories_to_search_items(memories, op.namespace)

        except Exception as e:
            logger.error(f"Failed to retrieve memories: {e}")
            return []

    def _list_memory_records(self, op: AgentCoreListOp) -> list[SearchItem]:
        """List memory records in a namespace."""
        namespace_str = self._convert_namespace_tuple_to_str(op.namespace)

        response = self.memory_client.list_memory_records(
            memoryId=self.memory_id,
            namespace=namespace_str,
            maxResults=op.max_results,
        )
        memories = response.get("memoryRecordSummaries", [])
        return self._convert_memories_to_search_items(memories, op.namespace)

    def _get_memory_record(self, op: AgentCoreGetOp) -> Item | None:
        """Get a specific long term memory record by ID."""
        response = self.memory_client.get_memory_record(
            memoryId=self.memory_id,
            memoryRecordId=op.memory_record_id,
        )

        record = response.get("memoryRecord")
        if not record:
            return None

        text = record.get("content", {}).get("text", "")
        namespaces = record.get("namespaces", [])

        # Parse namespace - take first one and split by '/'
        namespace_tuple = tuple(namespaces[0].split("/")) if namespaces else ("", "")
        created_at = record.get("createdAt")

        return Item(
            key=op.memory_record_id,
            namespace=namespace_tuple,
            value={"content": text},
            created_at=created_at,
            updated_at=created_at,  # records are immutable once created
        )

    def _delete_memory_record(self, op: AgentCoreDeleteOp) -> None:
        """Delete a specific long term memory record by ID."""
        self.memory_client.delete_memory_record(
            memoryId=self.memory_id,
            memoryRecordId=op.memory_record_id,
        )

    def _store_message(self, op: AgentCoreStoreOp) -> None:
        """Store a message event."""
        messages_to_store = convert_langchain_messages_to_event_messages([op.message])
        if not messages_to_store:
            logger.warning(f"No valid messages to store for key {op.key}")
            return

        self.memory_client.create_event(
            memory_id=self.memory_id,
            actor_id=op.namespace[0],
            session_id=op.namespace[1],
            messages=messages_to_store,
            event_timestamp=op.event_timestamp,
        )
        logger.debug(f"Stored message event with key {op.key}")

    def _validate_namespace(self, namespace: tuple[str, ...]) -> None:
        """Validate namespace format for Bedrock AgentCore."""
        if not isinstance(namespace, tuple) or len(namespace) != 2:
            raise ValueError("Namespace must be a tuple of (actor_id, session_id)")
        if not all(isinstance(part, str) and part.strip() for part in namespace):
            raise ValueError("Namespace parts must be non-empty strings")

    def _convert_namespace_tuple_to_str(self, namespace_tuple: tuple[str, ...]) -> str:
        """Render a namespace tuple as the slash-delimited path AgentCore expects."""
        return "/" + "/".join(namespace_tuple)

    def _convert_memories_to_search_items(
        self, memories: list, namespace: tuple[str, ...]
    ) -> list[SearchItem]:
        """Convert AgentCore memory records to SearchItem objects."""
        results: list[SearchItem] = []

        for item in memories:
            if isinstance(item, dict):
                content = item.get("content", {})
                if isinstance(content, dict):
                    text = content.get("text", "")
                else:
                    text = str(content)

                score = item.get("score", 0.0)
                record_id = (
                    item.get("memoryRecordId") or item.get("id") or str(len(results))
                )

                # Handle datetime parsing
                created_at = item.get("createdAt") or item.get("timestamp")
                if isinstance(created_at, str):
                    try:
                        created_at = datetime.fromisoformat(
                            created_at.replace("Z", "+00:00")
                        )
                    except (ValueError, AttributeError):
                        created_at = datetime.now()
                elif created_at is None:
                    created_at = datetime.now()

                result = SearchItem(
                    namespace=namespace,
                    key=record_id,
                    value={"content": text, "metadata": item.get("metadata", {})},
                    created_at=created_at,
                    updated_at=created_at,  # memories are not updated
                    score=float(score) if score is not None else None,
                )
                results.append(result)

        return results


def convert_langchain_messages_to_event_messages(
    messages: list[BaseMessage],
) -> list[tuple[str, str]]:
    """Convert LangChain messages to Bedrock Agent Core events

    Args:
        messages: List of Langchain messages (BaseMessage)

    Returns:
        List of AgentCore event tuples (text, role). Messages that were
        already saved (have an ``event_id``), are empty, or have an
        unsupported type are skipped.
    """
    converted_messages: list[tuple[str, str]] = []
    for msg in messages:
        # Skip if event already saved
        if msg.additional_kwargs.get("event_id") is not None:
            continue

        text = msg.text()
        if not text.strip():
            continue

        # Map LangChain roles to Bedrock Agent Core roles
        if msg.type == "human":
            role = MessageRole.USER.value
        elif msg.type == "ai":
            role = MessageRole.ASSISTANT.value
        elif msg.type == "tool":
            role = MessageRole.TOOL.value
        elif msg.type == "system":
            role = MessageRole.OTHER.value
        else:
            logger.warning(f"Skipping unsupported message type: {msg.type}")
            continue

        converted_messages.append((text, role))

    return converted_messages
persistent memory capabilities for LangGraph agents using Amazon Bedrock AgentCore Memory service.\n", + "\n", + "## Prerequisites\n", + "\n", + "1. AWS credentials configured\n", + "2. Bedrock AgentCore Memory resource created and ACTIVE\n", + "3. Required packages installed" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup and Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "from datetime import datetime\n", + "from typing import Annotated\n", + "\n", + "from bedrock_agentcore.memory.client import MemoryClient\n", + "from bedrock_agentcore_memory_store import BedrockAgentCoreMemoryStore\n", + "from langchain.chat_models import init_chat_model\n", + "from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage\n", + "from langchain_core.runnables import RunnableConfig\n", + "from langgraph.checkpoint.memory import InMemorySaver\n", + "from langgraph.graph import START, StateGraph\n", + "from langgraph.graph.message import add_messages\n", + "from langgraph.store.base import BaseStore\n", + "from typing_extensions import TypedDict\n", + "\n", + "# Configure logging\n", + "logging.basicConfig(level=logging.INFO)\n", + "logger = logging.getLogger(__name__)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:botocore.credentials:Found credentials in environment variables.\n", + "INFO:bedrock_agentcore.memory.client:Initialized MemoryClient for control plane: us-west-2, data plane: us-west-2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Memory store initialized with ID: MEMORY_ID\n", + "Supports TTL: False\n" + ] + } + ], + "source": [ + "# Configuration - Update these values\n", + "REGION = \"us-west-2\"\n", + "MEMORY_ID = \"MEMORY_ID\" # Replace with your actual 
memory ID\n", + "\n", + "# Initialize memory client and store\n", + "memory_client = MemoryClient(REGION)\n", + "store = BedrockAgentCoreMemoryStore(memory_client=memory_client, memory_id=MEMORY_ID)\n", + "\n", + "print(f\"Memory store initialized with ID: {MEMORY_ID}\")\n", + "print(f\"Supports TTL: {store.supports_ttl}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Core Store Operations\n", + "\n", + "### 1. Storing Messages (put operation)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113343000#0f3ea8da\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Stored human message with key: msg-1\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113344000#b3cf6a09\n", + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113345000#866aac05\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Stored ai message with key: msg-2\n", + "✅ Stored human message with key: msg-3\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113345000#abb0945d\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Stored ai message with key: msg-4\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:bedrock_agentcore.memory.client:Failed to create event: An error occurred (ThrottledException) when calling the CreateEvent operation (reached max retries: 4): Rate exceeded.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "❌ Failed to store message msg-5: An error occurred (ThrottledException) when calling the CreateEvent operation (reached max retries: 4): Rate 
exceeded.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113356000#175223c3\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Stored system message with key: msg-6\n" + ] + } + ], + "source": [ + "# Define namespace (actor_id, session_id)\n", + "namespace = (\"user-123\", \"session-456\")\n", + "\n", + "# Store different types of messages\n", + "messages_to_store = [\n", + " (\"msg-1\", HumanMessage(\"I love playing soccer and my favorite team is Barcelona\")),\n", + " (\"msg-2\", AIMessage(\"That's great! Barcelona has a rich history in football.\")),\n", + " (\"msg-3\", HumanMessage(\"My name is John and I'm a software engineer\")),\n", + " (\"msg-4\", AIMessage(\"Nice to meet you John! Software engineering is a fascinating field.\")),\n", + " (\"msg-5\", HumanMessage(\"I prefer Python for backend development\")),\n", + " (\"msg-6\", SystemMessage(\"User preferences updated: Python, Backend Development\")),\n", + "]\n", + "\n", + "for key, message in messages_to_store:\n", + " try:\n", + " store.put(namespace, key, {\"message\": message})\n", + " print(f\"✅ Stored {message.type} message with key: {key}\")\n", + " except Exception as e:\n", + " print(f\"❌ Failed to store message {key}: {e}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. 
Searching and Retrieving Memories\n", + "\n", + "Wait for messages to be processed by AgentCore (typically 30-60 seconds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "\n", + "print(\"Waiting 45 seconds for AgentCore to process messages...\")\n", + "time.sleep(45)\n", + "print(\"Processing complete!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Basic Listing ===\n", + "Found 1 memory records\n", + "\n", + "Record 1:\n", + " Key: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", + " Content: \n", + " \n", + " The user's name is John, and he works as a...\n", + " Created: 2025-09-05 16:02:36-07:00\n" + ] + } + ], + "source": [ + "# Search namespace for processed memories\n", + "search_namespace = (\"summaries\", \"user-123\", \"session-456\")\n", + "\n", + "# Basic listing (no semantic search)\n", + "print(\"=== Basic Listing ===\")\n", + "try:\n", + " results = store.search(search_namespace, limit=10)\n", + " print(f\"Found {len(results)} memory records\")\n", + " \n", + " for i, item in enumerate(results[:3]):\n", + " print(f\"\\nRecord {i+1}:\")\n", + " print(f\" Key: {item.key}\")\n", + " print(f\" Content: {item.value.get('content', '')[:100]}...\")\n", + " print(f\" Created: {item.created_at}\")\n", + "except Exception as e:\n", + " print(f\"❌ Search failed: {e}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Semantic Search ===\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Query: 'soccer and Barcelona'\n", + "Results: 
1\n", + " Score: 0.42368466 | Content: \n", + " \n", + " The user's name is Joh...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Query: 'user preferences and programming'\n", + "Results: 1\n", + " Score: 0.43479428 | Content: \n", + " \n", + " The user's name is Joh...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Query: 'John software engineer'\n", + "Results: 1\n", + " Score: 0.4470298 | Content: \n", + " \n", + " The user's name is Joh...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Query: 'Python backend development'\n", + "Results: 1\n", + " Score: 0.43134356 | Content: \n", + " \n", + " The user's name is Joh...\n" + ] + } + ], + "source": [ + "# Semantic search with queries\n", + "print(\"=== Semantic Search ===\")\n", + "\n", + "search_queries = [\n", + " \"soccer and Barcelona\",\n", + " \"user preferences and programming\",\n", + " \"John software engineer\",\n", + " \"Python backend development\"\n", + "]\n", + "\n", + "for query in search_queries:\n", + " try:\n", + " results = store.search(\n", + " search_namespace,\n", + " query=query,\n", + " limit=3\n", + " )\n", + " \n", + " print(f\"\\nQuery: '{query}'\")\n", + " print(f\"Results: {len(results)}\")\n", + " \n", + " for item in results:\n", + " score = getattr(item, 'score', 'N/A')\n", + " content = item.value.get('content', '')[:80]\n", 
+ " print(f\" Score: {score} | Content: {content}...\")\n", + " \n", + " except Exception as e:\n", + " print(f\"❌ Search failed for '{query}': {e}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Get Individual Memory Records" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Get Individual Records ===\n", + "Testing get operation with record ID: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", + "✅ Retrieved record:\n", + " Key: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", + " Namespace: ('', 'summaries', 'user-123', 'session-456')\n", + " Content: \n", + " \n", + " The user's name is John, and he works as a software engineer. He enjoys playing soccer and supports Barcelona football club.\n", + " \n", + " \n", + " The assistant acknowledged John's interest in Barcelona, noting the team's rich history in football. The assistant also greeted John and commented that software engineering is a fascinating field.\n", + " \n", + " \n", + " User preferences were updated to include Python and Backend Development.\n", + " \n", + "\n", + " Created: 2025-09-05 16:02:36-07:00\n", + "❌ Get non-existent failed: An error occurred (ResourceNotFoundException) when calling the GetMemoryRecord operation: Resource not found with memory id jgordleTestMemoryTools1-XKfUGT7fO4 and memory record id non-existent-key\n" + ] + } + ], + "source": [ + "# Get a specific memory record by ID\n", + "print(\"=== Get Individual Records ===\")\n", + "\n", + "# First get some record IDs from search\n", + "try:\n", + " search_results = store.search(search_namespace, limit=2)\n", + " \n", + " if search_results:\n", + " record_id = search_results[0].key\n", + " print(f\"Testing get operation with record ID: {record_id}\")\n", + " \n", + " # Get the specific record\n", + " result = store.get(namespace, record_id)\n", + " \n", + " if result:\n", + " print(f\"✅ 
Retrieved record:\")\n", + " print(f\" Key: {result.key}\")\n", + " print(f\" Namespace: {result.namespace}\")\n", + " print(f\" Content: {result.value.get('content', '')}\")\n", + " print(f\" Created: {result.created_at}\")\n", + " else:\n", + " print(\"❌ No record returned\")\n", + " else:\n", + " print(\"No search results available for get test\")\n", + " \n", + "except Exception as e:\n", + " print(f\"❌ Get operation failed: {e}\")\n", + "\n", + "# Test get with non-existent key\n", + "try:\n", + " result = store.get(namespace, \"non-existent-key\")\n", + " if result is None:\n", + " print(\"✅ Correctly returned None for non-existent key\")\n", + " else:\n", + " print(\"❌ Should have returned None\")\n", + "except Exception as e:\n", + " print(f\"❌ Get non-existent failed: {e}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. Delete Operations" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Delete Operations ===\n", + "Record ID to delete: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", + "✅ Delete operation called for key: mem-e2facd2a22ded39a2e94e522b857bfaba224\n" + ] + } + ], + "source": [ + "# Test delete operations\n", + "print(\"=== Delete Operations ===\")\n", + "\n", + "print(f\"Record ID to delete: {record_id}\")\n", + "\n", + "try:\n", + " # Delete the record\n", + " store.delete(namespace, record_id)\n", + " print(f\"✅ Delete operation called for key: {record_id}\")\n", + " \n", + "except Exception as e:\n", + " print(f\"❌ Delete operation failed: {e}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Record mem-e2facd2a22ded39a2e94e522b857bfaba224 was successfully deleted\n" + ] + } + ], + "source": [ + "# Verify deletion\n", + "try:\n", + " result = store.get(namespace, record_id)\n", + 
"except Exception as e:\n", + " print(f\"✅ Record {record_id} was successfully deleted\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Complete Agent Example with Memory\n", + "\n", + "This demonstrates how to use the memory store in a complete LangGraph agent." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Agent with memory compiled successfully\n" + ] + } + ], + "source": [ + "# Initialize LLM\n", + "llm = init_chat_model(\n", + " \"us.anthropic.claude-3-7-sonnet-20250219-v1:0\",\n", + " model_provider=\"bedrock_converse\",\n", + ")\n", + "\n", + "# Define agent state\n", + "class State(TypedDict):\n", + " messages: Annotated[list, add_messages]\n", + "\n", + "def call_model(state: State, config: RunnableConfig, *, store: BaseStore):\n", + " # Get configuration\n", + " user_id = config[\"configurable\"][\"user_id\"]\n", + " session_id = config[\"configurable\"][\"session_id\"]\n", + " \n", + " # Store user message\n", + " conversation_namespace = (user_id, session_id)\n", + " user_message = state['messages'][-1]\n", + " \n", + " print(f\"Storing user message: {user_message.content[:50]}...\")\n", + " store.put(\n", + " conversation_namespace,\n", + " f\"user-msg-{int(time.time())}\",\n", + " value={\"message\": user_message}\n", + " )\n", + " \n", + " # Search for relevant memories\n", + " memory_namespace = (\"summaries\", user_id, session_id)\n", + " memories = store.search(\n", + " memory_namespace,\n", + " query=user_message.content,\n", + " limit=3\n", + " )\n", + " \n", + " print(f\"Found {len(memories)} relevant memories\")\n", + " \n", + " # Add memory context to messages if available\n", + " messages = state[\"messages\"].copy()\n", + " if memories:\n", + " memory_context = \"\\n\".join([\n", + " f\"Memory: {mem.value.get('content', '')[:100]}...\"\n", + " for mem in memories[:2]\n", + " ])\n", + " 
context_msg = SystemMessage(f\"Relevant memories:\\n{memory_context}\")\n", + " messages.insert(-1, context_msg)\n", + " \n", + " # Generate response\n", + " result = llm.invoke(messages)\n", + " \n", + " # Store AI response\n", + " print(f\"Storing AI response: {result.content[:50]}...\")\n", + " store.put(\n", + " conversation_namespace,\n", + " f\"ai-msg-{int(time.time())}\",\n", + " value={\"message\": result}\n", + " )\n", + " \n", + " return {\"messages\": [result]}\n", + "\n", + "# Build graph\n", + "graph_builder = StateGraph(State)\n", + "graph_builder.add_node(\"chatbot\", call_model)\n", + "graph_builder.add_edge(START, \"chatbot\")\n", + "\n", + "# Compile with memory store\n", + "checkpointer = InMemorySaver()\n", + "graph = graph_builder.compile(checkpointer=checkpointer, store=store)\n", + "\n", + "print(\"✅ Agent with memory compiled successfully\")" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113835000#7e243282\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Testing Agent with Memory ===\n", + "\n", + "--- Conversation 1 ---\n", + "User: Hi, I'm interested in learning about machine learning\n", + "Storing user message: Hi, I'm interested in learning about machine learn...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Retrieved 0 memories from namespace: /summaries/demo-user/demo-session\n", + "INFO:langchain_aws.chat_models.bedrock_converse:Using Bedrock Converse API to generate response\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found 0 relevant memories\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113843000#fcf5f078\n" + ] + }, + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "Storing AI response: # Introduction to Machine Learning\n", + "\n", + "I'm happy to h...\n", + "AI: # Introduction to Machine Learning\n", + "\n", + "I'm happy to help you learn about machine learning! Machine learning (ML) is a fascinating field where computers learn patterns from data without being explicitly p...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113845000#2f84629b\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- Conversation 2 ---\n", + "User: What programming languages are best for ML?\n", + "Storing user message: What programming languages are best for ML?...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Retrieved 0 memories from namespace: /summaries/demo-user/demo-session\n", + "INFO:langchain_aws.chat_models.bedrock_converse:Using Bedrock Converse API to generate response\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found 0 relevant memories\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113852000#30e87561\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Storing AI response: # Programming Languages for Machine Learning\n", + "\n", + "## T...\n", + "AI: # Programming Languages for Machine Learning\n", + "\n", + "## Top Languages for Machine Learning\n", + "\n", + "### Python\n", + "**The clear frontrunner for ML**\n", + "- Exceptional ecosystem of libraries: scikit-learn, TensorFlow, PyTorch...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113855000#9fb11fb6\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "--- 
Conversation 3 ---\n", + "User: Can you remind me what we discussed about programming?\n", + "Storing user message: Can you remind me what we discussed about programm...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Retrieved 0 memories from namespace: /summaries/demo-user/demo-session\n", + "INFO:langchain_aws.chat_models.bedrock_converse:Using Bedrock Converse API to generate response\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found 0 relevant memories\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113860000#0a1659ad\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Storing AI response: # Our Previous Discussion on Programming for Machi...\n", + "AI: # Our Previous Discussion on Programming for Machine Learning\n", + "\n", + "In our conversation, we discussed programming languages that are best suited for machine learning work. 
Here's a summary of what I shared...\n" + ] + } + ], + "source": [ + "import time\n", + "\n", + "# Test the agent with memory\n", + "print(\"=== Testing Agent with Memory ===\")\n", + "\n", + "# Configuration\n", + "config = {\n", + " \"configurable\": {\n", + " \"user_id\": \"demo-user\",\n", + " \"session_id\": \"demo-session\",\n", + " \"thread_id\": \"demo-thread\"\n", + " }\n", + "}\n", + "\n", + "# Test conversations\n", + "test_messages = [\n", + " \"Hi, I'm interested in learning about machine learning\",\n", + " \"What programming languages are best for ML?\",\n", + " \"Can you remind me what we discussed about programming?\"\n", + "]\n", + "\n", + "for i, message in enumerate(test_messages):\n", + " print(f\"\\n--- Conversation {i+1} ---\")\n", + " print(f\"User: {message}\")\n", + " \n", + " try:\n", + " result = graph.invoke(\n", + " {\"messages\": [HumanMessage(message)]},\n", + " config\n", + " )\n", + " \n", + " ai_response = result['messages'][-1].content\n", + " print(f\"AI: {ai_response[:200]}...\")\n", + " \n", + " except Exception as e:\n", + " print(f\"❌ Agent failed: {e}\")\n", + " \n", + " # Wait between messages\n", + " time.sleep(2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Key Features Summary\n", + "\n", + "The BedrockAgentCoreMemoryStore provides:\n", + "\n", + "### Core Operations\n", + "- **put()**: Store messages with automatic processing by AgentCore\n", + "- **search()**: Semantic search and listing of processed memories\n", + "- **get()**: Retrieve individual memory records by ID\n", + "- **delete()**: Remove memory records\n", + "- **batch()**: Execute multiple operations efficiently\n", + "\n", + "### Memory Processing\n", + "- Automatic message processing and summarization\n", + "- Semantic search capabilities\n", + "- User preference extraction\n", + "- Long-term memory persistence\n", + "\n", + "### Integration Features\n", + "- LangGraph store interface compliance\n", + "- Support for all 
LangChain message types\n", + "- Namespace-based organization\n", + "- Configurable search parameters\n", + "\n", + "### Limitations\n", + "- No async operations support\n", + "- TTL managed by AgentCore service\n", + "- Custom indexing handled by AgentCore\n", + "- Processing delay for memory availability\n", + "\n", + "This memory store enables persistent, searchable memory for conversational AI agents with automatic processing and intelligent retrieval capabilities." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 4f1b448a8104b483353316c7d1f8ef9ec43e4ef9 Mon Sep 17 00:00:00 2001 From: Jack Gordley Date: Wed, 24 Sep 2025 15:54:38 -0700 Subject: [PATCH 2/4] Adding agent core memory store and tests --- .../langgraph_checkpoint_aws/__init__.py | 4 + .../agentcore/__init__.py | 3 +- .../agentcore/helpers.py | 41 ++ .../agentcore/store.py | 275 ++++++++++ .../integration_tests/agentcore/test_store.py | 460 ++++++++++++++++ .../tests/unit_tests/agentcore/test_store.py | 503 ++++++++++++++++++ 6 files changed, 1285 insertions(+), 1 deletion(-) create mode 100644 libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/store.py create mode 100644 libs/langgraph-checkpoint-aws/tests/integration_tests/agentcore/test_store.py create mode 100644 libs/langgraph-checkpoint-aws/tests/unit_tests/agentcore/test_store.py diff --git a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/__init__.py b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/__init__.py index 
0fd21db7..2f79b184 100644 --- a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/__init__.py +++ b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/__init__.py @@ -5,6 +5,9 @@ from langgraph_checkpoint_aws.agentcore.saver import ( AgentCoreMemorySaver, ) +from langgraph_checkpoint_aws.agentcore.store import ( + AgentCoreMemoryStore, +) __version__ = "0.1.2" SDK_USER_AGENT = f"LangGraphCheckpointAWS#{__version__}" @@ -12,5 +15,6 @@ # Expose the saver class at the package level __all__ = [ "AgentCoreMemorySaver", + "AgentCoreMemoryStore", "SDK_USER_AGENT", ] diff --git a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/__init__.py b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/__init__.py index 9c52f924..ece872a7 100644 --- a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/__init__.py +++ b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/__init__.py @@ -1,3 +1,4 @@ from langgraph_checkpoint_aws.agentcore.saver import AgentCoreMemorySaver +from langgraph_checkpoint_aws.agentcore.store import AgentCoreMemoryStore -__all__ = ["AgentCoreMemorySaver"] +__all__ = ["AgentCoreMemorySaver", "AgentCoreMemoryStore"] diff --git a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/helpers.py b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/helpers.py index 0af27bbf..7d28be43 100644 --- a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/helpers.py +++ b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/helpers.py @@ -12,7 +12,9 @@ from typing import Any, Dict, List, Union import boto3 +from bedrock_agentcore.memory.constants import MessageRole from botocore.config import Config +from langchain_core.messages import BaseMessage from langgraph.checkpoint.base import CheckpointTuple, SerializerProtocol from langgraph_checkpoint_aws.agentcore.constants import ( @@ -320,3 +322,42 @@ def build_checkpoint_tuple( parent_config=parent_config, 
def convert_langchain_messages_to_event_messages(
    messages: List[BaseMessage],
) -> List[tuple]:
    """Convert LangChain messages to Bedrock AgentCore event tuples.

    Messages are skipped when they have already been persisted (an
    ``event_id`` is present in ``additional_kwargs``), when their text is
    empty/whitespace, or when their type has no AgentCore role mapping.

    Args:
        messages: List of LangChain messages (BaseMessage).

    Returns:
        List of ``(text, role)`` tuples suitable for AgentCore event payloads.
        (Note: the previous annotation ``List[Dict[str, Any]]`` was incorrect —
        tuples are what callers consume.)
    """
    # Map LangChain message types to Bedrock AgentCore conversational roles.
    role_by_type = {
        "human": MessageRole.USER.value,
        "ai": MessageRole.ASSISTANT.value,
        "tool": MessageRole.TOOL.value,
        "system": MessageRole.OTHER.value,
    }

    converted_messages = []
    for msg in messages:
        # Skip if event already saved
        if msg.additional_kwargs.get("event_id") is not None:
            continue

        text = msg.text()
        if not text.strip():
            continue

        role = role_by_type.get(msg.type)
        if role is None:
            logger.warning(f"Skipping unsupported message type: {msg.type}")
            continue

        converted_messages.append((text, role))

    return converted_messages
+""" + +import logging +import uuid +from collections.abc import Iterable +from datetime import datetime, timezone +from typing import Any + +import boto3 +from botocore.config import Config +from botocore.exceptions import ClientError +from langchain_core.messages import BaseMessage +from langgraph.store.base import ( + BaseStore, + GetOp, + Item, + ListNamespacesOp, + Op, + PutOp, + Result, + SearchItem, + SearchOp, +) + +from langgraph_checkpoint_aws.agentcore.helpers import ( + convert_langchain_messages_to_event_messages, +) + +logger = logging.getLogger(__name__) + + +class AgentCoreMemoryStore(BaseStore): + """ + AgentCore Memory Store implementation using BaseStore pattern. + + This store saves chat messages as conversational events in AgentCore Memory + and retrieves processed memories through semantic search. The embedding and + memory processing happens automatically in the AgentCore Memory service. + + Args: + memory_id: The AgentCore Memory resource ID + **boto3_kwargs: Additional arguments passed to boto3.client() + + Example: + ```python + store = AgentCoreMemoryStore( + memory_id="memory_abc123", + region_name="us-west-2" + ) + + # Store a message + from langchain_core.messages import HumanMessage + store.put(("user123", "session456"), "msg1", { + "message": HumanMessage("I love coffee") + }) + + # Search for processed memories + results = store.search(("facts", "user123"), query="user preferences") + ``` + """ + + supports_ttl: bool = False + ttl_config = None + + def __init__(self, *, memory_id: str, **boto3_kwargs: Any): + self.memory_id = memory_id + + config = Config( + user_agent_extra="x-client-framework:langgraph_agentcore_memory_store", + retries={"max_attempts": 4, "mode": "adaptive"}, + ) + self.client = boto3.client("bedrock-agentcore", config=config, **boto3_kwargs) + + def batch(self, ops: Iterable[Op]) -> list[Result]: + """Execute multiple operations in a single batch.""" + results = [] + + for op in ops: + if isinstance(op, PutOp): 
+ self._handle_put(op) + results.append(None) + elif isinstance(op, SearchOp): + result = self._handle_search(op) + results.append(result) + elif isinstance(op, GetOp): + result = self._handle_get(op) + results.append(result) + elif isinstance(op, ListNamespacesOp): + # ListNamespacesOp not supported for AgentCore Memory + results.append([]) + else: + raise ValueError(f"Unknown operation type: {type(op)}") + + return results + + async def abatch(self, ops: Iterable[Op]) -> list[Result]: + """Execute multiple operations asynchronously (not implemented).""" + raise NotImplementedError( + "AgentCore Memory client does not support async operations yet" + ) + + def _handle_put(self, op: PutOp) -> None: + """Handle PutOp by creating conversational events in AgentCore Memory.""" + if op.value is None: + # TODO: Delete operation support - need to figure out if we are deleting events or records + logger.warning("Delete operations not supported in AgentCore Memory") + return + + message = op.value.get("message") + if not isinstance(message, BaseMessage): + raise ValueError( + "Value must contain a 'message' key with a BaseMessage object" + ) + + # Convert namespace tuple to actor_id and session_id + if len(op.namespace) != 2: + raise ValueError("Namespace must be a tuple of (actor_id, session_id)") + + actor_id, session_id = op.namespace + event_messages = convert_langchain_messages_to_event_messages([message]) + + if not event_messages: + logger.warning( + f"No valid event messages to create for message type: {message.type}" + ) + return + + conversational_payloads = [] + for text, role in event_messages: + conversational_payloads.append( + {"conversational": {"content": {"text": text}, "role": role}} + ) + + self.client.create_event( + memoryId=self.memory_id, + actorId=actor_id, + sessionId=session_id, + eventTimestamp=datetime.now(timezone.utc), + payload=conversational_payloads, + ) + logger.debug(f"Created event for message in namespace {op.namespace}") + + def 
_handle_get(self, op: GetOp) -> Item | None: + """Handle GetOp by retrieving a specific memory record from AgentCore Memory.""" + try: + response = self.client.get_memory_record( + memoryId=self.memory_id, memoryRecordId=op.key + ) + + memory_record = response.get("memoryRecord") + if not memory_record: + return None + + return self._convert_memory_record_to_item(memory_record, op.namespace) + + except ClientError as e: + error_code = e.response["Error"]["Code"] + if error_code == "ResourceNotFoundException": + # Memory record not found + return None + else: + # Re-raise other client errors + logger.error(f"Failed to get memory record: {e}") + raise + except Exception as e: + logger.error(f"Failed to get memory record: {e}") + raise + + def _handle_search(self, op: SearchOp) -> list[SearchItem]: + """Handle SearchOp by retrieving memory records from AgentCore Memory.""" + if not op.query: + logger.warning("Search requires a query for AgentCore Memory") + return [] + + namespace_str = self._convert_namespace_to_string(op.namespace_prefix) + + search_criteria = {"searchQuery": op.query, "topK": op.limit} + + response = self.client.retrieve_memory_records( + memoryId=self.memory_id, + namespace=namespace_str, + searchCriteria=search_criteria, + maxResults=op.limit, + ) + + memory_records = response.get("memoryRecordSummaries", []) + return self._convert_memory_records_to_search_items( + memory_records, op.namespace_prefix + ) + + def _convert_namespace_to_string(self, namespace_tuple: tuple[str, ...]) -> str: + """Convert namespace tuple to AgentCore namespace string.""" + if not namespace_tuple: + return "/" + return "/" + "/".join(namespace_tuple) + + def _convert_memory_record_to_item( + self, memory_record: dict, namespace: tuple[str, ...] 
+ ) -> Item: + """Convert a single AgentCore memory record to an Item object.""" + # Extract content + content = memory_record.get("content", {}) + text = content.get("text", "") if isinstance(content, dict) else str(content) + + # Extract metadata + memory_record_id = memory_record.get("memoryRecordId", str(uuid.uuid4())) + created_at = memory_record.get("createdAt") + + # Parse timestamp - API only provides createdAt, use it for both created_at and updated_at + if isinstance(created_at, str): + try: + created_at = datetime.fromisoformat(created_at.replace("Z", "+00:00")) + except (ValueError, AttributeError): + created_at = datetime.now(timezone.utc) + elif created_at is None: + created_at = datetime.now(timezone.utc) + + return Item( + namespace=namespace, + key=memory_record_id, + value={ + "content": text, + "memory_strategy_id": memory_record.get("memoryStrategyId"), + "namespaces": memory_record.get("namespaces", []), + }, + created_at=created_at, + updated_at=created_at, # Memory records are not updated + ) + + def _convert_memory_records_to_search_items( + self, memory_records: list, namespace: tuple[str, ...] 
+ ) -> list[SearchItem]: + """Convert AgentCore memory records to SearchItem objects.""" + results = [] + + for record in memory_records: + content = record.get("content", {}) + text = ( + content.get("text", "") if isinstance(content, dict) else str(content) + ) + + memory_record_id = record.get("memoryRecordId", str(uuid.uuid4())) + score = record.get("score") + created_at = record.get("createdAt") + + if isinstance(created_at, str): + try: + created_at = datetime.fromisoformat( + created_at.replace("Z", "+00:00") + ) + except (ValueError, AttributeError): + created_at = datetime.now(timezone.utc) + elif created_at is None: + created_at = datetime.now(timezone.utc) + + search_item = SearchItem( + namespace=namespace, + key=memory_record_id, + value={ + "content": text, + "memory_strategy_id": record.get("memoryStrategyId"), + "namespaces": record.get("namespaces", []), + }, + created_at=created_at, + updated_at=created_at, # Memory records are not updated + score=float(score) if score is not None else None, + ) + results.append(search_item) + + return results diff --git a/libs/langgraph-checkpoint-aws/tests/integration_tests/agentcore/test_store.py b/libs/langgraph-checkpoint-aws/tests/integration_tests/agentcore/test_store.py new file mode 100644 index 00000000..6d3d6adc --- /dev/null +++ b/libs/langgraph-checkpoint-aws/tests/integration_tests/agentcore/test_store.py @@ -0,0 +1,460 @@ +""" +Integration tests for AgentCoreMemoryStore. + +These tests require real AWS credentials and an AgentCore Memory resource. +Set AGENTCORE_MEMORY_ID environment variable to run these tests. 
+""" + +import os +import random +import string +import time +import uuid + +import pytest +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage +from langgraph.store.base import GetOp, PutOp, SearchOp + +from langgraph_checkpoint_aws.agentcore.store import AgentCoreMemoryStore + + +def generate_valid_actor_id(): + """Generate a valid actor ID that matches AgentCore pattern [a-zA-Z0-9][a-zA-Z0-9-_]*""" + chars = string.ascii_letters + string.digits + return "actor" + "".join(random.choices(chars, k=6)) + + +def generate_valid_session_id(): + """Generate a valid session ID that matches AgentCore pattern [a-zA-Z0-9][a-zA-Z0-9-_]*""" + chars = string.ascii_letters + string.digits + return "session" + "".join(random.choices(chars, k=6)) + + +class TestAgentCoreMemoryStoreIntegration: + """Integration tests for AgentCoreMemoryStore with real AgentCore Memory service.""" + + @pytest.fixture + def memory_id(self): + """Get memory ID from environment variable.""" + memory_id = os.environ.get("AGENTCORE_MEMORY_ID") + if not memory_id: + pytest.skip("AGENTCORE_MEMORY_ID environment variable not set") + return memory_id + + @pytest.fixture + def store(self, memory_id): + """Create AgentCoreMemoryStore instance.""" + return AgentCoreMemoryStore(memory_id=memory_id, region_name="us-west-2") + + @pytest.fixture + def actor_id(self): + """Generate unique actor ID for test isolation.""" + return generate_valid_actor_id() + + @pytest.fixture + def session_id(self): + """Generate unique session ID for test isolation.""" + return generate_valid_session_id() + + def test_store_human_message(self, store, actor_id, session_id): + """Test storing a human message as conversational event.""" + message = HumanMessage("I love coffee and prefer dark roast") + + store.put( + namespace=(actor_id, session_id), + key=str(uuid.uuid4()), + value={"message": message}, + ) + + assert True, "Message stored successfully" + + def test_store_ai_message(self, store, 
actor_id, session_id): + """Test storing an AI message as conversational event.""" + message = AIMessage( + "I understand you enjoy dark roast coffee. That's a great choice!" + ) + + store.put( + namespace=(actor_id, session_id), + key=str(uuid.uuid4()), + value={"message": message}, + ) + + assert True, "AI message stored successfully" + + def test_store_system_message(self, store, actor_id, session_id): + """Test storing a system message as conversational event.""" + message = SystemMessage( + "You are a helpful assistant that remembers user preferences" + ) + + store.put( + namespace=(actor_id, session_id), + key=str(uuid.uuid4()), + value={"message": message}, + ) + + assert True, "System message stored successfully" + + def test_store_tool_message(self, store, actor_id, session_id): + """Test storing a tool message as conversational event.""" + message = ToolMessage( + content="Weather in San Francisco: 72°F, sunny", tool_call_id="call_123" + ) + + store.put( + namespace=(actor_id, session_id), + key=str(uuid.uuid4()), + value={"message": message}, + ) + + assert True, "Tool message stored successfully" + + def test_conversation_flow(self, store, actor_id, session_id): + """Test storing a complete conversation flow.""" + messages = [ + HumanMessage("Hi, I'm planning a trip to Italy"), + AIMessage( + "That sounds wonderful! Italy has so many amazing places to visit. What type of experience are you looking for?" + ), + HumanMessage("I love art and history, especially Renaissance art"), + AIMessage( + "Perfect! Florence would be ideal for you - it's the birthplace of the Renaissance with incredible museums like the Uffizi Gallery" + ), + HumanMessage("That sounds perfect! I also enjoy good food and wine"), + AIMessage( + "Excellent! Tuscany, where Florence is located, is famous for its cuisine and wines. 
You'll love the local trattorias and vineyards" + ), + ] + + for i, message in enumerate(messages): + store.put( + namespace=(actor_id, session_id), + key=f"msg_{i}", + value={"message": message}, + ) + time.sleep(1) + + time.sleep(2) + + assert True, "Conversation flow stored successfully" + + def test_search_processed_memories(self, store, actor_id): + """Test searching for processed memories after storing conversations.""" + session1 = generate_valid_session_id() + session2 = generate_valid_session_id() + + preference_messages = [ + HumanMessage("I really love Italian food, especially pasta carbonara"), + AIMessage("Great choice! Carbonara is a classic Roman dish"), + HumanMessage("I also enjoy red wine, particularly Chianti"), + AIMessage("Chianti pairs wonderfully with Italian cuisine"), + ] + + for i, msg in enumerate(preference_messages[:2]): + store.put( + namespace=(actor_id, session1), key=f"pref1_{i}", value={"message": msg} + ) + time.sleep(1) + + for i, msg in enumerate(preference_messages[2:]): + store.put( + namespace=(actor_id, session2), key=f"pref2_{i}", value={"message": msg} + ) + time.sleep(1) + + time.sleep(60) + + search_namespaces = [ + ("preferences", actor_id), + ("facts", actor_id), + ("summaries", "actors", actor_id, "sessions", session1), + ("summaries", "actors", actor_id, "sessions", session2), + ] + + found_results = False + for namespace in search_namespaces: + results = store.search( + namespace, query="food preferences Italian cuisine", limit=5 + ) + + if results: + found_results = True + break + + assert isinstance(found_results, bool) + + def test_batch_operations(self, store, actor_id, session_id): + """Test batch operations with multiple put and search operations.""" + messages = [ + HumanMessage("I'm interested in learning about machine learning"), + AIMessage( + "Machine learning is a fascinating field! What specific area interests you most?" 
+ ), + HumanMessage("I'd like to understand neural networks and deep learning"), + AIMessage( + "Great choice! Neural networks are the foundation of modern AI systems" + ), + ] + + put_ops = [] + for i, message in enumerate(messages): + put_ops.append( + PutOp( + namespace=(actor_id, session_id), + key=f"batch_msg_{i}", + value={"message": message}, + ) + ) + + search_ops = [ + SearchOp( + namespace_prefix=("facts", actor_id), + query="machine learning interests", + limit=3, + ), + SearchOp( + namespace_prefix=("preferences", actor_id), + query="learning topics", + limit=3, + ), + ] + + all_ops = put_ops + search_ops + results = store.batch(all_ops) + + assert len(results) == len(all_ops) + + for i in range(len(put_ops)): + assert results[i] is None + + for i in range(len(put_ops), len(all_ops)): + assert isinstance(results[i], list) + + assert all(isinstance(r, list) for r in results[len(put_ops) :]) + + def test_multiple_actors_isolation(self, store): + """Test that different actors have isolated memory spaces.""" + actor1 = generate_valid_actor_id() + actor2 = generate_valid_actor_id() + session1 = generate_valid_session_id() + session2 = generate_valid_session_id() + + # Store different preferences for each actor + actor1_preference = "I love spicy food and hot sauce" + actor2_preference = "I prefer mild flavors and avoid spicy food" + + store.put( + namespace=(actor1, session1), + key="pref1", + value={"message": HumanMessage(actor1_preference)}, + ) + + store.put( + namespace=(actor2, session2), + key="pref2", + value={"message": HumanMessage(actor2_preference)}, + ) + + # Wait for processing the long term memory (usually done in under 2 minutes) + time.sleep(120) + + # Search should be isolated per actor + results1 = store.search(("facts", actor1), query="food preferences", limit=5) + + results2 = store.search(("facts", actor2), query="food preferences", limit=5) + + assert isinstance(results1, list) + assert isinstance(results2, list) + + # Check that 
actor1's results contain reference to spicy food preference + if results1: + actor1_content_found = any( + "hot" in result.value.get("content", "").lower() for result in results1 + ) + assert actor1_content_found, ( + f"Actor1's spicy food preference not found in search results: {[r.value.get('content', '') for r in results1]}" + ) + + # Check that actor2's results contain reference to mild food preference + if results2: + actor2_content_found = any( + "mild" in result.value.get("content", "").lower() for result in results2 + ) + assert actor2_content_found, ( + f"Actor2's mild food preference not found in search results: {[r.value.get('content', '') for r in results2]}" + ) + + assert True, "Actor isolation test completed with preference verification" + + def test_error_handling_invalid_message(self, store, actor_id, session_id): + """Test error handling for invalid message format.""" + with pytest.raises(ValueError, match="Value must contain a 'message' key"): + store.put( + namespace=(actor_id, session_id), + key="invalid", + value={"not_message": "invalid"}, + ) + + def test_error_handling_invalid_namespace(self, store): + """Test error handling for invalid namespace format.""" + with pytest.raises(ValueError, match="Namespace must be a tuple of"): + store.put( + namespace=("single_element",), + key="test", + value={"message": HumanMessage("test")}, + ) + + def test_search_without_query(self, store, actor_id): + """Test search behavior when no query is provided.""" + results = store.search((actor_id, "facts"), limit=5) + + # Should return empty list when no query provided + assert isinstance(results, list) + assert len(results) == 0 + + def test_complex_message_content(self, store, actor_id, session_id): + """Test storing messages with complex content structures.""" + complex_message = HumanMessage( + content=[ + {"type": "text", "text": "I'm planning a vacation and need help with:"}, + {"type": "text", "text": "1. 
Flight bookings to Europe"}, + {"type": "text", "text": "2. Hotel recommendations in Paris"}, + { + "type": "text", + "text": "3. Restaurant suggestions for Italian cuisine", + }, + ] + ) + + store.put( + namespace=(actor_id, session_id), + key="complex_msg", + value={"message": complex_message}, + ) + + def test_get_memory_record_success(self, store, actor_id): + """Test retrieving a specific memory record that exists.""" + # First, we need to search for existing memory records to get valid IDs + # This test assumes some memory records exist from previous conversations + + # Try to search for any existing records + search_namespaces = [ + ("preferences", actor_id), + ("facts", actor_id), + ] + + memory_record_id = None + test_namespace = None + + for namespace in search_namespaces: + results = store.search( + namespace, + query="preferences food coffee", + limit=1, + ) + + if results and len(results) > 0: + memory_record_id = results[0].key + test_namespace = namespace + break + + if memory_record_id and test_namespace: + # Test GetOp with existing memory record + item = store.get(test_namespace, memory_record_id) + + if item: + assert item.key == memory_record_id + assert item.namespace == test_namespace + assert "content" in item.value + assert item.created_at is not None + assert item.updated_at is not None + + def test_get_memory_record_not_found(self, store, actor_id): + """Test retrieving a memory record that doesn't exist.""" + # Use a non-existent memory record ID that follows AgentCore pattern: mem-[a-zA-Z0-9-_]* + non_existent_id = f"mem-nonexistent-{uuid.uuid4().hex}" + + # This should return None without raising an exception + item = store.get(("facts", actor_id), non_existent_id) + + assert item is None, "Non-existent memory record should return None" + + def test_batch_operations_with_get(self, store, actor_id, session_id): + """Test batch operations including GetOp operations.""" + # First, search for existing memory records to get valid IDs for 
GetOp + search_results = store.search(("facts", actor_id), query="preferences", limit=2) + + # Create batch operations including GetOp + batch_ops = [] + + # Add some PutOp operations + messages = [ + HumanMessage("I enjoy hiking in the mountains"), + AIMessage( + "Mountain hiking is a great way to stay active and enjoy nature!" + ), + ] + + for i, message in enumerate(messages): + batch_ops.append( + PutOp( + namespace=(actor_id, session_id), + key=f"hiking_msg_{i}", + value={"message": message}, + ) + ) + + # Add SearchOp + batch_ops.append( + SearchOp( + namespace_prefix=("facts", actor_id), + query="outdoor activities", + limit=3, + ) + ) + + if search_results: + for result in search_results[:2]: # Test up to 2 GetOps + batch_ops.append(GetOp(namespace=result.namespace, key=result.key)) + else: + batch_ops.append( + GetOp( + namespace=("facts", actor_id), + key=f"mem-nonexistent-{uuid.uuid4().hex}", + ) + ) + + results = store.batch(batch_ops) + + assert len(results) == len(batch_ops) + + # PutOp results should be None + assert results[0] is None # First PutOp + assert results[1] is None # Second PutOp + + # SearchOp result should be a list + assert isinstance(results[2], list) + + # GetOp results should be Item objects or None + for i in range(3, len(results)): + result = results[i] + assert result is None or hasattr(result, "key"), ( + f"GetOp result should be Item or None, got {type(result)}" + ) + + def test_get_operation_error_handling(self, store, actor_id): + """Test GetOp error handling with various edge cases.""" + # Test with empty memory record ID - should return None or raise ValueError + item = store.get(("facts", actor_id), "") + assert item is None, "Empty key should return None" + + # Test with very long memory record ID - should return None + long_id = "x" * 1000 + item = store.get(("facts", actor_id), long_id) + assert item is None, "Very long ID should return None" + + # Test with special characters in memory record ID - should return None + 
special_id = "test-record_123.456" + item = store.get(("facts", actor_id), special_id) + assert item is None, "Special character ID should return None" diff --git a/libs/langgraph-checkpoint-aws/tests/unit_tests/agentcore/test_store.py b/libs/langgraph-checkpoint-aws/tests/unit_tests/agentcore/test_store.py new file mode 100644 index 00000000..4cc28fa1 --- /dev/null +++ b/libs/langgraph-checkpoint-aws/tests/unit_tests/agentcore/test_store.py @@ -0,0 +1,503 @@ +""" +Unit tests for AgentCore Memory Store. +""" + +from datetime import datetime, timezone +from unittest.mock import ANY, MagicMock, Mock, patch + +import pytest +from botocore.exceptions import ClientError +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage +from langgraph.store.base import ( + GetOp, + Item, + ListNamespacesOp, + MatchCondition, + PutOp, + SearchItem, + SearchOp, +) + +from langgraph_checkpoint_aws.agentcore.store import AgentCoreMemoryStore + + +class TestAgentCoreMemoryStore: + """Test suite for AgentCoreMemoryStore.""" + + @pytest.fixture + def mock_boto_client(self): + """Mock boto3 client with all required methods.""" + mock_client = Mock() + mock_client.create_event = MagicMock() + mock_client.retrieve_memory_records = MagicMock() + mock_client.get_memory_record = MagicMock() + return mock_client + + @pytest.fixture + def memory_id(self): + return "test-memory-id" + + @pytest.fixture + def store(self, mock_boto_client, memory_id): + """Create store instance with mocked client.""" + with patch("boto3.client") as mock_boto3_client: + mock_boto3_client.return_value = mock_boto_client + return AgentCoreMemoryStore(memory_id=memory_id) + + @pytest.fixture + def sample_namespace(self): + return ("test_actor", "test_session") + + @pytest.fixture + def sample_item_data(self): + return { + "message": HumanMessage(content="Hello, world!"), + "metadata": {"timestamp": "2024-01-01T00:00:00Z"}, + } + + @pytest.fixture + def sample_memory_record(self): + 
return { + "memoryRecordId": "mem-test-record-12345678901234567890123456789012345", + "content": {"text": "Hello, world!"}, + "memoryStrategyId": "strategy-123", + "namespaces": ["test_actor", "test_session"], + "createdAt": datetime(2024, 1, 1, tzinfo=timezone.utc), + "score": 0.95, + } + + def test_init_with_region_name(self, memory_id): + """Test initialization with region name creates boto3 client.""" + with patch("boto3.client") as mock_boto3_client: + mock_client = Mock() + mock_boto3_client.return_value = mock_client + + store = AgentCoreMemoryStore(memory_id=memory_id, region_name="us-west-2") + + assert store.memory_id == memory_id + mock_boto3_client.assert_called_once_with( + "bedrock-agentcore", config=ANY, region_name="us-west-2" + ) + + def test_init_missing_parameters(self): + """Test initialization requires memory_id.""" + with pytest.raises(TypeError): + AgentCoreMemoryStore() + + def test_batch_get_op_success(self, store, mock_boto_client, sample_memory_record): + """Test successful GetOp operation.""" + mock_boto_client.get_memory_record.return_value = { + "memoryRecord": sample_memory_record + } + + ops = [GetOp(namespace=("test_actor", "test_session"), key="test-key")] + results = store.batch(ops) + + assert len(results) == 1 + result = results[0] + assert isinstance(result, Item) + assert result.namespace == ("test_actor", "test_session") + assert result.key == sample_memory_record["memoryRecordId"] + assert result.value["content"] == "Hello, world!" 
+ + mock_boto_client.get_memory_record.assert_called_once_with( + memoryId="test-memory-id", memoryRecordId="test-key" + ) + + def test_batch_get_op_not_found(self, store, mock_boto_client): + """Test GetOp when record not found.""" + mock_boto_client.get_memory_record.side_effect = ClientError( + error_response={"Error": {"Code": "ResourceNotFoundException"}}, + operation_name="GetMemoryRecord", + ) + + ops = [GetOp(namespace=("test_actor", "test_session"), key="nonexistent")] + results = store.batch(ops) + + assert len(results) == 1 + assert results[0] is None + + def test_batch_get_op_other_error(self, store, mock_boto_client): + """Test GetOp with other boto3 errors.""" + mock_boto_client.get_memory_record.side_effect = ClientError( + error_response={"Error": {"Code": "AccessDeniedException"}}, + operation_name="GetMemoryRecord", + ) + + ops = [GetOp(namespace=("test_actor", "test_session"), key="test-key")] + + with pytest.raises(ClientError): + store.batch(ops) + + def test_batch_put_op_success(self, store, mock_boto_client, sample_item_data): + """Test successful PutOp operation.""" + mock_boto_client.create_event.return_value = { + "event": {"eventId": "event-123", "memoryId": "test-memory-id"} + } + + ops = [ + PutOp( + namespace=("test_actor", "test_session"), + key="test-key", + value=sample_item_data, + ) + ] + results = store.batch(ops) + + assert len(results) == 1 + assert results[0] is None # PutOp returns None + + mock_boto_client.create_event.assert_called_once() + call_args = mock_boto_client.create_event.call_args[1] + assert call_args["memoryId"] == "test-memory-id" + assert call_args["actorId"] == "test_actor" + assert call_args["sessionId"] == "test_session" + assert len(call_args["payload"]) == 1 + assert call_args["payload"][0]["conversational"]["role"] == "USER" + + def test_batch_put_op_delete(self, store, mock_boto_client): + """Test PutOp with None value (delete operation).""" + ops = [ + PutOp(namespace=("test_actor", "test_session"), 
key="test-key", value=None) + ] + results = store.batch(ops) + + assert len(results) == 1 + assert results[0] is None + + # Should not call create_event for delete operations + mock_boto_client.create_event.assert_not_called() + + def test_batch_put_op_with_ai_message(self, store, mock_boto_client): + """Test PutOp with AI message.""" + ai_message_data = {"message": AIMessage(content="I can help you with that!")} + + ops = [ + PutOp( + namespace=("test_actor", "test_session"), + key="test-key", + value=ai_message_data, + ) + ] + store.batch(ops) + + call_args = mock_boto_client.create_event.call_args[1] + assert call_args["payload"][0]["conversational"]["role"] == "ASSISTANT" + assert ( + "I can help you with that!" + in call_args["payload"][0]["conversational"]["content"]["text"] + ) + + def test_batch_put_op_with_tool_message(self, store, mock_boto_client): + """Test PutOp with tool message.""" + tool_message_data = { + "message": ToolMessage(content="Tool result", tool_call_id="call-123") + } + + ops = [ + PutOp( + namespace=("test_actor", "test_session"), + key="test-key", + value=tool_message_data, + ) + ] + store.batch(ops) + + call_args = mock_boto_client.create_event.call_args[1] + assert call_args["payload"][0]["conversational"]["role"] == "TOOL" + + def test_batch_put_op_with_system_message(self, store, mock_boto_client): + """Test PutOp with system message.""" + system_message_data = { + "message": SystemMessage(content="You are a helpful assistant") + } + + ops = [ + PutOp( + namespace=("test_actor", "test_session"), + key="test-key", + value=system_message_data, + ) + ] + store.batch(ops) + + call_args = mock_boto_client.create_event.call_args[1] + assert call_args["payload"][0]["conversational"]["role"] == "OTHER" + + def test_batch_put_op_with_invalid_value(self, store, mock_boto_client): + """Test PutOp with value that doesn't contain a message.""" + non_message_data = {"some_key": "some_value", "number": 42} + + ops = [ + PutOp( + 
namespace=("test_actor", "test_session"), + key="test-key", + value=non_message_data, + ) + ] + + with pytest.raises(ValueError, match="Value must contain a 'message' key"): + store.batch(ops) + + def test_batch_search_op_success( + self, store, mock_boto_client, sample_memory_record + ): + """Test successful SearchOp operation.""" + mock_boto_client.retrieve_memory_records.return_value = { + "memoryRecordSummaries": [sample_memory_record] + } + + ops = [ + SearchOp( + namespace_prefix=("test_actor",), query="test query", limit=5, offset=0 + ) + ] + results = store.batch(ops) + + assert len(results) == 1 + search_results = results[0] + assert len(search_results) == 1 + assert isinstance(search_results[0], SearchItem) + assert search_results[0].namespace == ("test_actor",) + assert search_results[0].score == 0.95 + + mock_boto_client.retrieve_memory_records.assert_called_once_with( + memoryId="test-memory-id", + namespace="/test_actor", + searchCriteria={"searchQuery": "test query", "topK": 5}, + maxResults=5, + ) + + def test_batch_search_op_no_query(self, store, mock_boto_client): + """Test SearchOp without query returns empty results.""" + ops = [SearchOp(namespace_prefix=("test_actor",), limit=5)] + results = store.batch(ops) + + assert len(results) == 1 + assert results[0] == [] + + # Should not call retrieve_memory_records without query + mock_boto_client.retrieve_memory_records.assert_not_called() + + def test_batch_search_op_empty_results(self, store, mock_boto_client): + """Test SearchOp with empty results.""" + mock_boto_client.retrieve_memory_records.return_value = { + "memoryRecordSummaries": [] + } + + ops = [ + SearchOp( + namespace_prefix=("test_actor",), query="nonexistent query", limit=5 + ) + ] + results = store.batch(ops) + + assert len(results) == 1 + assert results[0] == [] + + def test_batch_list_namespaces_op_success(self, store): + """Test successful ListNamespacesOp operation.""" + ops = [ListNamespacesOp(limit=10)] + results = 
store.batch(ops) + + assert len(results) == 1 + assert results[0] == [] # Always returns empty list + + def test_batch_list_namespaces_op_with_conditions(self, store): + """Test ListNamespacesOp with match conditions.""" + ops = [ + ListNamespacesOp( + match_conditions=( + MatchCondition(match_type="prefix", path=("test_actor",)), + ), + limit=5, + ) + ] + results = store.batch(ops) + + assert len(results) == 1 + assert results[0] == [] # Always returns empty list + + def test_batch_mixed_operations( + self, store, mock_boto_client, sample_memory_record, sample_item_data + ): + """Test batch with mixed operation types.""" + mock_boto_client.get_memory_record.return_value = { + "memoryRecord": sample_memory_record + } + mock_boto_client.retrieve_memory_records.return_value = { + "memoryRecordSummaries": [sample_memory_record] + } + mock_boto_client.create_event.return_value = {"event": {"eventId": "event-123"}} + + ops = [ + GetOp(namespace=("test_actor", "test_session"), key="test-key"), + PutOp( + namespace=("test_actor", "test_session"), + key="new-key", + value=sample_item_data, + ), + SearchOp(namespace_prefix=("test_actor",), query="test query", limit=5), + ListNamespacesOp(limit=10), + ] + results = store.batch(ops) + + assert len(results) == 4 + assert isinstance(results[0], Item) # GetOp result + assert results[1] is None # PutOp result + assert isinstance(results[2], list) # SearchOp result + assert results[3] == [] # ListNamespacesOp result + + def test_batch_unknown_operation(self, store): + """Test batch with unknown operation type.""" + + class UnknownOp: + pass + + ops = [UnknownOp()] + + with pytest.raises(ValueError, match="Unknown operation type"): + store.batch(ops) + + def test_abatch_not_implemented(self, store): + """Test that abatch raises NotImplementedError.""" + ops = [SearchOp(namespace_prefix=("test_actor",), query="test", limit=5)] + + # Use asyncio to test async method + import asyncio + + async def test_async(): + with 
pytest.raises(NotImplementedError): + await store.abatch(ops) + + asyncio.run(test_async()) + + def test_convert_memory_record_to_item(self, store, sample_memory_record): + """Test conversion of memory record to Item.""" + namespace = ("test_actor", "test_session") + + item = store._convert_memory_record_to_item(sample_memory_record, namespace) + + assert isinstance(item, Item) + assert item.namespace == namespace + assert item.key == sample_memory_record["memoryRecordId"] + assert item.value["content"] == "Hello, world!" + assert item.value["memory_strategy_id"] == "strategy-123" + assert isinstance(item.created_at, datetime) + assert isinstance(item.updated_at, datetime) + + def test_convert_memory_records_to_search_items(self, store, sample_memory_record): + """Test conversion of memory records to SearchItem objects.""" + namespace = ("test_actor",) + + search_items = store._convert_memory_records_to_search_items( + [sample_memory_record], namespace + ) + + assert len(search_items) == 1 + search_item = search_items[0] + assert isinstance(search_item, SearchItem) + assert search_item.namespace == namespace + assert search_item.key == sample_memory_record["memoryRecordId"] + assert search_item.value["content"] == "Hello, world!" 
+ assert search_item.score == 0.95 + assert isinstance(search_item.created_at, datetime) + assert isinstance(search_item.updated_at, datetime) + + def test_convert_namespace_to_string(self, store): + """Test namespace tuple to string conversion.""" + # Test empty namespace + assert store._convert_namespace_to_string(()) == "/" + + # Test single element + assert store._convert_namespace_to_string(("actor",)) == "/actor" + + # Test multiple elements + assert ( + store._convert_namespace_to_string(("actor", "session")) == "/actor/session" + ) + + def test_error_handling_preserves_exceptions(self, store, mock_boto_client): + """Test that boto3 exceptions are properly preserved.""" + mock_boto_client.create_event.side_effect = ClientError( + error_response={ + "Error": {"Code": "ThrottlingException", "Message": "Rate exceeded"} + }, + operation_name="CreateEvent", + ) + + ops = [ + PutOp( + namespace=("test_actor", "test_session"), + key="test-key", + value={"message": HumanMessage(content="test")}, + ) + ] + + with pytest.raises(ClientError) as exc_info: + store.batch(ops) + + assert exc_info.value.response["Error"]["Code"] == "ThrottlingException" + + def test_memory_record_id_format_validation(self, store): + """Test that memory record IDs follow the required format.""" + # This is more of a documentation test to ensure we understand the format + test_id = "mem-test-record-12345678901234567890123456789012345" + assert test_id.startswith("mem-") + assert len(test_id) >= 40 # Minimum length requirement + assert all( + c.isalnum() or c in "-_" for c in test_id[4:] + ) # Valid characters after prefix + + def test_namespace_handling_edge_cases( + self, store, mock_boto_client, sample_memory_record + ): + """Test namespace handling with various edge cases.""" + # Test with single element namespace + ops = [SearchOp(namespace_prefix=("single_actor",), query="test", limit=5)] + + mock_boto_client.retrieve_memory_records.return_value = { + "memoryRecordSummaries": 
[sample_memory_record] + } + + store.batch(ops) + + call_args = mock_boto_client.retrieve_memory_records.call_args[1] + assert call_args["namespace"] == "/single_actor" + + def test_search_with_pagination_parameters( + self, store, mock_boto_client, sample_memory_record + ): + """Test SearchOp with offset parameter.""" + mock_boto_client.retrieve_memory_records.return_value = { + "memoryRecordSummaries": [sample_memory_record] + } + + ops = [ + SearchOp( + namespace_prefix=("test_actor",), + query="test query", + limit=10, + offset=5, # This should be ignored in current implementation + ) + ] + results = store.batch(ops) + + # Offset is currently ignored, but operation should still work + assert len(results) == 1 + assert len(results[0]) == 1 + + def test_put_op_invalid_namespace(self, store, mock_boto_client): + """Test PutOp with invalid namespace length.""" + ops = [ + PutOp( + namespace=("single_element",), # Should be (actor_id, session_id) + key="test-key", + value={"message": HumanMessage(content="test")}, + ) + ] + + with pytest.raises( + ValueError, match="Namespace must be a tuple of \\(actor_id, session_id\\)" + ): + store.batch(ops) From 302a4ffe0b506de0515bb8938b5eed6c302119f8 Mon Sep 17 00:00:00 2001 From: Jack Gordley Date: Wed, 24 Sep 2025 18:00:52 -0700 Subject: [PATCH 3/4] Adding new long term memory demo notebook --- .../stores/bedrock_agentcore_memory_store.py | 524 ----------- ...ntcore_memory_store_long_term_search.ipynb | 535 +++++++++++ ...rock_agentcore_memory_store_tutorial.ipynb | 875 ------------------ 3 files changed, 535 insertions(+), 1399 deletions(-) delete mode 100644 libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/stores/bedrock_agentcore_memory_store.py create mode 100644 samples/memory/agentcore_memory_store_long_term_search.ipynb delete mode 100644 samples/memory/bedrock_agentcore_memory_store_tutorial.ipynb diff --git a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/stores/bedrock_agentcore_memory_store.py 
b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/stores/bedrock_agentcore_memory_store.py deleted file mode 100644 index f99b6634..00000000 --- a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/stores/bedrock_agentcore_memory_store.py +++ /dev/null @@ -1,524 +0,0 @@ -import logging -from collections.abc import Iterable -from datetime import datetime -from typing import ( - Any, - Dict, - List, - Literal, - NamedTuple, - Union, -) - -from bedrock_agentcore.memory.client import MemoryClient -from bedrock_agentcore.memory.constants import MessageRole -from langchain_core.messages import ( - BaseMessage, -) -from langgraph.store.base import ( - BaseStore, - Item, - SearchItem, - TTLConfig, -) - -logger = logging.getLogger(__name__) - -class AgentCoreRetrieveOp(NamedTuple): - namespace: tuple[str, ...] # (actor_id, session_id) - query: str - top_k: int = 10 - memory_strategy_id: str | None = None - -class AgentCoreListOp(NamedTuple): - namespace: tuple[str, ...] # (actor_id, session_id) - max_results: int = 100 - -class AgentCoreStoreOp(NamedTuple): - """Operation to store a message event.""" - namespace: tuple[str, ...] # (actor_id, session_id) - key: str # event identifier - message: BaseMessage - event_timestamp: datetime | None = None - -class AgentCoreDeleteOp(NamedTuple): - """Operation to delete a memory record.""" - memory_record_id: str - -class AgentCoreGetOp(NamedTuple): - """Operation to get a memory record.""" - memory_record_id: str - -AgentCoreOp = Union[AgentCoreRetrieveOp, AgentCoreListOp, AgentCoreStoreOp, AgentCoreDeleteOp, AgentCoreGetOp] -AgentCoreResult = Union[Item, list[Item], list[SearchItem], list[tuple[str, ...]], None] - -# Define missing constants and types -class NotProvided: - pass - -NOT_PROVIDED = NotProvided() -NamespacePath = tuple[str, ...] - -class BedrockAgentCoreMemoryStore(BaseStore): - """Bedrock AgentCore Memory Store support for storage of chat messages and retrieval of long term memories - - !!! 
example "Examples" - Storing conversation messages: - memory_client = MemoryClient(region="us-west-2") - store = BedrockAgentCoreMemoryStore(memory_client) - - Stores enable persistence and memory that can be shared across threads, - scoped to user IDs, assistant IDs, or other arbitrary namespaces. - - Note: - This implementation depends on Amazon Bedrock AgentCore Memory to store and process - messages then later retrieve the processed memories through semantic search. An example - would be saving a conversation and then processing async user preferences for later - search in a user preferences namespace. - """ - - supports_ttl: bool = False - ttl_config: TTLConfig | None = None - - __slots__ = ("memory_client", "memory_id") - - def __init__(self, *, memory_id: str, memory_client: MemoryClient) -> None: - - # Bedrock AgentCore Memory Client - self.memory_client: MemoryClient = memory_client - self.memory_id = memory_id - - def batch(self, ops: Iterable[AgentCoreOp]) -> list[AgentCoreResult]: - """Execute a batch of AgentCore operations synchronously.""" - results = [] - - for op in ops: - if isinstance(op, AgentCoreRetrieveOp): - result = self._retrieve_memories(op) - results.append(result) - elif isinstance(op, AgentCoreListOp): - result = self._list_memory_records(op) - results.append(result) - elif isinstance(op, AgentCoreGetOp): - result = self._get_memory_record(op) - results.append(result) - elif isinstance(op, AgentCoreStoreOp): - self._store_message(op) - results.append(None) - elif isinstance(op, AgentCoreDeleteOp): - self._delete_memory_record(op) - results.append(None) - else: - raise ValueError(f"Unknown AgentCore operation type: {type(op)}") - - return results - - async def abatch(self, ops: Iterable[AgentCoreOp]) -> list[AgentCoreResult]: - """Execute a batch of AgentCore operations asynchronously.""" - raise NotImplementedError("The Bedrock AgentCore Memory client does not yet support async operations") - - def get( - self, - namespace: tuple[str, 
...], - key: str, - *, - refresh_ttl: bool | None = None, - ) -> Item | None: - """Retrieve a single memory item. - - Args: - namespace: (actor_id, session_id) indicating where the memory is stored - key: Unique identifier for the memory event - refresh_ttl: Not applicable for Bedrock AgentCore Memory - - Returns: - Item with the individual record information retrieved - """ - op = AgentCoreGetOp(memory_record_id=key) - result = self.batch([op])[0] - return result - - - - def search( - self, - namespace_prefix: tuple[str, ...], - /, - *, - query: str | None = None, - filter: dict[str, Any] | None = None, - limit: int = 10, - offset: int = 0, - refresh_ttl: bool | None = None, - # AgentCore-specific parameters - memory_strategy_id: str | None = None, - top_k: int | None = None, - ) -> list[SearchItem]: - """Search for items within a namespace prefix. - - Args: - namespace_prefix: the namespace tuple of which to search (actor_id, session_id) - query: the query to search for in Bedrock AgentCore memory - filter: Not supported by Bedrock AgentCore Memory (will be ignored) - limit: Maximum number of items to return (used as top_k if not specified) - offset: Not supported by Bedrock AgentCore Memory (will be ignored) - refresh_ttl: Not applicable for Bedrock AgentCore Memory - memory_strategy_id (optional): strategy ID to search for - top_k (optional): the maximum number of top-scoring memory records to return - - Returns: - List of items matching the search criteria. 
- - ???+ example "Examples" - Basic listing of long term memories (no semantic search): - ```python - # List memory records in a namespace - results = store.search(("user-1", "session-1")) - ``` - - Basic semantic searching for long term memories: - ```python - # Search for user preferences for a certain query - results = store.search( - ("user-1", "session-1"), - query="favorite coffeeshops and past orders" - ) - ``` - """ - - if query: - # Use semantic search - op = AgentCoreRetrieveOp( - namespace=namespace_prefix, - query=query, - top_k=top_k or limit, - memory_strategy_id=memory_strategy_id - ) - else: - # Use list operation - op = AgentCoreListOp( - namespace=namespace_prefix, - max_results=limit - ) - - return self.batch([op])[0] or [] - - def put( - self, - namespace: tuple[str, ...], - key: str, - value: dict[str, Any], - index: Literal[False] | list[str] | None = None, - *, - ttl: float | None | NotProvided = NOT_PROVIDED, - ) -> None: - """Store or update a message event in Bedrock AgentCore Memory - - Args: - namespace: a tuple with actor id and session id as the arguments - Example: ("actorId", "sessionId") - key: the event identifier for the memory - value: The message data containing a "message" key with a BaseMessage object - index: Not supported - indexing is handled automatically by Bedrock AgentCore - ttl: Not supported - TTL is handled by Bedrock AgentCore service - - Note: - Async processing of messages in Bedrock AgentCore such as summarization or user - preference abstraction happens automatically in the service. Each message that - is saved here is then processed later. - - ???+ example "Examples" - Store a message. 
- ```python - from langchain_core.messages import HumanMessage - store.put(("user-1","session-1"), "123", {"message": HumanMessage("My favorite pirate is Blackbeard")}) - ``` - """ - self._validate_namespace(namespace) - - if index is not None and index is not False: - raise NotImplementedError("Custom indexing is handled by the Bedrock AgentCore service itself.") - - if not isinstance(ttl, NotProvided) and ttl is not None: - raise NotImplementedError("TTL is handled by the Bedrock AgentCore service itself.") - - message = value.get("message") - if message is None: - raise ValueError("Value must contain a 'message' key with a BaseMessage object") - - if not isinstance(message, BaseMessage): - raise ValueError("The 'message' value must be a BaseMessage instance") - - op = AgentCoreStoreOp( - namespace=namespace, - key=str(key), - message=message, - event_timestamp=None - ) - - self.batch([op]) - - def delete(self, namespace: tuple[str, ...], key: str) -> None: - """Delete an item. - - Args: - namespace: tuple with (actor_id, session_id) - key: the event_id of the memory to delete - - """ - op = AgentCoreDeleteOp(memory_record_id=key) - self.batch([op]) - - def list_namespaces( - self, - *, - prefix: NamespacePath | None = None, - suffix: NamespacePath | None = None, - max_depth: int | None = None, - limit: int = 100, - offset: int = 0, - ) -> list[tuple[str, ...]]: - """List and filter namespaces in the store""" - raise NotImplementedError("Listing namespaces is not yet implemented for Bedrock AgentCore APIs") - - async def aget( - self, - namespace: tuple[str, ...], - key: str, - *, - refresh_ttl: bool | None = None, - ) -> Item | None: - """Asynchronously retrieve a single memory item. 
- - Args: - namespace: (actor_id, session_id) indicating where the memory is stored - key: Unique identifier for the memory event - refresh_ttl: Not applicable for Bedrock AgentCore Memory - - Returns: - None - Individual memory retrieval by key is not supported by Bedrock AgentCore. - Use search() with a specific query instead. - - Note: - Bedrock AgentCore Memory is designed for semantic search rather than direct - key-based retrieval. Use the search() method with specific queries to find - relevant memories. - """ - raise NotImplementedError("The Bedrock AgentCore Memory client does not yet support async operations") - - - async def asearch( - self, - namespace_prefix: tuple[str, ...], - /, - *, - query: str | None = None, - filter: dict[str, Any] | None = None, - limit: int = 10, - offset: int = 0, - refresh_ttl: bool | None = None, - # AgentCore-specific parameters - memory_strategy_id: str | None = None, - top_k: int | None = None, - ) -> list[SearchItem]: - """Asynchronously search for memories within a namespace prefix using Bedrock AgentCore.""" - raise NotImplementedError("The Bedrock AgentCore Memory client does not yet support async operations") - - async def aput( - self, - namespace: tuple[str, ...], - key: str, - value: dict[str, Any], - index: Literal[False] | list[str] | None = None, - *, - ttl: float | None | NotProvided = NOT_PROVIDED, - ) -> None: - """Asynchronously store a message event in Bedrock AgentCore Memory.""" - raise NotImplementedError("The Bedrock AgentCore Memory client does not yet support async operations") - - async def adelete(self, namespace: tuple[str, ...], key: str) -> None: - """Asynchronously delete a memory event.""" - raise NotImplementedError("The Bedrock AgentCore Memory client does not yet support async operations") - - async def alist_namespaces( - self, - *, - prefix: NamespacePath | None = None, - suffix: NamespacePath | None = None, - max_depth: int | None = None, - limit: int = 100, - offset: int = 0, - ) -> 
list[tuple[str, ...]]: - """List and filter namespaces in the store asynchronously.""" - raise NotImplementedError("Listing namespaces is not yet implemented for Bedrock AgentCore APIs") - - def _retrieve_memories(self, op: AgentCoreRetrieveOp) -> list[SearchItem]: - """Retrieve memories using semantic search.""" - namespace_str = self._convert_namespace_tuple_to_str(op.namespace) - - try: - retrieve_params = { - "memory_id": self.memory_id, - "namespace": namespace_str, - "query": op.query, - "top_k": op.top_k - } - - # Add memory_strategy_id if provided - if op.memory_strategy_id is not None: - retrieve_params["memory_strategy_id"] = op.memory_strategy_id - - memories = self.memory_client.retrieve_memories(**retrieve_params) - return self._convert_memories_to_search_items(memories, op.namespace) - - except Exception as e: - logger.error(f"Failed to retrieve memories: {e}") - return [] - - def _list_memory_records(self, op: AgentCoreListOp) -> list[SearchItem]: - """List memory records in a namespace.""" - namespace_str = self._convert_namespace_tuple_to_str(op.namespace) - - response = self.memory_client.list_memory_records( - memoryId=self.memory_id, - namespace=namespace_str, - maxResults=op.max_results - ) - memories = response.get("memoryRecordSummaries", []) - return self._convert_memories_to_search_items(memories, op.namespace) - - - def _get_memory_record(self, op: AgentCoreGetOp) -> Item | None: - """Get a specific long term memory record by ID.""" - - response = self.memory_client.get_memory_record( - memoryId=self.memory_id, - memoryRecordId=op.memory_record_id, - ) - - record = response.get('memoryRecord') - if not record: - return None - - text = record.get('content', {}).get('text', '') - namespaces = record.get('namespaces', []) - - # Parse namespace - take first one and split by '/' - namespace_tuple = tuple(namespaces[0].split('/')) if namespaces else ('', '') - created_at = record.get('createdAt') - - return Item( - key=op.memory_record_id, - 
namespace=namespace_tuple, - value={"content": text}, - created_at=created_at, - updated_at=created_at - ) - - def _delete_memory_record(self, op: AgentCoreDeleteOp) -> None: - """Get a specific long term memory record by ID.""" - self.memory_client.delete_memory_record( - memoryId=self.memory_id, - memoryRecordId=op.memory_record_id, - ) - - def _store_message(self, op: AgentCoreStoreOp) -> None: - """Store a message event.""" - messages_to_store = convert_langchain_messages_to_event_messages([op.message]) - if not messages_to_store: - logger.warning(f"No valid messages to store for key {op.key}") - return - - self.memory_client.create_event( - memory_id=self.memory_id, - actor_id=op.namespace[0], - session_id=op.namespace[1], - messages=messages_to_store, - event_timestamp=op.event_timestamp - ) - logger.debug(f"Stored message event with key {op.key}") - - def _validate_namespace(self, namespace: tuple[str, ...]) -> None: - """Validate namespace format for Bedrock AgentCore.""" - if not isinstance(namespace, tuple) or len(namespace) != 2: - raise ValueError("Namespace must be a tuple of (actor_id, session_id)") - if not all(isinstance(part, str) and part.strip() for part in namespace): - raise ValueError("Namespace parts must be non-empty strings") - - def _convert_namespace_tuple_to_str(self, namespace_tuple): - return "/" + "/".join(namespace_tuple) - - def _convert_memories_to_search_items(self, memories: list, namespace: tuple[str, ...]) -> list[SearchItem]: - """Convert AgentCore memory records to SearchItem objects.""" - results = [] - - for item in memories: - if isinstance(item, dict): - content = item.get("content", {}) - if isinstance(content, dict): - text = content.get("text", "") - else: - text = str(content) - - score = item.get("score", 0.0) - record_id = item.get("memoryRecordId") or item.get("id") or str(len(results)) - - # Handle datetime parsing - created_at = item.get("createdAt") or item.get("timestamp") - if isinstance(created_at, str): - 
try: - created_at = datetime.fromisoformat(created_at.replace('Z', '+00:00')) - except (ValueError, AttributeError): - created_at = datetime.now() - elif created_at is None: - created_at = datetime.now() - - result = SearchItem( - namespace=namespace, - key=record_id, - value={"content": text, "metadata": item.get("metadata", {})}, - created_at=created_at, - updated_at=created_at, # memories are not updated - score=float(score) if score is not None else None, - ) - results.append(result) - - return results - -def convert_langchain_messages_to_event_messages( - messages: List[BaseMessage] -) -> List[Dict[str, Any]]: - """Convert LangChain messages to Bedrock Agent Core events - - Args: - messages: List of Langchain messages (BaseMessage) - - Returns: - List of AgentCore event tuples (text, role) - """ - converted_messages = [] - for msg in messages: - # Skip if event already saved - if msg.additional_kwargs.get("event_id") is not None: - continue - - text = msg.text() - if not text.strip(): - continue - - # Map LangChain roles to Bedrock Agent Core roles - if msg.type == "human": - role = MessageRole.USER.value - elif msg.type == "ai": - role = MessageRole.ASSISTANT.value - elif msg.type == "tool": - role = MessageRole.TOOL.value - elif msg.type == "system": - role = MessageRole.OTHER.value - else: - logger.warning(f"Skipping unsupported message type: {msg.type}") - continue - - converted_messages.append((text, role)) - - return converted_messages diff --git a/samples/memory/agentcore_memory_store_long_term_search.ipynb b/samples/memory/agentcore_memory_store_long_term_search.ipynb new file mode 100644 index 00000000..b215a1d2 --- /dev/null +++ b/samples/memory/agentcore_memory_store_long_term_search.ipynb @@ -0,0 +1,535 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Bedrock AgentCore Memory Store Walkthrough - Long Term Memory\n", + "\n", + "This sample notebook walks through setup and usage of the Bedrock AgentCore Memory 
Store with LangGraph. This approach enables saving of conversations to the AgentCore memory API to be later extracted and retrieved, enabling long term memory.\n", + "\n", + "### Setup\n", + "For this notebook you will need:\n", + "1. An Amazon Web Services development account\n", + "2. Bedrock Model Access (i.e. Claude 3.7 Sonnet)\n", + "3. An AgentCore Memory Resource configured (see below section for details)\n", + "4. Two strategies enabled for the Agent Core Memory resource, `/facts/{actor_id}` semantic search and `/preferences/{actor_id}` user preference search\n", + "\n", + "### AgentCore Memory Resource\n", + "\n", + "Either in the AWS developer portal or using the boto3 library you must create an [AgentCore Memory Resource](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-agentcore-control/client/create_memory.html). For this notebook, only two strategies need to be enabled, user preferences and semantic memory. These strategies will automatically run once we save our conversational messages to AgentCore Memory and extract chunks of information that our agent can retrieve later. For more information on long term memory, see the docs here [AgentCore Long Term Memory](https://docs.aws.amazon.com/bedrock-agentcore/latest/devguide/long-term-memory.html).\n", + "\n", + "Once you have the Memory enabled and in a `ACTIVE` state, take note of the `memoryId` and strategy namespaces, we will need them later." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install langchain langchain-aws" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import LangGraph and LangChain components\n", + "from langchain.chat_models import init_chat_model\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain_core.messages import HumanMessage, AIMessage\n", + "from langchain_core.tools import tool\n", + "from langchain_core.runnables import RunnableConfig\n", + "from langgraph.store.base import BaseStore\n", + "import uuid\n", + "import logging\n", + "logging.getLogger().setLevel(logging.DEBUG)\n", + "\n", + "from langgraph.utils.config import get_store" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import the AgentCoreMemoryStore that we will use as a store\n", + "from langgraph_checkpoint_aws import AgentCoreMemoryStore\n", + "\n", + "# For this example, we will just use an InMemorySaver to save context.\n", + "# In production, we highly recommend the AgentCoreMemorySaver as a checkpointer\n", + "# which works seamlessly alongside the memory store\n", + "from langgraph.checkpoint.memory import InMemorySaver" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## AgentCore Memory Configuration\n", + "- `REGION` corresponds to the AWS region that your resources are present in, these are passed to the `AgentCoreMemorySaver`.\n", + "- `MEMORY_ID` corresponds to your top level AgentCore Memory resource. Within this resource we will store checkpoints for multiple actors and sessions\n", + "- `MODEL_ID` this is the bedrock model that will power our LangGraph agent through Bedrock Converse.\n", + "\n", + "We will use the `MEMORY_ID` and any additional boto3 client keyword args (in our case, `REGION`) to instantiate our checkpointer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "REGION = \"us-west-2\"\n", + "MEMORY_ID = \"YOUR_MEMORY_ID\"\n", + "MODEL_ID = \"us.anthropic.claude-3-7-sonnet-20250219-v1:0\"\n", + "\n", + "# Initialize the store to enable long term memory saving and retrieval\n", + "store = AgentCoreMemoryStore(memory_id=MEMORY_ID, region_name=REGION)\n", + "\n", + "# Initialize Bedrock LLM\n", + "llm = init_chat_model(MODEL_ID, model_provider=\"bedrock_converse\", region_name=REGION)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Define our long term memory retrieval tools\n", + "\n", + "In LangChain, tools can be injected at runtime with a Store argument [InjectedStore](https://langchain-ai.github.io/langgraph/reference/agents/#langgraph.prebuilt.tool_node.InjectedStore) to ensure that we can dynamically use the store to access different namespaces. In our case, this means we can pass the `actor_id` to the tool through the `config` (`RunnableConfig`). This is filled in when the tool is called so the tool can only access that actor_id's memories. \n", + "\n", + "The tool will search the namespace we specify, in this case the `/facts/{actor_id}` namespace which is a semantic memory namespace we specified above (at the top of the notebook). As the memories are extracted over time, these will be available to the agent through this tool." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "@tool\n", + "def retrieve_user_facts_from_past_conversations(\n", + " query: str,\n", + " config: RunnableConfig,\n", + " limit: int = 3,\n", + ") -> str:\n", + " \"\"\"Retrieve facts about the user that might be helpful in answering vague questions\"\"\"\n", + " \n", + " # Actor ID comes from the runtime config we specify when invoking the agent\n", + " actor_id = config[\"configurable\"][\"actor_id\"]\n", + "\n", + " # Namespace we defined, where semantic facts are extracted for the user across\n", + " # sessions. This is combined under the hood with `/` to match AgentCore namespaces\n", + " search_namespace = (\"facts\", actor_id)\n", + "\n", + " store = get_store()\n", + " \n", + " result = store.search(search_namespace, query=query, limit=limit)\n", + " return result\n", + "\n", + "tools = [retrieve_user_facts_from_past_conversations]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Storing the messages that we want for long term memory extraction\n", + "\n", + "In Bedrock AgentCore, each conversational message that is added for an `actor_id` and `session_id` is processed in the service according to the strategies specified for that memory resource. In our case, since we defined two strategies, user preference and semantic extraction, both user preferences and facts from previous conversations will be stored as long term memories in the service that can be retrieved by our retrieval tool above.\n", + "\n", + "For this example, we will use the AgentCoreMemoryStore to save messages in the pre and post model hooks so that we capture both the HumanMessage objects and the AIMessage objects that occur during the conversation. 
The pre-model hook runs before the LLM invocation and the post-model hook runs after it to capture the response from the agent.\n", + "\n", + "The pre-model hook will also perform a semantic search using the user's latest message to add a few relevant results to the LLMs input so it can have context into the users past preferences. If needed, the LLM can also search itself using the semantic search tool.\n", + "\n", + "**Note**: LangChain message types are converted under the hood by the store to AgentCore Memory message types so that they can be properly extracted to long term memories." + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "def pre_model_hook(state, config: RunnableConfig, *, store: BaseStore):\n", + " \"\"\"Hook that runs pre-LLM invocation to save the latest human message\"\"\"\n", + " actor_id = config[\"configurable\"][\"actor_id\"]\n", + " thread_id = config[\"configurable\"][\"thread_id\"]\n", + " # Saving the message to the actor and session combination that we get at runtime\n", + " namespace = (actor_id, thread_id)\n", + " \n", + " messages = state.get(\"messages\", [])\n", + " # Save the last human message we see before LLM invocation\n", + " for msg in reversed(messages):\n", + " if isinstance(msg, HumanMessage):\n", + " store.put(namespace, str(uuid.uuid4()), {\"message\": msg})\n", + " break\n", + " # Retrieve user preferences based on the last message and append to state\n", + " user_preferences_namespace = (\"preferences\", actor_id)\n", + " preferences = store.search(user_preferences_namespace, query=msg.content, limit=5)\n", + " \n", + " # Construct another AI message to add context before the current message\n", + " if preferences:\n", + " context_items = [pref.value for pref in preferences]\n", + " context_message = AIMessage(\n", + " content=f\"[User Context: {', '.join(str(item) for item in context_items)}]\"\n", + " )\n", + " # Insert the context message before the 
last human message\n", + " return {\"messages\": messages[:-1] + [context_message, messages[-1]]}\n", + " \n", + " return {\"llm_input_messages\": messages}\n", + "\n", + "def post_model_hook(state, config: RunnableConfig, *, store: BaseStore):\n", + " \"\"\"Hook that runs post-LLM invocation to save the latest AI message\"\"\"\n", + " actor_id = config[\"configurable\"][\"actor_id\"]\n", + " thread_id = config[\"configurable\"][\"thread_id\"]\n", + "\n", + " # Saving the message to the actor and session combination that we get at runtime\n", + " namespace = (actor_id, thread_id)\n", + " \n", + " messages = state.get(\"messages\", [])\n", + " # Save the LLMs response to AgentCore Memory\n", + " for msg in reversed(messages):\n", + " if isinstance(msg, AIMessage):\n", + " store.put(namespace, str(uuid.uuid4()), {\"message\": msg})\n", + " break\n", + " \n", + " return {\"messages\": messages}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Build our LangGraph agent graph\n", + "\n", + "Our agent will be built with the `create_react_agent` builder. It just has a few simple nodes, mainly a chatbot node and a tool node. The tool node will contain just our long term memory retrieval tool and the pre and post model hooks are specified as arguments.\n", + "\n", + "**Note**: for custom agent implementations the Store and tools can be configured to run as needed for any workflow following this pattern. Pre/post model hooks can be used, the whole conversation could be saved at the end, etc." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPUAAAG/CAIAAAAPZwaMAAAQAElEQVR4nOydB1wTZx/Hn7uEhL1FGbJUXCi4Wmutozhate7WbdVaV7W1rrp3Kw6s63XVqnXvPeu27oF74EAEVESGssm6958chABJzkAS7i7PVz7xcs/d5cnld8/9nv8zTkhRFMJgeIoQYTD8Besbw2ewvjF8Busbw2ewvjF8Busbw2ewvk1Idja6e+5DQkxmbpZCJlVIc1ShWIJCFEERyv8RvBKIUqhWkwULACxrrlG+FcDavDVqSCFBKSj1yrwtyUK75x2TpCgZoc5A/v6QhULHFFoTAgEhtiHLVbQJ/dzJ3k2AuAyB49+mYOei+JS3EmmuQmwjEIoIkbVAQFKSHJWOSAIpKJWuKZW+lQJFSgmqF5QvsFxU36RSikX0LbBCCjkqpm9697wD0gcnBJRCSr9RXifq9Uip7wINiGwEchklyaVyM2UK5WEplwri9oO87RxJxEGwvo3MpjmxqYm5tg7C6g2cGn3jijjO1aMpD6+mZXyQOjhb9Zvmj7gG1rfROL8n6d6l9y7lRD1/80W8Y3tEXGJ8TpVQx6++L4+4A9a3cdg2Py4tVdr1Z1/XCryt0sjlaN3UFyIbsu9kP8QRsL6NwIlNiQkx2X2486uXhp0LX1NI/t2oiogLYH2Xlk1/xEL1rO8kHnoSXWxf8CorQ9p/uj9iPZysFLOHPcteK+SURYkb6DbGGyrQW+bGIdaD9V1you9lJbzM6TvFImxJEbqN9kl/L718OBWxG6zvknN8U0Ld5i7IUmnZo0LkmWTEbrC+S8ipzYnQRNOwjeXqO7C2rY2dcPeSV4jFYH2XkCd30ms1slxx03zevlxCbA5iMVjfJeFpZBYlh1/XrPresWPHtGnTkOGMHz9+//79yARUrWcntCKuHGGvC8f6Lgm3zibbu1gh8/Lw4UNUIkq848fg4iF6eisNsRUc/y4Jaya/CKzl8GU3d2QCYmJiVq5cefPmTfhpateu3bdv39DQ0EGDBkVGRtIbbNq0qVq1atu3b//vv//u378vFovr1q37008/+fj4QOq4ceMEAoGnp+eGDRvmzZsHb+m97O3tz549i4zNrdMfrh5PHjI3ELESXH6XBEmOIrCWHTIBEokEpAwCXbp06YoVK4RC4a+//pqTk7N69erg4OC2bdveuHEDxH379u358+eHhIQsWLBgxowZKSkpkydPpo9gZWX1TMXChQvr1Klz8eJFWDllyhRTiBuo09RJLlMgtoL7f5cEKFn9a9ggE/Dy5UsQa48ePUDE8DY8PByKbZlMVmSzWrVqgR339fWFCwDeSqVSuAw+fPjg5OREEMTr1683btxobW0NSbm5ucikCBBJErGPc3yrWSP2gfVtMEmxEsJktz2QrIuLy/Tp09u0aVOvXj0ooevXr198Myjg4+PjIyIiwJ9kZmbSK+HCAH3DQkBAAC1uM0Gi9ym5voiN+sb+xGBkyuEBBDINYKb/+uuvxo0bb9my5YcffujYseORI0eKb3bu3LlRo0bVqFEDNr5+/fqyZcuKHASZGTliJ1jfBuNRQYQUJqyU+/v7jxw58tChQ2CgK1euPHXq1MePHxfZZu/evVDphDplUFAQGJL09HRUhigIBxcRYiVY3wZDqn7KN89N4msheHLgwAFYAIPRpEmTuXPngsN+9OhRkc3Aant4eKjfnj59GpUdcrkiIMgktZHSg/VdEkgBEXXLJEUmCHfmzJmLFi2Ki4uDuua6deugcgkuHJIqVqwIbhvcCPhsKLavXLkCsRRI3bx5M73vmzdvih8QvApcCeqNkbG5dymdALPG0uIb67tEOLgI455kIRMAUp44ceLRo0c7de
rUpUuXW7duQSw8MFAZXe7cuTNYEfAkT58+HTZsWKNGjcCCf/bZZwkJCRAiBC/+888/Hzt2rPgxBwwYAFfF6NGjs7OzkbF5cj3Nxp69Y+xx+05JuH32/cWDST9FVEYWz4pxz4PqOoZ1L4dYCS6/S0JoM2coFu7+x952afOQGCeRyyjWihvh+HeJ8Qq0uXI0qfYXjro2GDx4cFRUVPH1crkc7pl0u0xx9u3b5+zsjEwANHlCWEZrEmSJhEYaQnvQ8+TJk7py+++mBLcKZo9FGgL2JyVn2ahnX/X2rFxXe0P9u3fvoFlRaxK0KeoKUXt5eSGTAe2ayHB0ZSnzg2Lt9OgRf7LapOHyu+TUaeZ6cntC5bqVtKaWK8e6u7ZxL55Nc2MCgh0Qu8H+u+R83t7V1kG4YyEHhtkanSN/J1gJyXY/sH2uH6zvUtF3st+HZNmhNQnIkri0PzX2adaAmf6I9WD/bQQ2/vESCvIuI7yRBXBya2LMg6yBs/0RF8D6Ng5rp8UIRajvJH/Ea7YuiE9PkQz6g6WjGYqD9W00di15lfgyp0qIQ8u+Hoh3nNuVdO/ye7fy4h7juDEzGw3WtzF59TT3yD9vZBK5u5d1864e7j7mHqNpdNKS5ae3v331PEtoRTbuUK7mZ2wPmBQB69v4PLyScflIUnamTEASNg4CO0crWweBSEDlSAtOtdAKyTSD4wQiSeVM9ZoIhMrJ6hWKoithDVVsRJiARHJtw8RIIaGQafmJhSJSJtGyg5WIRBSR/l6anirNzpRTcmRjL6j9hXODVpycDAPr24REnvoQG5WZniqTShQKuUIqKUgSWCF5ocYfChoQi0pZoJJy4d8HLgNYQ6+kVEsEqXrggwBRxQYZQLKg2GF1ZCAPoRgJYR8S2TkJK1a1a/gVt+d4wfrmMOvXr8/IyBg+fDjC6AC3X3IYmUymq2cIhgafHQ6D9c0Ibr/kMFjfjOCzw2GkUqmVFedDkCYF65vD4PKbEXx2OAzWNyP47HAYrG9G8NnhMKBv7L/1g/XNYXD5zQg+OxwG65sRfHY4DNY3I/jscBisb0bw2eEw0L6D9a0ffHY4DC6/GcFnh8NgfTOCzw6HwfpmBJ8dDoP7VzGC9c1hcPnNCD47HAbrmxF8djgM1jcj+OxwGKxvRvDZ4TC4fskI1jeHweU3I/jscBhPT0+BgL3PLmMDePw8h0lMTJRIJAijG1x+cxgwJ6Z4ZCufwPrmMFjfjGB9cxisb0awvjkM1jcjWN8cBuubEaxvDoP1zQjWN4fB+mYE65vDYH0zgvXNYbC+GcH65jCgb7lcjjC6wfrmMLj8ZgTrm8NgfTOC9c1hsL4ZwfrmMFjfjGB9cxisb0awvjkM1jcj+PnF3KNly5bJyckEQdBv6V+wcuXKO3bsQJjC4PE73AP0Da9EPiRJWltbd+vWDWGKgfXNPXr16uXt7a25xsfHp3PnzghTDKxv7gHiDgsLU78VCATt2rVT2xWMJljfnKRv374VK1akl0HuXbt2RRhtYH1zEldX19atWyOVC4cFW1tbhNGGpcRPYh9lP72dmZVReDYF5T2dQqoTQJJIoaBXql41zgpBEpSCojenz5Z6DSAQEnKZKpVElCJvF/pohLL0KNhSY71ypeb29JHVa9QLmtvnH4FQKChYL5HKIiNvKeSyunXriUQi9cYFX0QDrUmaGSh0BvLfUlSBPOjPLboZUXCi1BsU/2iKICg5VfjjiEKnOB9rO6vAWnaVahntcrUIfa+b+jI3V24lIiU5RX55SqUs5VLB2S+mb+VNTp2UtzFFKfL8LilECjoGTcClkrcy72iEaqWiwBnT6wnlWScK/cT0svoI+Z+Yt73Gx+WvUb4qfzsCEgSaB1F/EQVBkZSWvQoyQ9B5KZQ3zayCXLWkqs8GUmZW1+6FvjKl/F66NtBEbENKcimhFdF/qr9AhEoP//W9avwL3+oOjTu6IwxHiDyZ+uja+wHTA0Q2qJTwXN9/TYqp/qlbSF
MHhOEUb55Izux+NTg8AJUOPtcvLx5IhtszFjcX8QwSgVc5+nciKh187n8S+yTbzhl3sOEqTu6ipNfZqHTwufyWZMoYKzQY1kIKUHZOaX8/PhdvMogO4OGJnEUulyukWN8YjG6wvjEshUBG6FOD9Y1hKRQyQuwa6xvDZ7C+MXyGz/omSdwpmsMQBEmUOnzN5/i3QoEHl3IYilKUvvmCz+W3QKjsvIow3IQgjVB+81nfchm0geECnKsY5eaL65cYlkIao/KE9Y1hKQqFEfw3n+uXJIF4Hz85c/ZE87D679+n6t+sY+cWGzau0b/NocN74VBGnBBr2vRxo8cMRWUKn8tvBYVw/MTCwf4Ew1II1b9SgvVdiHbtm/bs0T8q6uH5/07b2dnVqlVn4oRZDvYO0dHPfvix+5zfFy1YONvZ2WXN6q2w8bHjBw8c3P3ixbOAgMpfNm/VpXMPxvYk8An9vh8cHx+7e89WOM5nDb8Y/tOYP8KnXLx4rmJFv949B7Rq1ZbeEtb8s2H1y9gXTk7OlStX/WXEb+XLV6CTVq5a/O+Jw7Y2tmFhX/n4+KkPDtbi77XLr1y9kJiYEBwc2qnDdw0bNkYGkpycNOv3iQ8e3PXx8e3erW/bNh0Z86MnSfOwQ4b1CQmpN3ni7I/LiGpig1LffvH8J4UQCIQ7d21u167z6ZPX54Uvi42NWbpsPqy3srKC1w2b1nT7rs/oUZNh+eSpY3PnzQiqUm3LpgMDf/hp1+4ty5ZHMB4fjrNt+z++vv7Hj16CvY4eO/DrqEFhX3514viV5s1azo+YlZ6RDpvduHl16vSxoPUd245MmxL+9u2bRUvC6SPsP7Br/4Gdv/z82/LlGzw9vTds/Et98CVL50E2OnXstmXzwaZNwqbNGHfu/ClkCEKhcMmyeX16D1wYsbJatZqLFsNHJ+jPj54kNdnZ2ePGD3dzdR83ZioyL1jfRalcKahB/YZQEteoUatD+65nz56QSqV0wQzrv+3aq3q1mrB85Mi+2rXrjPxlvIuLa906Dfp/P2Tfvh2pqSmMx69SuVr7b7qIRKJmTZXTZNasWRuUDcJq3qwVFMCxL1/AyrXrVjT54suuXXpCiQgbDBs66sqVC4+jHkLSnr3bmjZpAfJ1dHD8qvU38NH0YXNzc4//e6hnj35wcCdHpzZfd4DLRlP9HwNkoP03XT/9pFGd0Ppwn4G3jx7f158fPUk0crl8ytTRWZmZ4XOW0PO0fCyUEYIDvI6fkMoQiqHAHVa97O1VEcT9+nU8/TaoSnV6AUJX9x/caVD/M/WWdeo0gJV3791iPD4U3vQC+B949fevRL+1sVFOapOengav0dFPq6muIpqqQTXg9fHjB9Dk8epVnL9/oDopKCgvS0+ePJJIJJpZCg2pB7bqQ9oHZAghtevSC85OLvCam5OjJz/6k+jpbectmPk46sG8ucvAjyGDIHD7jl4UCqQw/BSJxdbqZWsb5QQcmZkZjo5OsCASi+n1oCTQPZhd+NPc92PK7yIenSSLFjEZGRlQGGtmg55+LQvKwMxMKA7pKyEvh9Y2+Xspjc2IX34ocrTUlGQnVeY/EriTFMmnnvzoSUKqBsg7dyPhJgAVGM1tPhJl+7wAlRJcvywKqFm9nJOtHL+tfHj6vgAAEABJREFU1pAaa2tr+CFbtWzbpEmY5novTx9UauDgyo/OKRg6nqmSC/hXKPIFAkFubo46KTs7i15wcy8Hr6NHTfL2rqh5NA+PCshk+dGTRL+1s7OfPnVuxJ+/h8+dFrFghUH9OSlo3yn16Fms76LcuXNTvfz0WRSUZ6CYd+/eFtmsUqUgqAuCT6XfQnH+5s0rD4/yqNTAJ1YNqg4RDPUaejmwUhXQR/nynsq33+YlQbSEXvDx9hWrbi/qLMHNBErQ0k+9qSc/epLot5UCq4SG1psxbd7gob03b1nXu9cAZF54Xb8sUf3kXVIihFDABkDw5NDhPc2btxLn2xJNfvxh+MWLZ48c3Q
+2+9692zNnTRg1Zgj4FmQMIAZy4eLZ3bu3pqWn3bp9Y/mKhVCPrKKqGEBlFGKX0GwJy1u3/fPw4T16F9Ax1AihQgmZgWxA5GTMuGEQAEHGQE9+9CSpCQys/OPA4ev/WQXlBTIvvC6/S1Q/ade2ExRCy1f8CcvwU40YPlbrZrVqha5euRnKpFWrl8ANumaN2rNnLdR6JZQACLfBZbZ950aIOUIsuX69hqAPOql3rx+gNR6ilnBFQR4gXvH7H5PprnYQroa7ypZt6yMjr4ExgCyNHj0ZGQM9+dGTpMl33/a+du3SH3OmrPvbrA8J4vP8g39Pi7ESE51+8vv4XTp0CoNmmr59BiJMWXNiU3ziy9wh8yqhUoD9N4al4PZ51gHed+KkkbpSN23cB40gqOyYMGnk/Xu3tSa1adNx6JCRiE0YpX2e1+OLCYMLgP17DWvQLoLSlK/eoiu1bMUNjBk1WSLVXgO2tWHdE04IApff+iEUROlbCAzEs4IXYitublya5F/11AdUSvjdflno2TcYjkEgXH5j+AuFcPmtD8ICxqfxGKPM78NnfVN4fBqXwfP7YDAM8NyfYCwcnvsTjIWD/QmGz2B9Y/gMn/UttiaFYjyAmquIxFYi29JOpsXnn9/eSZibhT04V8lIk9nalrZ7BZ/1/UUHj6w04wyowZifD+8kNRuVtkcan/Xt5i308LHdueAlwnCNPYvi4PZbq7EDKh18Hr9Dc+lQyoPLaZ4Btr5V7eUKBj9HEIVOCKV6r+zlQ68jmHpEEJCu2iX/cAVBSj370oF61ZZFMqB3R4JA2rtIazkIvZ4kKUVekyBBFcyfo2v7vPwX//raslRkXcFX15p/HV+KJK1eP894/TzTO8jmqz5GGKzNf30D14+/f3DlQ06WXCYxtMGXUdFG2UW/+rUnUXp615UoC7op9lEfcXwq//ozTN8iZG0tDAx2bPatKzIGFqHvUpKTk/PXX39t3bp1+vTprVq1QqxhxIgRmZmZa9euRaYEvvL79+9dXFyEQqGnp2flypUDAwN9fHwaNWqEWA+Of+sD1APK3rVr16BBgy5duoTYxK1bt6KiokiSvHHjRv369ZHJ6NSp0/r165OTk2H5zZs3kZGRIHRXV1cHB4edO3cidoPDw9pJT09fuHBhmzZt3N3dL1y40LdvX8Qy/v7775SUlKSkJFOLrHv37lBs08ukCoVCASU6+8WNsL6LA7/c/Pnz27dvX6FChXPnzvXu3Ruxj8uXLz948IBehoX79+8jkwHO5KuvvlLPS4hUI8euXLmCuADWdwFwC547d27Xrl19fX3PnDnTs2dPxFbANcEdhl5OSEiAugEyJVCEw9WufisQCMCoIC6A9a3k3bt3c+bMAUFDzenkyZPdunVDLObEiRNPnz7VXHPv3r0nT54gk+Hs7AxFOMgalh0dHffu3QsVkgMHDiDWY+n6fvv27ezZs8FeV61a9fjx499++y1iPVDbg4qv5pq4uLjt27cjU9KnTx9vb2+Q+OnTp2Hh4MGDd+7cmTJlCmI3lhsffP369Zo1a65duzZw4MCOHTsi7tCyZUuo5MnlcghcQlUPWmdkMhkUsXB9IvNy9OjRxYsXr1ixIiAgALESS9R3fHw8+FeIr4GyoR6JMKUAAjhDhw6F+953332H2Idl6fvly5dQZoNb/fHHH9u2bYs4zt27dz08PDRrfmXFvHnzQOjwiliGpfjvFy9eTJo0adSoUdDqtm/fPh6IG9i8ebM6Sli2jBs3DiqgzZo1e/z4MWIT/C+/nz9/vnr16ujoaCizWdW6XnpA33Xq1KlRowZiBxkZGUOGDIGTzJ7mMD7rG0Jm4LMhtgDKDgsLQxizsGTJkmfPnsErYgH81DfcJUHZ0AYBym7evDniKTdv3vT393dzc0Ms49KlS6NHj4a4SmhoKCpT+KZv8KOgbKjrQANEkyZNEK8ZPHgwfM169eoh9iGVSiGu8umnn0IRg8oO/ugboiKg7A8fPsAJbd
zY4OeucxGoV7Ru3drPzw+xlVWrVkVGRi5fvpxu+zQ/fNA3NKTBL52VlQXK5kSnZIsCTNSwYcPAjkNZjswOt/UN5w7i2XArBGWXyekrWy5fvhwcHOzgUNpBimbgp59+ql69+vDhw5F54aq+r1+/DsqGBWiDbNCgAbJIoMkwPDw8MDAQcYH169efOXMGKp2lf+Tsx8O98TtXr14Fn21lZcXaqpXZaNiwoZOTAc+WL1v69esHJRE0A82ePdtsVX8uld8QdYIy28bGBtxImQeeMCUGQoeenp5jxoxBpocb+r5w4QIo29HREdxI7dq1EUbF+fPnodZhrIcmm5Nt27bt27cPvIqLiwsyJWzXN/yE4EagCQPK7Jo1ayKMBtASDkJxdTXOVApm5vnz59CYP3bsWJN2mmCv/wafDUGlChUqTJw4EareCFOML774wpx1NeNSqVKlEydOTJo0CUIF8IpMA0vLbwhp/+9//wOjVrVqVYThNVCKQS35+++/RyaApf1jExMTwZNgcevn1KlTMllpZxAucyC+GR0djUwDS/UtFAp58MuZmhkzZkgkfJgglyRNpUOW+m+s748hLCxMc1oSTHGwvjnMtGnTEEYv2J9wGH74b5OC9c1heOO/TQf2JxwG+29GsL45DPbfjGB/wmGw/2YE65vDYP/NCPYnHAb7b0awvjkM9t+MYH/CYbD/ZgTrm8Ng/80IS/2JlZWVVCpFGL1g/80I9t8cBvtvRrA/4TDYfzPCrvJ75MiR586dI4i8p0Grp3+4efMmwhQD/PexY8ewRdEDu8rvRYsW+fn5kfkQKipWrBgXF4cwxcD+mxHW+ZPPP/9cc0goLDds2BAkjjDFAP8tEokQRjes03evXr38/f3Vb0HZ7HxwERvA/psR1unby8sLbrv0dLoKhaJOnTpcmV/P/OD4NyNsjJ/06NHD19cXqQpvTjxwtazA/puRUp2duKjs7AwoZOWF1kK1UEFRBIWovDAI/Kcxx0r+OwiSgM+mXwuSkDKVELZo+P3pzFPVq1QnM32irqdRhGovzZla6OOrQi1qv05Qyn/0kYj8qV0Kf7oSsVgcUJt7c5oVB8e/GSmhvg+sTHgTnQUCk0kVyJAJgmhV6kwuEGO1L6pWQ3J0cksihbTsULBSQ7+FtsxfX3x3oRUJF6Czq6jnBG5XW8F/N23aFBfheijJqTm67u2HZGmzb328grhaeZdkojM7E9ZOiRkwyx9xFhz/ZsRg/70j4tXrF7kdh1fkrrgBkR1q3a+CZ6DDmkkvEGfB/psRw/QtyUBJCTnfjfZFvKBxZzeCRKe3JSFuguPfjBim7/8OJItteFVguJa3fvU0C3ETHP9mxDB9p6flEgZVJ1kPKUa5Eq52xMXxb0YMK4xluQopBEx4hFyikHFWIdh/M4LPDsXdGxKOfzPC0v7fZkPZQZGz5wD7b0YsXd/Q3ERx1nBh/82IYf4kr/WbRyjdCYE4CvbfjBh2dgSqAQeIRyi/DWe/EPbfjBjmTxQKxOnn1WuFu/ck7L8ZsXj/zeXyG/tvRizdvVEKirv1S+y/GbH0s8PpCgX234wY5k8IkuJX9ZIOCXH1K2H/zYhh+qYUBO+qlxTibAMm9t+MWLz/5nL7PPbfjPAqfjJj5vgjR/cbtIvSfXPZf+P+3/rhlb6joh4iA1GG83H8m78YeHcjKUMLuxcvnh84uCvy1vWEhNf+foFt2nTs0L4rnZSamjInfOqDh3d9K/p36PBtfHzsfxfO/LNuFyTBz/b32uVXrl5ITEwIDg7t1OG7hg0b00cbMLDb8v/9s2XLugsXz5Yr59G8WatBP44QCATNw+rDBvMXzFqx8s+D+89+ZPZIASkQcfUix+MvGTEwfkIZfDf/3/KI69cv//Lzb+FzloC4Fy+Ze+XqRTpp3oKZsXEx8+ctnz1r4dWrF+GPJPPys2TpvF27t3Tq2G3L5oNNm4RNmzHu3PlTSDUvOLxGLJwdFvbVv8cuT5owe8fOTWfOno
CVx44oDzt2zJSPFzegkCvkEq4GwLH/ZsTA+AmFDI2fTJkyZ/785XXrNKgTWh9K7qpB1a9dvwTrP3x4f+XKhe++7VOjerCbm/voUZOhgKd3yc3NPf7voZ49+rX/pouTo1ObrzuEffnVho1/qY/ZtEmLZk1bgNZDQup6eXo/efIIWSTYfzNi2NVPkMjg+DdF7dmz7eq1i3FxL+kVnp7e8Po8+im8BgeH0Cvt7e3r1v0EinNYBr1C2KtB/c/UxwgNqXf02IEPaR/ot0FB1dVJ9vYOGRnpqKSoJgJCHOXMmTNNmjSh57LjLlDFN91dyMD+sQrDym+FQjF+4i9SqeTHgcNDQ+s72DuM+OUHOik9PQ1e7ezs1Rs7OjrRC7Re1VuqSU1Jpk+E2saUHk7HB6H8Bv9ta2uLuAxU8U1XSzbwujGwrx1UBx8/frBg/vJ6dT+h14B2y7l7IOUkadbwKtVonkh9n0IvuLmXg9fRoyZ5exeaX8rDo0JKipHnciAIASHkagGO/TcjBvoTA5vn09KVjoIWNBATEw1/Af6VkHLuTD94fRHz3N9fOT1sRkZGZOS18uU9YdnH21csVs4PCJad3hEiLXCVQ0GVkoKMC0XJKRlXC3Dc/4QRA+uXBrZlV/TxgwJm+46NaelpsbExS5fNb1C/YcLbN5Dk7eXj5xfwz4bVr17Hg7gXLZ5D+3IAdNzv+8FQobx37zYYcYicjBk3bNHicP2fBZcEhAtv3Lhy6/YN/nVS1wqOfzNi2tCvu3u5SRNnP3x0r0PHLydO/nXgDz+1b9/10aP73/dXhsDHjZkKTrpP306/jhoEVcbgmiFWQit6x+7d+o4dM3XLtvXfdGgGIUUvT5/RoyczflyvngMg0D5l6uiP1zen65e4/wkjhro3g3sjQSAP/jTXnDl1g14Aez3n98Xly1eg306YNJL2JzRQ0sNfkaP5+Piqd6dZtXKTehnij+rGo48E9z/hN4b2jyWNOJ3CjJnjoeSGNkuIhW/c9PfNm1fbG6jO0qN8jJWAqwU4jn8zYmj/WMqI1nbatLmBlar8tWZZ957tLl48O21KePEC29QolNPzc7UAx/6bkbK8u0Hb5OyZEaiM4fB4Ddz/hBF8amTtoTIAABAASURBVDgcacH+m5EStM/zLPTG4e+D49+MGFhZVCBE8Wt+HxKRnO0Dj/03I6Zt3+EAXL5ccfybEYsffwkhITz/CX/BZ4fDYP/NiKHtO+BWeeW/4etwt30H+29GDG3fgQYRXjlw+Drcbd/B/psR7E84DPbfjOCzw2Gw/2bEMH9iJSZFnJ1NQSsisVBkw9Xxi9h/M2KYWB1cRQpePR4Q5WQqRGKu1i+x/2bEMH1/+Z17brYc8YgPiRK/qg6Im2D/zYjBZsOnsu22eTGIFxz/+zVpRXzRxRVxE9z/mxGD9d1+iGeVEPvdf768f7Hks46UObGPc/eviM9Il/af7oc4C/bfjJTk7tbsO/eTW6h7F5JunUmUyyitT6SHKLn2hiDlDG9atqcoonjPRK0rleuVswwV21hbV26tWxICUiAg3L3FXcdzWNwI9//+CEp4alr0LIdQObkEZX+QF/LjRH4PLCJfcZRGCi1AqtjGquf0UZrrERoxfMSUqZM9PMoXOiANqerJqPkpKP/I+Zvl/a/5iflJIpHAxgnxAOy/GSnV2RGIkH05UwXXktJinNyFTu7cnnzMpOD4NyPsDWaDs8SFk36w/2aEvfqWSqX0bMgYXeD4NyPsLSBx+c0I9t+MsFrfuPzWD/bfjLDXn8jlcpK7QyPNAvbfjLBUQApVNxesb/1g/80IS/0JNt8fA/bfjGB9cxjsvxlhqQHAwcGPAftvRliqb1x+fwzYfzOC/QmHwf6bEaxvDoP9NyPYf3MY7L8Zwf6bw2D/zQj2JxwG+29GsL45DPbfjLDXn2D/zQj234xg/81hsP9mBPsTDoP9NyNY3xwG+29G2Bv/xvpmBPtvRrD/5jDYfzPCXg15e3sjjF
46d+7MgyiTWCx2c3NDpoGl5XfLli1fvnx55swZhNHGrVu34PWXX37hgb5zc3OTk5ORaWDvALD58+dPmjQJ33+Lc/ny5a1btyLlpF+8ehaSKWD1AMclS5b8/PPPCFOY9PT0efPmIcxHwGp9169fv3r16hs3bkQYFRMmTIDXVq1aIczHwfYB6mAxDx48GB0djSyecePGDRo0CGEMgQMTMCxevBhUjiwYujb5xx9/BAQEIIwhcEDfnp6e/fr1mzNnDrJI9u3bd+3aNVjADQIlgBsT6HTp0uXdu3fnz59HlodCoRg8eDDClAjOTBC1cOHCsWPHyuW8erqVHiAqDF8ZqRpxEKakcGkCNIsy4t27d//+++8RpnRwSd8NGzYMDAzcsmUL4jUPHjyA171795qu1dpy4NgElqNGjdq9ezc03SOesnTpUh5/O/PDvQla+e1SPDw82rRpgzBGgnv69vHx6dGjx/z58xGPSE5Opn1Xt27dEMZ4cHKCbRBBfHz8pUuXEC+AoFDPnj0hBoowxoarE8jzxqU8e/ZMIpEcP35cLBYjjLHh8AMSFi1apClxLtrW2bNnZ2Zm2tjYIIxp4LC+P//8c29v7x07dsByu3btEhISoFBH3CExMTE4ODgkJARhTAa3H3Azbty4zZs3N27cGMRNEASYcsQFYmNjz58/7+zs3LFjR4QxJdzWd6dOneLi4nJycmCZoihQOWI9Hz58GDlyJFyTIpEIYUwMh7ukhYWFgVbUz1iDhdTU1IyMDHt7e8RWwJNkZWXt2bMHYcwCh8tvFxcXgUCguQYCbVCcI7by22+/wU3G398fYcwFh/W9a9cuuNFDc4+6CIfCGwpIxEquXbvWqlWr8uXLI4wZ4bb/hobMffv2DR48mFY53PpjYmIQy4iKioKKL4RKwFAhjHlh8N9ndryLvpchyVHIZQo9m1EURC8oLesRomcwUDBcSbAzRZQwFWjSIqApClBmIP02Wnr7abGNCVVetBxZVS/VnnlVtglS+45KKEQQOlILJ8FXz0boVX4SIj72aET+5lSxjbUchFIdQhNSKBAKkJu3dZcRXsgi0afv0zuSo+9mBgY7VavnRBUyugWKoxfgj1To1iChOvVI+xHoDYrrpGADQnV96D64bgUqUwmFKp0ofEyNXWFNcXmrhK9SJqUjzzouu7xjastvXk6LZ1jXPtrkTajWFD8fFKH8V+QTSYHg1eP0x9ffr5v+sv90P2R56NT3jj9fZX2QdxvrjzBcpmpDB/i7vCd1zeSYgbP9kYWh3TUkx8qT3+R2+dUXYXjBZ51dhELyyN/vkIWhXd8XDyfaOuDR2rzCK9A24WUGsjC06zs7Uy604nZoBVMEB3craS6FLAzthXROtkyhQBg+IZXKpVKL+1GxCcHwGaxvDJ/Rrm+BkEByi/NqPIfQ3YbAX7TrWy6jsP/mGxSBLK/Iwv7EUlD2QcDlNw1+7gX/oCiEy+88CIIgcfibX1hmmaVd3woK+2++QVlkvECH/8axEwwvwPVLS0HZC5jA7fMYnkIhLR3EeY92fZMCZInBJF5DkIQF/qTaoyQKOaIUlujBz5w90Tys/vv3qfo3mz7jtzFjh+nf5tDhvXAomUyGjMS06eNGjxmKSoryB7W8n5SlUcAZM8cfObofYTClg6X6jop6iDBGhcD9Twow/ES0a9+0Z4/+oMvz/522s7OrVavOxAmzHOwd6NQNG9cc//dQUlKih0eF0JB6v46cQE9acuXqxe3bNzyOeuDq6h4cHDJo4Ag3N3e4rUPS/AWzVqz88+D+s3o+FIp5aIr6rOEX8yNmCQSCalVrTp82d9/+nf9sWO3o6NS6Vbshg38hVA0bsbExixaHP3n6SCAQ+vsH9vt+cJ3Q+vRBVq5a/O+Jw7Y2tmFhX/n4FBqEe+z4wQMHd7948SwgoPKXzVt16dyDMLCZJDk5adbvEx88uOvj49u9W9+2bfImHNSTHz1JmocdMqxPzRq14fsiA7A4g6K9/C
YFBCEw7IeEH2Pnrs3t2nU+ffL6vPBl8CMtXZb3iIV161fu279j6OCRu3Ye/2HAsLPnTsCWsP7J08cTJv5Sp06D9Wt3/Txi3PPnT+bOmw7rjx25CK9jx0zRL26keuTp/Qd34G/n9qMrl2+EhV9+/VGhkB86cG7a1PAdOzddvao8VGpqyvAR/eHSWr1qy/+WrnNxdp01e2JWVhYk7T+wa/+Bnb/8/Nvy5Rs8Pb03bPxLffCTp47NnTcjqEq1LZsODPzhp127tyxbHoEMAbK3ZNm8Pr0HLoxYWa1aTVDt27cJ+vOjJ0lNdnb2uPHD3VzdoQT56LzQ7fMWV4DrqF/KKMrw/rGVKwU1qN8QSrgaNWp1aN/17NkTUqk0PSN967Z/4Ddu3LgZFOfNmrbo1LHbps1/Q9L9e7etra179xpQvnyFTz9pFDF/RY8e/ZCBSCSS4T+NcXJy9vMLCAyoDKV4/35DbG1tocxzdnZ5Hv0UtoHLSSQWjxk92cvTG8rRsWOmZmdngawhac/ebU2btGjaJMzRwfGr1t/UrdNAfeQjR/bVrl1n5C/jXVxcYX3/74fs27cD9PfxeYPKZftvusJXg8xAMQxvHz2+rz8/epJo5HL5lKmjszIzw+cswTN0MmJM/125clX1srdXRVDw69fxcXEvYaF69WB1UlBQ9YyMjFev4oJrhebk5EyYNBJ+1PhXcaDR4jdiRry9K1pZWdHLNra2/n6B6iQ7W7uMjHRYiH7xrEqVaurnW4N9qujj9+TJI4qiIBvgATTzRi8oFAq4GzSo/5k6Ce4zsPLuvVvIEEJq16UXnJ1c4DVXNdWtrvzoTyJUzFswE+zcvLnL4OpFvACKJNPN8G/M9h2x2Fq9bK3KcWZmRkpqsvKtRpKNjS1S3mSzQPRQCJ0/f2r1X0uXr/izXt1PoJADF44MgSzcEYzU1i8sJTkJLgPNNZC9rGwoBDOhOKTzk7feOu9Ew20BLsu/1y6HP80dDSq/kcZD4zWNu6786E+Cq/HO3Ui4CcBtUPNUfyQkW+uX8BOA40KmwZj6BjWrl3NUOQa52NkpZyvOzin4AllZmfAKFUp4hXs3/IGjuHnz6u49WydOGrln9wlkbGzt7HJyczTXZGdl+Xj7QukIhUeuRlJ2dp7TBeMEJqdVy7ZNmhSaNNDL0weZLD/6k5CyOLefPnVuxJ+/h8+dFrFghUGVXcoSwyc6/IlASJACg8/GnTs31ctPn0VB0QVFUaVKQaChBw/uqJMePboPJVC5ch63b9+8ek35DDR393KtW7f7adhoMOsJb98gY1M1qAZ8KJTH9Nu09LSXsS8CAiqBPsqX94TghnrLK1cvqJch55AfsEz0X3DNEKjSeXgYYQJYXfnRn6TMUmCV0NB6M6bNu3f/9uYt65AhQP3SArsQate3cnya4fXLd0mJ4KThdgPBk0OH9zRv3kosFkO9rWWLNps2r7106Tz8Wv/+e3jvvu1du/YCIwEGd/qMcQcP7YH2woeP7kNVD4Reobwn7AXqv3Hjyq3bN4zS/vfNN13g3hKx8HcIX8TERM8Jnwp+qc3XylBd82YtIaAJzZawDPXghw/vqff68YfhFy+ehWYmsN337t2eOWvCqDFDwLegUqMnP3qS1AQGVv5x4PD1/6yCABTC6MWY9ct2bTtBWdii1aff9+/q5xswYvhYej0UzJ83agph4C5dW23eug7C5D1VcZLvvu3dtk2nZf9b0KlLy19HDbK1tftz4WrasPbqOSDy1nUIFGgamxLj410RwoUQxu7es93IUYNgzeJFa8CcwELvXj9ATBpCmRB0v3zlv2FDRyGV04XXWrVCV6/cfPfuLcjemHHDQHazZy00ylP89ORHT5ImcOqgGWHmzPEIoxeC0nbT+mdWjEKBuo70Rx9Nh05h0PzRt89AhGElt8+l3jmXPDyiMmIZhw4dunnz5rRp05AJ0F6/pBAe4sA3KERZYPuOjvlPyLxZs8ucb9o305X022/TG3/eDJ
UdW7au37p1vdYkP//AZUvWItaBxzeoUCgMHn+5f+8pZAK2bDmoK8nGuowf+wt+DKqDWpPY2NUaj79kIeoeWixErAJhWAwen2Yx4PkhNLHAsXr8hrDE6qVufVvgWGveY4Ellq7xxZY4FpXfWGbMV0f8RF6S9nkMm1F2xsL+pAA8xyb/wO07GL5CWeQEsljfGD6jo33eiiSx/+YXApIUCC1u0mvtX9jWXkQgAcLwCHkOZSXC+lYRGGyXmWGEjvwY9hD3PMPJ3QpZGNr1HdrMQWQtOLMtEWF4gUSC0lKk3470RhaGzhtW/2l+KW+yj64x/mhIjJm5diRlx/znPcf7IctDX/yk33S/TX/EbZz9HOol0lx58Q1IgXKm2eIQJKK0da8lBIjStr3yUAShKDaSSBWC1z7CSPUphM5JbkkC6Z7/VteO9Ax9lO6OwVp3hL0gg1rzX5AdEmn2N6Z3KUjVcRqL7ktoie/pOaWASCxQyCihNdlnbKC9qyU2aDDEB3tPrIjk6OaZtKyMnOKpun5UnQIiSZ39yotIIG+H/J9E66cgkkLaj0YQpGq4inbBnfvvvwYN6tsWn1NGOX+OvomhKdW8/H0GAAAQAElEQVQX1pIR5Wzp2q/pN28SXr9+XbdBvUI7Fha4vtOCGAROCEhKrnNfK6GVfy27Cn6WO83VR8S/BaheC0eEHBEvSE5O/n3lX5MjuiAz4bFp092gT+QVKlRAGLNjcQEjsVi8dq1ZR4717t3bxcXlxo0bz549QxjzYnH6tre39/LyQuYFLqq6detOnjz57du3CGNGLE7fY8aMefLkCTI7JElu27YtLS0tNTUVYcyFZelboVCcP38+KCgIlRFVqlQRiUTgWCjLfNyq2bEsfYOqTp8+jcoUOzu7KVOmbN++HWFMj2XpWyAQgP9GZU3VqlW7d+8OC+BYEMaUWJa+J06cCP4EsYb4+PjDhw8jjMmwLH3fvn07ODgYsQao7Pr7+yPlnOhZCGMCLEvfR44ccXV1RWyiZs2a8Prjjz/i6LgpsCB9y2SynJwcxEo2b97877//IoyxsSB9R0REHDx4ELGVYcOUD/xes2YNwhgPC9J3XFxc/foGP5/NzHzyySeDBg1CGCNhQfpetmxZQEAAYje1a9eeO1f5SGJsx42Cpeg7Ozv7zRtujNVwcVE+2BJCPatWrUKY0mEp+t60adOBAwcQd+jatSt+PHHpsRR9Z2RkNGrUCHGK/v37w+uOHTugGQhhSoSl6PvXX3+tVasW4iAdOnQYPnx4bm4uwhiORegbzPe9e/cQNxGLxfv27YPI/fPnzxHGQCxC38eOHeOW+S6Ok5MTCH3gQPz4RcOwCH3Dzb1169aI4/j4+IBRiYyMxF7l47GI+TXpzqg8IDQ0lKKoV69eXbt2rXPnzgjDBP/Lb6lUeuqUSZ5dWCYQBAEF+ePHj+/cuYMwTPBf31euXLl+/TriFxMnTnR2duZHr1qoVzg4mOopkPzXt6ura9WqVRHv8PPzA2WEhYXJZDLEZSAu5Ohoqtl1+K/vmjVrdurUCfERgUCwZ88eiB4qDH3aNJt4+vRplSpVkGngv77fvHkDtTHEUyBuCC35EOC/cOEC4ibPnj2rXLkyMg381zecvq1btyJeY2dnt2vXrqioKMQ14MpMSUnx9jbVxM3817eXl9cnn3yC+M6iRYvS0tIgWIQ4hUnNCbIEfVeqVKlHjx7IAmjQoAFEDydMmIC4A9xdsb5LRVJS0sWLF5FlIBQKv/zyy6NHjyKOAPqGAgiZDP7rOz4+ft26dchiaNmyJfixnJwcTsQNsT8pLeXKlfv888+RJeHm5gah8caNG6enpyN2g/VdWqBuTg8UsCjAiEPD7ZkzZ+RyOWIrb9++tVeBTAb/9f3hw4ezZ88ii6R9+/bgUljbNxgKb9NFvmkson65cuVKZKmAUblz505kZCRiH6Y2J8gS9O3s7Ny0aVNkwUyZMgXiKiysbpq05ZKG//qGytbQoUORZV
O7dm2SJAcPHozYBNa3EcjKysJT+yHVA1IGDRoEzfiIHVAUFR0dbdLgN7IEfUOMbPHixQiDUL169b7++muJRMKGqY7MYL6RJegbwk/Q5IEwKuzs7EQiERTkZf4kN1O3zNPwX9/wi44cORJhNDh48CBEVIr0GoeiHZkRU7fM01jE+MsjR44gTGFAzaBv9aNuIcQEDQXHjh1D5gL7E+MA+g4PD0eYYkDQMDc39/Lly61bt87MzITlvXv3InNhHn3zf34IaOBo06YNwmgDIqdQkCcnJyNVk35MTIx5ZAf3CojHu7u7IxPD//JbIBCMHz8eYbTRuXPnd+/eqd9CW+/+/fuR6TFDyzyNRcxfZZ7fjItABFrzLRTh58+fN8P8WOa5SyAL0fcff/zB5m50ZUi1atV8fX1tbW3VsRQows0wPMIMLZc0hCU8CH3OnDnjxo0Do4IwxYCiNCoq6vr16w8fPoS6eIj7UFdHH7GVjUymIKCVUbUNQUBzIyryFiAJpNBcr7GACq8kVKvUWoPLiSRIejv1XqoDEor8jdSfotqG0NhbidhaaOsgaNzW3a+WDdINn/UdEhKiqWlCdY579OgxZswYhCmGJJtaMzXawdnKK8BOaE1IpTINHStlR1KEgqCQhgrhlIJ+BIiUI1Xxn78DyJFSLhesJFULClCyxgHpzyUIkqLy7h6a6zWXVdcHhTSkKhBaJcVmJSdImnRyr9FQ5/RXfI6fBAYGxsXFaa7x9vbu06cPwhTjzXPJvpXx3UZWEplwsIHRUT6oaEv4i/in2a36eGjdgs/+G8KCJFnoC37xxRfly5dHmGIcWvuqRiMXTok7j57jA57dzUAS7al81nfv3r2h8qR+6+Xl1a1bN4QpRsxdiVxK1f3SBXETGwfh4fXau9PwWd82NjYQ31Vb8IYNG1asWBFhihH/PFMgJBBnEVuTaanaY5o8jw/27NnTx8cHFjw9PXHhrYtcCfzjcPw0N0eWk6s9TMKu+mVOJoq+m5H6TiKTUjJZwVRj+fEpQl2FJpU197waNUkSCgVF1+U1doH3UKunvq435o7VHahZxtywi72VqJAXnWoVPDqEqwiSoBT5NXpV4Erjs0hFfgVfQAhIIbJ3FvkF2br5WCEMu2GFvi/sT358Iy0nUw6KIgQqaRKEXKYhxLwgqkaktPiy5hqkGYMtF+QZBvGqR1czNEVcsCEd1Cq+u7bPUsapCOVBLqmuK5GNwCvAps0AXGdlKWWs730rXsc/ywI1i+2svKq5ulbkUgU+K1WSFPsh7mn2/0Y/cy4n7jWeq+aeFJCkgMP+W0AiXY04Zabvh1czzu9JhGx5Vy3nwilZq7F1Efm6lIMFhQTF3Hq9bNQz/xr27QZWQFwDPJtCzuFmPuWNXkf2y0bfu5e9ehOd7Vm1nJsvJ5VdBFKEAj/1goWo87F/TYr+8fdAhDEjlO6nV5RB/OTwurdJryTBLQP4IW5NqjbxFdlZrxgXjbgFSSAO2xN9mFvfG+fExkZlgQ4QT/GrU97exW7FWE5JXEEhLvdCUnbU0tGNyqz63rPsdXaGonpT3oqbpmKou1MF+1UTOCNxgiQ5XX4re36R2r+A+fT94FJ6wsucoMYW0YLoVcNNYCXcHvEKcQFKoeB0+Q3+W1cvWPPp+7997zyruCGLofJn3klvcmLuZyNM2WEmfR9ekwANfxyNA5YYRze7U9sTEcbE0GMntCaZSd8vozLLV7agwpumYmi53GzZk8gMxG4EQpLT/atA4Lp0bA59XzqUDC2Uzl62iJVkZKaOmfLp7XsnkQmwdhBfP56K2I1cppDLzG3Ad+/Z1qLVp8goEKoQpzbMoe+ntzOt7cXIIikX4PwhRYJ4x4sXz7v3bIfYAdQvdT2g3Bz6zkqTOXs5IovEoZwNVO2f38lC/CLqyUPEGiC8SeoQssnb5zNTIPpEuXibypykpScfPLooJu6uRJJTtUrDFk0HeJTzg/Vv3j6PWNbz58FrT5//5/6jc06OHq
G1WrZp+RM93OHW3X+PnVqVnZ1Wo9oXTT/vhUwJuNund9IqhbDUnpWAw0f2LYiYDQvNw+oPG/rrt117ZWVlLVz0x+3bN9LT0/z9Ar/+ukPHDt/SG8fGxixaHP7k6SOBQOjvH9jv+8F1QusXOSBss279ytt3blIUVbNm7e7f9a1VKxR9NMrgoI42epOX3y+jMuiB66ZALpevXDvseUxkl2/Gjx6+xd7OdcnqAUnJ8ZAkFCg7Z+/cP6dO7dbh0y707Drj3MXNdx4oTfabt8+27Jpav06b8SN31w9tu/9wBDIlAqEgLYnVowcEApI0pH7Ztk3H7t36li9f4cypGyBuWDN+4s+vX8fPmhmxY9uRJk3CFi+Z++jxA1ifmpoyfER/D48Kq1dt+d/SdS7OrrNmT4SLQfNoEolk5KhBUO7MDV8aMX+FUCCcNPnXnJycj84OUo0E0K5kk+v7fbKUMNmHvIi9nZgU06PrjGpBnzk6uH3z1c92ts7/Xd6m3iCk5pchwWFCoVWlgLpuLt7xrx7DyktXdzs7VWjZ7AdbW8fKgfU+rd8RmRKSIHKyWf2oVblcoShF/fLK1Yv37t0eO3pK9Wo1nZyce/XsD6XvPxtWQ9LOXZtFYvGY0ZO9PL19fHzHjpmanZ21/8BOzd3j4l7CZdClc4+gKtUqVaoybWr4jBnzDXpaEKG7+4zJ9Z2bLdfTvauUxLy8IxBYVQnMu9/BjQJ0HB1zS72Bj1d19bK1tUN2jvJ5p0kpcRXKF3Txq+hdA5kUAsmkvJ1kBinrms+sra0DAgom8w6qUj0qSmnQo188q1KlmlCYZ4Pt7Owq+vg9efJIc3fQvbOzS/i86Zs2r71//w5JkmBgDHooJkWVXf9vO3th8SEzxiI7J0Mul0J0T3OlvV3BOHBC270jKyvN3a2gm4BIpG8CpNIDhaOY1wPZkpOTrK0LnUNbW1sop2EhJTnJ27tQjwxrG5us7EL+RCwWL/7zL/D0u3Zv+Xvtci8vn359B7VsacCUv3r8r8n17eYtVphM3w72bqDOAb0KGWiSZLgpgS2RSgvsXW5uJjIllFxh5yhC/AVK5ZycQt0QMrMy3d2UIz9sISm3kJPOzsry8S7awc7X13/okJH9+w2JjLx29NiBP8Kn+vkHgl1BHwmhmidOGyb3J5VqK+MGctOEgL09gySSbGfn8mCj6T8X5wrenlX17+Xi7Bkb/0A9o+TDqAvIlCjklKe/aW8RpUQ5Pk1YciVUDaoB1cGnz6LUax49uu+vsiuQBMtSad5Q8bT0tJexLzSdDFIFT0DTSGkgrRs1ajJ92lzwM0U8jH4oBT07ohbMEf8WConEaJO04VWp1KBalc927vs99X1CRub7i1d3LV7Z71rkQf17hdRsAW2W+w5HQDTqWfTNS1dN+8g8mVQR0swJsRjl+DSZYZUkMM1gSy5cOAu1w08+aQSmYuHC3x9HPUxJSQaPAZru9q1yHrxvvumSmZkRsfD3t28TYmKi54RPtRZbt/m6UIU+Le3DvPkzV6xcFP8qDo62ecs6qFwG1wxBxsAc+nYpL05LMpUHGNB7Ye2aYZt2TJ4e3vrClR11Q7764jOGeU6qVvm0XesRUU8vj53acNuemd27TFWtNomJevs0VSQW2Dvxberahp82rhUcOmXamFOnj0NxO3tmhKOj07Cfvu/Zu/3NyGuzZi6gA9g+3hUhHgIVUGjshCAgrFm8aA34Gc1DBQeHjPp14slTR/v07dS3X5d7924tjFgJkXJkDMwxf2zso9yDa+JrtvBHlseTC/Ee3lYdh3khFnNqe8Ljaxl9p5pjQm5TsGdpDEUR/ab4FU8yR/ntW10sshbE30tCloYCSbJlLBc3D1AG6ORlOn/VJ61dLx4Afet8ntCU31toDWIqFHKI8elqAYUGSHs7Z2Qk/t446kXsHa1JtjaOWdlpWpNmTTypK3tPL8e7eXIgckIQ5ijmTIeycqmj/6CZ9B3SxPHmqdQXN94G1Nc+1d
PYEduQ4RhR3ED3LtPkGpPCaSKVSayE2pWqS9xZyRJJjrTHbC7c9AnE6fGXBGLB/D4DZvgtG/UsuCtpVgAAAwFJREFU673E1lmLUBwdTf6oOEbsbI0Z5Xhx+3XDNh6IC3B+/KXuJLPemDoOrhh9nRtDbkvJk4uvfCrb1vvSQnsFswez6tunqrjTUN8Hp14iXvPwdEylWjYdhngirqDsoMRhg0IQFFG24y/VeFcRdRzidf/ki+QXbB+VWDIenX7pFWAb1q0c4hDKDkocNiiEasphrUllUHH2rmzda1xAYkxS1H9xiEe8vJV4/8SLqvUdOg7jTsmtguvzx+qpH5fN/Jou5QVD51XatiD+3r/R1nYiz2ru9m7WiLOAsjNSssQ2guELOdlEwvn5Y3X3LSjL+b+7j/FBcrR5XmxM5BuIcVtZC63txfbONlb2IkKESB0d3CmCDnjmRYUojUtXAWFQKm8bIv/3ooo/bjT/CPnvlTsK6PA7pTFbfuF3ea9ww5MpcnNk2WmS3IwcSaZMJlXYOgibf1e+xidcnd2F0DdCgNuU9fMbBKjXBGVvyev/vn8SmZb5ISszJQuKE6X8NK5KTb3SfQrUdgs21QjtFzxBVyVHlWJVDy7J21hBkCQtYyIvNW8p723+J+Udh16vQASZL2/VU1KUn0gKSXsngW+IfYuenLLa2qD0jRDgNmx5/k6DVs7whzAYo8Ln5xdjPhKBQECQHG6iFxKkAmkfwY31jUE2tlZCKy4/f0dEikVlNH4ew34+beEsl5hsELjpyU6TeQfYaU3C+sYgJEKObqKja18jDvLoSpqcUnzeUfvDxc0xvgHDCTb+EWtlJWw7iEu91e+ceX//UsqPcwIFOgZIYX1jCtjwe1xWmlRsQ0IAVnNGWUL9fJvCXVFVj4wm6JVEkWfgFNoyr0Eh7znU2p6Wk/+UXUrVPIG0fHRhwHPLcuSkAA2cEYB0j/7D+sYUIi4q997F95npEk1Hrn7uc5EHQBMCgpJTytYBRdEkpfNV5KuczJu/gRAgSl54y/zLQPNQmmh95DTg4CD2qW4b0pShhybWN4bP4Pgghs9gfWP4DNY3hs9gfWP4DNY3hs9gfWP4zP8BAAD//7fyBTEAAAAGSURBVAMAcOA1/oyLBv8AAAAASUVORK5CYII=", + "text/plain": [ + "" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "graph = create_react_agent(\n", + " llm, \n", + " tools=tools,\n", + " store=store,\n", + " checkpointer=InMemorySaver(),\n", + " pre_model_hook=pre_model_hook,\n", + " post_model_hook=post_model_hook\n", + ")\n", + "\n", + "graph" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## IMPORTANT: Input and Config\n", + "\n", + "### Graph Invoke Input\n", + "We only need to pass the newest user message in as an argument `inputs`. 
This could include other state variables too but for the simple `create_react_agent`, messages are all that's required.\n", + "\n", + "### LangGraph RunnableConfig\n", + "In LangGraph, config is a `RunnableConfig` that contains attributes that are necessary at invocation time, for example user IDs or session IDs. For the `BedrockAgentCoreMemoryStore`, `thread_id` and `actor_id` must be set in the config. For instance, your AgentCore invocation endpoint could assign this based on the identity or user ID of the caller. Additional documentation here: [https://langchain-ai.github.io/langgraph/how-tos/configuration/](https://langchain-ai.github.io/langgraph/how-tos/configuration/)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"configurable\": {\n", + " \"thread_id\": \"session-1\", # REQUIRED: This maps to Bedrock AgentCore session_id under the hood\n", + " \"actor_id\": \"user-1\", # REQUIRED: This maps to Bedrock AgentCore actor_id under the hood\n", + " }\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run the Agent\n", + "\n", + "For this example, we will run through a conversation where the user is talking about what they like to cook with. This will give the backend enough context to extract facts and user preferences that we can retrieve the next time the user asks for what to make on a given evening." + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "\n", + "Hey there! Im cooking one of my favorite meals tonight, salmon with rice and veggies (healthy). Has\n", + "great macros for my weightlifting competition that is coming up. 
What can I add to this dish to make it taste better\n", + "and also improve the protein and vitamins I get?\n", + "\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "[{'type': 'text', 'text': \"I'd be happy to help you enhance your salmon dish for your upcoming weightlifting competition! Let me see if I have some additional information about your dietary preferences or nutritional goals from our past conversations that might help me give more personalized suggestions.\"}, {'type': 'tool_use', 'name': 'retrieve_user_facts_from_past_conversations', 'input': {'query': 'dietary preferences weightlifting nutrition protein requirements supplements'}, 'id': 'tooluse_rjCYWu9BS_SleO9B7iRJUg'}]\n", + "Tool Calls:\n", + " retrieve_user_facts_from_past_conversations (tooluse_rjCYWu9BS_SleO9B7iRJUg)\n", + " Call ID: tooluse_rjCYWu9BS_SleO9B7iRJUg\n", + " Args:\n", + " query: dietary preferences weightlifting nutrition protein requirements supplements\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: retrieve_user_facts_from_past_conversations\n", + "\n", + "[]\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "Since I don't have specific information about your dietary preferences from previous conversations, I'll provide some general recommendations to enhance your salmon, rice, and veggies meal:\n", + "\n", + "For better taste:\n", + "- Add fresh herbs like dill, parsley, or cilantro\n", + "- Use a squeeze of lemon or lime juice\n", + "- Try a light glaze with honey and soy sauce (or coconut aminos)\n", + "- Incorporate minced garlic and ginger\n", + "- Season with various spices like paprika, cumin, or a spice blend\n", + "\n", + "For more protein:\n", + "- Mix in some quinoa with your rice (it's a complete protein)\n", + "- Add edamame to your veggie mix\n", + "- 
Include a side of Greek yogurt (can be made into a sauce with herbs)\n", + "- Sprinkle hemp seeds or chopped nuts (walnuts are great with salmon)\n", + "- Consider a side of egg whites\n", + "\n", + "For more vitamins and minerals:\n", + "- Add leafy greens like spinach or kale to your vegetable mix\n", + "- Include colorful bell peppers for vitamin C\n", + "- Add avocado for healthy fats and vitamins\n", + "- Mix in some broccoli for vitamins A, C, and K\n", + "- Consider sweet potato instead of or with rice for vitamin A and fiber\n", + "\n", + "These additions would maintain the healthy profile of your meal while boosting both flavor and the nutritional content that's valuable for your weightlifting competition. Is there any particular aspect of your diet I should focus on more specifically?\n" + ] + } + ], + "source": [ + "# Helper function to pretty print agent output while running\n", + "def run_agent(query: str, config: RunnableConfig):\n", + " printed_ids = set()\n", + " events = graph.stream(\n", + " {\"messages\": [{\"role\": \"user\", \"content\": query}]},\n", + " config,\n", + " stream_mode=\"values\",\n", + " )\n", + " for event in events:\n", + " if \"messages\" in event:\n", + " for msg in event[\"messages\"]:\n", + " # Check if we've already printed this message\n", + " if id(msg) not in printed_ids:\n", + " msg.pretty_print()\n", + " printed_ids.add(id(msg))\n", + "\n", + "\n", + "prompt = \"\"\"\n", + "Hey there! Im cooking one of my favorite meals tonight, salmon with rice and veggies (healthy). Has\n", + "great macros for my weightlifting competition that is coming up. 
What can I add to this dish to make it taste better\n", + "and also improve the protein and vitamins I get?\n", + "\"\"\"\n", + "\n", + "run_agent(prompt, config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### What was stored?\n", + "As you can see, the model does not yet have any insight into our preferences or dietary restrictions.\n", + "\n", + "For this implementation with pre/post model hooks, two messages were stored here. The first message from the user and the response from the AI model were both stored as conversational events in AgentCore Memory.\n", + "\n", + "These messages were then extracted to AgentCore long term memory in our fact and user preferences namespaces. In fact, we can check the store ourselves to verify what has been stored there so far:" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Facts namespace result: [Item(namespace=['facts', 'user-1'], key='mem-596ad8e5-f561-4bea-861a-c498276e92f8', value={'content': 'The user is preparing for a weightlifting competition and is cooking a healthy meal of salmon, rice, and vegetables.', 'memory_strategy_id': 'memory_semantic_ghc4p-SLjZ3l87ji', 'namespaces': ['/facts/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.40105253), Item(namespace=['facts', 'user-1'], key='mem-c040719d-81a4-41eb-9b8a-6d4848d75dcf', value={'content': 'The user is focused on maintaining good macronutrient balance in their diet to support their weightlifting training.', 'memory_strategy_id': 'memory_semantic_ghc4p-SLjZ3l87ji', 'namespaces': ['/facts/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.37003443)]\n", + "\n", + "Preferences namespace result: [Item(namespace=['preferences', 'user-1'], key='mem-52e75280-1c1c-41aa-874b-e4725dba60bc', value={'content': '{\"context\":\"User 
mentioned cooking salmon with rice and vegetables, indicating a focus on healthy eating for a weightlifting competition\",\"preference\":\"Follows a health-conscious diet with high-protein, nutritious meals\",\"categories\":[\"food\",\"nutrition\",\"fitness\",\"diet\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.40248412), Item(namespace=['preferences', 'user-1'], key='mem-4e4031c9-4629-4aef-b8b5-09a48a252c8e', value={'content': '{\"context\":\"User is preparing for a weightlifting competition and is concerned about meal\\'s nutritional value\",\"preference\":\"Interested in meals that support athletic performance and nutrition\",\"categories\":[\"fitness\",\"nutrition\",\"sports\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.38236186)]\n" + ] + } + ], + "source": [ + "# Search our facts namespace (the one that is used by the agent long term retrieval)\n", + "search_namespace = (\"facts\", \"user-1\")\n", + "result = store.search(search_namespace, query=\"food\", limit=3)\n", + "print(f\"Facts namespace result: {result}\\n\")\n", + "\n", + "# Search our user preferences namespace\n", + "search_namespace = (\"preferences\", \"user-1\")\n", + "result = store.search(search_namespace, query=\"food\", limit=3)\n", + "print(f\"Preferences namespace result: {result}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Agent access to the store\n", + "\n", + "Great! Now we have seen that long term memories were extracted to our namespaces based on the earlier messages in the conversation.\n", + "\n", + "Now, let's start a new session and ask about recommendations for what to cook for dinner. 
The agent can use the store to access the long term memories that were extracted to make a recommendation that the user will be sure to like." + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "Today's a new day, what should I make for dinner tonight?\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "[User Context: {'content': '{\"context\":\"User is preparing for a weightlifting competition and is concerned about meal\\'s nutritional value\",\"preference\":\"Interested in meals that support athletic performance and nutrition\",\"categories\":[\"fitness\",\"nutrition\",\"sports\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}, {'content': '{\"context\":\"User mentioned cooking salmon with rice and vegetables, indicating a focus on healthy eating for a weightlifting competition\",\"preference\":\"Follows a health-conscious diet with high-protein, nutritious meals\",\"categories\":[\"food\",\"nutrition\",\"fitness\",\"diet\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}]\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "[{'type': 'text', 'text': \"\\n\\nI'd be happy to suggest some dinner options for you today! 
To provide recommendations that match your preferences, let me first check some information about your dietary needs and preferences.\"}, {'type': 'tool_use', 'name': 'retrieve_user_facts_from_past_conversations', 'input': {'query': 'dietary preferences, fitness goals, meal preferences', 'limit': 3}, 'id': 'tooluse_mlZ_AmFESOmY2nKRbtIaIw'}]\n", + "Tool Calls:\n", + " retrieve_user_facts_from_past_conversations (tooluse_mlZ_AmFESOmY2nKRbtIaIw)\n", + " Call ID: tooluse_mlZ_AmFESOmY2nKRbtIaIw\n", + " Args:\n", + " query: dietary preferences, fitness goals, meal preferences\n", + " limit: 3\n", + "=================================\u001b[1m Tool Message \u001b[0m=================================\n", + "Name: retrieve_user_facts_from_past_conversations\n", + "\n", + "[Item(namespace=['facts', 'user-1'], key='mem-c040719d-81a4-41eb-9b8a-6d4848d75dcf', value={'content': 'The user is focused on maintaining good macronutrient balance in their diet to support their weightlifting training.', 'memory_strategy_id': 'memory_semantic_ghc4p-SLjZ3l87ji', 'namespaces': ['/facts/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.4182649), Item(namespace=['facts', 'user-1'], key='mem-596ad8e5-f561-4bea-861a-c498276e92f8', value={'content': 'The user is preparing for a weightlifting competition and is cooking a healthy meal of salmon, rice, and vegetables.', 'memory_strategy_id': 'memory_semantic_ghc4p-SLjZ3l87ji', 'namespaces': ['/facts/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.38666412)]\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "[User Context: {'content': '{\"context\":\"User is preparing for a weightlifting competition and is concerned about meal\\'s nutritional value\",\"preference\":\"Interested in meals that support athletic performance and 
nutrition\",\"categories\":[\"fitness\",\"nutrition\",\"sports\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}, {'content': '{\"context\":\"User mentioned cooking salmon with rice and vegetables, indicating a focus on healthy eating for a weightlifting competition\",\"preference\":\"Follows a health-conscious diet with high-protein, nutritious meals\",\"categories\":[\"food\",\"nutrition\",\"fitness\",\"diet\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}]\n", + "==================================\u001b[1m Ai Message \u001b[0m==================================\n", + "\n", + "\n", + "\n", + "Based on what I know about your dietary preferences and fitness goals, I have some dinner suggestions that would support your weightlifting training and competition preparation:\n", + "\n", + "### High-Protein Dinner Options:\n", + "\n", + "1. **Herb-Crusted Chicken Breast with Sweet Potato and Roasted Vegetables**\n", + " - Lean protein from chicken\n", + " - Complex carbs from sweet potato\n", + " - Nutrients from colorful vegetables\n", + " - Season with herbs for flavor without excess sodium\n", + "\n", + "2. **Turkey and Vegetable Stir-Fry with Quinoa**\n", + " - Lean protein from turkey\n", + " - Complete protein and complex carbs from quinoa\n", + " - Variety of vegetables for micronutrients\n", + " - Light stir-fry sauce with ginger and garlic\n", + "\n", + "3. **Baked White Fish with Lemon, Brown Rice and Steamed Broccoli**\n", + " - Similar to your salmon meal but with variety\n", + " - Clean protein source with minimal fat\n", + " - Complex carbs from brown rice\n", + " - Broccoli for fiber and micronutrients\n", + "\n", + "4. 
**Greek Yogurt Marinated Chicken Skewers with Mediterranean Vegetables and Farro**\n", + " - Protein-rich and tender chicken\n", + " - Ancient grain for sustained energy\n", + " - Vegetables for vitamins and minerals\n", + " - Good balance of macronutrients\n", + "\n", + "Any of these options would provide a good balance of macronutrients to support your weightlifting training. Would you like me to provide a more detailed recipe for any of these suggestions?\n" + ] + } + ], + "source": [ + "config = {\n", + " \"configurable\": {\n", + " \"thread_id\": \"session-2\", # New session ID\n", + " \"actor_id\": \"user-1\", # Same actor ID\n", + " }\n", + "}\n", + "\n", + "run_agent(\"Today's a new day, what should I make for dinner tonight?\", config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Wrapping up\n", + "\n", + "As you can see, the agent received both pre-model hook context from the user preferences namespace search and was able to search on its own for long term memories in the fact namespace to create a comprehensive answer for the user.\n", + "\n", + "The AgentCoreMemoryStore is very flexible and can be implemented in a variety of ways, including pre/post model hooks or just tools themselves with store operations. Used alongside the AgentCoreMemorySaver for checkpointing, both full conversational state and long term insights can be combined to form a complex and intelligent agent system." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/samples/memory/bedrock_agentcore_memory_store_tutorial.ipynb b/samples/memory/bedrock_agentcore_memory_store_tutorial.ipynb deleted file mode 100644 index e1ebda6d..00000000 --- a/samples/memory/bedrock_agentcore_memory_store_tutorial.ipynb +++ /dev/null @@ -1,875 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Bedrock AgentCore Memory Store Tutorial\n", - "\n", - "This tutorial demonstrates the complete functionality of the BedrockAgentCoreMemoryStore, which provides persistent memory capabilities for LangGraph agents using Amazon Bedrock AgentCore Memory service.\n", - "\n", - "## Prerequisites\n", - "\n", - "1. AWS credentials configured\n", - "2. Bedrock AgentCore Memory resource created and ACTIVE\n", - "3. 
Required packages installed" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setup and Configuration" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import logging\n", - "from datetime import datetime\n", - "from typing import Annotated\n", - "\n", - "from bedrock_agentcore.memory.client import MemoryClient\n", - "from bedrock_agentcore_memory_store import BedrockAgentCoreMemoryStore\n", - "from langchain.chat_models import init_chat_model\n", - "from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage\n", - "from langchain_core.runnables import RunnableConfig\n", - "from langgraph.checkpoint.memory import InMemorySaver\n", - "from langgraph.graph import START, StateGraph\n", - "from langgraph.graph.message import add_messages\n", - "from langgraph.store.base import BaseStore\n", - "from typing_extensions import TypedDict\n", - "\n", - "# Configure logging\n", - "logging.basicConfig(level=logging.INFO)\n", - "logger = logging.getLogger(__name__)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:botocore.credentials:Found credentials in environment variables.\n", - "INFO:bedrock_agentcore.memory.client:Initialized MemoryClient for control plane: us-west-2, data plane: us-west-2\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Memory store initialized with ID: MEMORY_ID\n", - "Supports TTL: False\n" - ] - } - ], - "source": [ - "# Configuration - Update these values\n", - "REGION = \"us-west-2\"\n", - "MEMORY_ID = \"MEMORY_ID\" # Replace with your actual memory ID\n", - "\n", - "# Initialize memory client and store\n", - "memory_client = MemoryClient(REGION)\n", - "store = BedrockAgentCoreMemoryStore(memory_client=memory_client, memory_id=MEMORY_ID)\n", - "\n", - "print(f\"Memory store initialized with 
ID: {MEMORY_ID}\")\n", - "print(f\"Supports TTL: {store.supports_ttl}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Core Store Operations\n", - "\n", - "### 1. Storing Messages (put operation)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113343000#0f3ea8da\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ Stored human message with key: msg-1\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113344000#b3cf6a09\n", - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113345000#866aac05\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ Stored ai message with key: msg-2\n", - "✅ Stored human message with key: msg-3\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113345000#abb0945d\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ Stored ai message with key: msg-4\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "ERROR:bedrock_agentcore.memory.client:Failed to create event: An error occurred (ThrottledException) when calling the CreateEvent operation (reached max retries: 4): Rate exceeded.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "❌ Failed to store message msg-5: An error occurred (ThrottledException) when calling the CreateEvent operation (reached max retries: 4): Rate exceeded.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113356000#175223c3\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ 
Stored system message with key: msg-6\n" - ] - } - ], - "source": [ - "# Define namespace (actor_id, session_id)\n", - "namespace = (\"user-123\", \"session-456\")\n", - "\n", - "# Store different types of messages\n", - "messages_to_store = [\n", - " (\"msg-1\", HumanMessage(\"I love playing soccer and my favorite team is Barcelona\")),\n", - " (\"msg-2\", AIMessage(\"That's great! Barcelona has a rich history in football.\")),\n", - " (\"msg-3\", HumanMessage(\"My name is John and I'm a software engineer\")),\n", - " (\"msg-4\", AIMessage(\"Nice to meet you John! Software engineering is a fascinating field.\")),\n", - " (\"msg-5\", HumanMessage(\"I prefer Python for backend development\")),\n", - " (\"msg-6\", SystemMessage(\"User preferences updated: Python, Backend Development\")),\n", - "]\n", - "\n", - "for key, message in messages_to_store:\n", - " try:\n", - " store.put(namespace, key, {\"message\": message})\n", - " print(f\"✅ Stored {message.type} message with key: {key}\")\n", - " except Exception as e:\n", - " print(f\"❌ Failed to store message {key}: {e}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2. 
Searching and Retrieving Memories\n", - "\n", - "Wait for messages to be processed by AgentCore (typically 30-60 seconds)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import time\n", - "\n", - "print(\"Waiting 45 seconds for AgentCore to process messages...\")\n", - "time.sleep(45)\n", - "print(\"Processing complete!\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "=== Basic Listing ===\n", - "Found 1 memory records\n", - "\n", - "Record 1:\n", - " Key: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", - " Content: \n", - " \n", - " The user's name is John, and he works as a...\n", - " Created: 2025-09-05 16:02:36-07:00\n" - ] - } - ], - "source": [ - "# Search namespace for processed memories\n", - "search_namespace = (\"summaries\", \"user-123\", \"session-456\")\n", - "\n", - "# Basic listing (no semantic search)\n", - "print(\"=== Basic Listing ===\")\n", - "try:\n", - " results = store.search(search_namespace, limit=10)\n", - " print(f\"Found {len(results)} memory records\")\n", - " \n", - " for i, item in enumerate(results[:3]):\n", - " print(f\"\\nRecord {i+1}:\")\n", - " print(f\" Key: {item.key}\")\n", - " print(f\" Content: {item.value.get('content', '')[:100]}...\")\n", - " print(f\" Created: {item.created_at}\")\n", - "except Exception as e:\n", - " print(f\"❌ Search failed: {e}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "=== Semantic Search ===\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Query: 'soccer and Barcelona'\n", - "Results: 
1\n", - " Score: 0.42368466 | Content: \n", - " \n", - " The user's name is Joh...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Query: 'user preferences and programming'\n", - "Results: 1\n", - " Score: 0.43479428 | Content: \n", - " \n", - " The user's name is Joh...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Query: 'John software engineer'\n", - "Results: 1\n", - " Score: 0.4470298 | Content: \n", - " \n", - " The user's name is Joh...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Retrieved 1 memories from namespace: /summaries/user-123/session-456\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Query: 'Python backend development'\n", - "Results: 1\n", - " Score: 0.43134356 | Content: \n", - " \n", - " The user's name is Joh...\n" - ] - } - ], - "source": [ - "# Semantic search with queries\n", - "print(\"=== Semantic Search ===\")\n", - "\n", - "search_queries = [\n", - " \"soccer and Barcelona\",\n", - " \"user preferences and programming\",\n", - " \"John software engineer\",\n", - " \"Python backend development\"\n", - "]\n", - "\n", - "for query in search_queries:\n", - " try:\n", - " results = store.search(\n", - " search_namespace,\n", - " query=query,\n", - " limit=3\n", - " )\n", - " \n", - " print(f\"\\nQuery: '{query}'\")\n", - " print(f\"Results: {len(results)}\")\n", - " \n", - " for item in results:\n", - " score = getattr(item, 'score', 'N/A')\n", - " content = item.value.get('content', '')[:80]\n", 
- " print(f\" Score: {score} | Content: {content}...\")\n", - " \n", - " except Exception as e:\n", - " print(f\"❌ Search failed for '{query}': {e}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3. Get Individual Memory Records" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "=== Get Individual Records ===\n", - "Testing get operation with record ID: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", - "✅ Retrieved record:\n", - " Key: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", - " Namespace: ('', 'summaries', 'user-123', 'session-456')\n", - " Content: \n", - " \n", - " The user's name is John, and he works as a software engineer. He enjoys playing soccer and supports Barcelona football club.\n", - " \n", - " \n", - " The assistant acknowledged John's interest in Barcelona, noting the team's rich history in football. The assistant also greeted John and commented that software engineering is a fascinating field.\n", - " \n", - " \n", - " User preferences were updated to include Python and Backend Development.\n", - " \n", - "\n", - " Created: 2025-09-05 16:02:36-07:00\n", - "❌ Get non-existent failed: An error occurred (ResourceNotFoundException) when calling the GetMemoryRecord operation: Resource not found with memory id jgordleTestMemoryTools1-XKfUGT7fO4 and memory record id non-existent-key\n" - ] - } - ], - "source": [ - "# Get a specific memory record by ID\n", - "print(\"=== Get Individual Records ===\")\n", - "\n", - "# First get some record IDs from search\n", - "try:\n", - " search_results = store.search(search_namespace, limit=2)\n", - " \n", - " if search_results:\n", - " record_id = search_results[0].key\n", - " print(f\"Testing get operation with record ID: {record_id}\")\n", - " \n", - " # Get the specific record\n", - " result = store.get(namespace, record_id)\n", - " \n", - " if result:\n", - " print(f\"✅ 
Retrieved record:\")\n", - " print(f\" Key: {result.key}\")\n", - " print(f\" Namespace: {result.namespace}\")\n", - " print(f\" Content: {result.value.get('content', '')}\")\n", - " print(f\" Created: {result.created_at}\")\n", - " else:\n", - " print(\"❌ No record returned\")\n", - " else:\n", - " print(\"No search results available for get test\")\n", - " \n", - "except Exception as e:\n", - " print(f\"❌ Get operation failed: {e}\")\n", - "\n", - "# Test get with non-existent key\n", - "try:\n", - " result = store.get(namespace, \"non-existent-key\")\n", - " if result is None:\n", - " print(\"✅ Correctly returned None for non-existent key\")\n", - " else:\n", - " print(\"❌ Should have returned None\")\n", - "except Exception as e:\n", - " print(f\"❌ Get non-existent failed: {e}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4. Delete Operations" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "=== Delete Operations ===\n", - "Record ID to delete: mem-e2facd2a22ded39a2e94e522b857bfaba224\n", - "✅ Delete operation called for key: mem-e2facd2a22ded39a2e94e522b857bfaba224\n" - ] - } - ], - "source": [ - "# Test delete operations\n", - "print(\"=== Delete Operations ===\")\n", - "\n", - "print(f\"Record ID to delete: {record_id}\")\n", - "\n", - "try:\n", - " # Delete the record\n", - " store.delete(namespace, record_id)\n", - " print(f\"✅ Delete operation called for key: {record_id}\")\n", - " \n", - "except Exception as e:\n", - " print(f\"❌ Delete operation failed: {e}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ Record mem-e2facd2a22ded39a2e94e522b857bfaba224 was successfully deleted\n" - ] - } - ], - "source": [ - "# Verify deletion\n", - "try:\n", - " result = store.get(namespace, record_id)\n", - 
"except Exception as e:\n", - " print(f\"✅ Record {record_id} was successfully deleted\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Complete Agent Example with Memory\n", - "\n", - "This demonstrates how to use the memory store in a complete LangGraph agent." - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "✅ Agent with memory compiled successfully\n" - ] - } - ], - "source": [ - "# Initialize LLM\n", - "llm = init_chat_model(\n", - " \"us.anthropic.claude-3-7-sonnet-20250219-v1:0\",\n", - " model_provider=\"bedrock_converse\",\n", - ")\n", - "\n", - "# Define agent state\n", - "class State(TypedDict):\n", - " messages: Annotated[list, add_messages]\n", - "\n", - "def call_model(state: State, config: RunnableConfig, *, store: BaseStore):\n", - " # Get configuration\n", - " user_id = config[\"configurable\"][\"user_id\"]\n", - " session_id = config[\"configurable\"][\"session_id\"]\n", - " \n", - " # Store user message\n", - " conversation_namespace = (user_id, session_id)\n", - " user_message = state['messages'][-1]\n", - " \n", - " print(f\"Storing user message: {user_message.content[:50]}...\")\n", - " store.put(\n", - " conversation_namespace,\n", - " f\"user-msg-{int(time.time())}\",\n", - " value={\"message\": user_message}\n", - " )\n", - " \n", - " # Search for relevant memories\n", - " memory_namespace = (\"summaries\", user_id, session_id)\n", - " memories = store.search(\n", - " memory_namespace,\n", - " query=user_message.content,\n", - " limit=3\n", - " )\n", - " \n", - " print(f\"Found {len(memories)} relevant memories\")\n", - " \n", - " # Add memory context to messages if available\n", - " messages = state[\"messages\"].copy()\n", - " if memories:\n", - " memory_context = \"\\n\".join([\n", - " f\"Memory: {mem.value.get('content', '')[:100]}...\"\n", - " for mem in memories[:2]\n", - " ])\n", - " 
context_msg = SystemMessage(f\"Relevant memories:\\n{memory_context}\")\n", - " messages.insert(-1, context_msg)\n", - " \n", - " # Generate response\n", - " result = llm.invoke(messages)\n", - " \n", - " # Store AI response\n", - " print(f\"Storing AI response: {result.content[:50]}...\")\n", - " store.put(\n", - " conversation_namespace,\n", - " f\"ai-msg-{int(time.time())}\",\n", - " value={\"message\": result}\n", - " )\n", - " \n", - " return {\"messages\": [result]}\n", - "\n", - "# Build graph\n", - "graph_builder = StateGraph(State)\n", - "graph_builder.add_node(\"chatbot\", call_model)\n", - "graph_builder.add_edge(START, \"chatbot\")\n", - "\n", - "# Compile with memory store\n", - "checkpointer = InMemorySaver()\n", - "graph = graph_builder.compile(checkpointer=checkpointer, store=store)\n", - "\n", - "print(\"✅ Agent with memory compiled successfully\")" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113835000#7e243282\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "=== Testing Agent with Memory ===\n", - "\n", - "--- Conversation 1 ---\n", - "User: Hi, I'm interested in learning about machine learning\n", - "Storing user message: Hi, I'm interested in learning about machine learn...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Retrieved 0 memories from namespace: /summaries/demo-user/demo-session\n", - "INFO:langchain_aws.chat_models.bedrock_converse:Using Bedrock Converse API to generate response\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found 0 relevant memories\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113843000#fcf5f078\n" - ] - }, - { - "name": 
"stdout", - "output_type": "stream", - "text": [ - "Storing AI response: # Introduction to Machine Learning\n", - "\n", - "I'm happy to h...\n", - "AI: # Introduction to Machine Learning\n", - "\n", - "I'm happy to help you learn about machine learning! Machine learning (ML) is a fascinating field where computers learn patterns from data without being explicitly p...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113845000#2f84629b\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "--- Conversation 2 ---\n", - "User: What programming languages are best for ML?\n", - "Storing user message: What programming languages are best for ML?...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Retrieved 0 memories from namespace: /summaries/demo-user/demo-session\n", - "INFO:langchain_aws.chat_models.bedrock_converse:Using Bedrock Converse API to generate response\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found 0 relevant memories\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113852000#30e87561\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Storing AI response: # Programming Languages for Machine Learning\n", - "\n", - "## T...\n", - "AI: # Programming Languages for Machine Learning\n", - "\n", - "## Top Languages for Machine Learning\n", - "\n", - "### Python\n", - "**The clear frontrunner for ML**\n", - "- Exceptional ecosystem of libraries: scikit-learn, TensorFlow, PyTorch...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113855000#9fb11fb6\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "--- 
Conversation 3 ---\n", - "User: Can you remind me what we discussed about programming?\n", - "Storing user message: Can you remind me what we discussed about programm...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Retrieved 0 memories from namespace: /summaries/demo-user/demo-session\n", - "INFO:langchain_aws.chat_models.bedrock_converse:Using Bedrock Converse API to generate response\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found 0 relevant memories\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:bedrock_agentcore.memory.client:Created event: 0000001757113860000#0a1659ad\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Storing AI response: # Our Previous Discussion on Programming for Machi...\n", - "AI: # Our Previous Discussion on Programming for Machine Learning\n", - "\n", - "In our conversation, we discussed programming languages that are best suited for machine learning work. 
Here's a summary of what I shared...\n" - ] - } - ], - "source": [ - "import time\n", - "\n", - "# Test the agent with memory\n", - "print(\"=== Testing Agent with Memory ===\")\n", - "\n", - "# Configuration\n", - "config = {\n", - " \"configurable\": {\n", - " \"user_id\": \"demo-user\",\n", - " \"session_id\": \"demo-session\",\n", - " \"thread_id\": \"demo-thread\"\n", - " }\n", - "}\n", - "\n", - "# Test conversations\n", - "test_messages = [\n", - " \"Hi, I'm interested in learning about machine learning\",\n", - " \"What programming languages are best for ML?\",\n", - " \"Can you remind me what we discussed about programming?\"\n", - "]\n", - "\n", - "for i, message in enumerate(test_messages):\n", - " print(f\"\\n--- Conversation {i+1} ---\")\n", - " print(f\"User: {message}\")\n", - " \n", - " try:\n", - " result = graph.invoke(\n", - " {\"messages\": [HumanMessage(message)]},\n", - " config\n", - " )\n", - " \n", - " ai_response = result['messages'][-1].content\n", - " print(f\"AI: {ai_response[:200]}...\")\n", - " \n", - " except Exception as e:\n", - " print(f\"❌ Agent failed: {e}\")\n", - " \n", - " # Wait between messages\n", - " time.sleep(2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Key Features Summary\n", - "\n", - "The BedrockAgentCoreMemoryStore provides:\n", - "\n", - "### Core Operations\n", - "- **put()**: Store messages with automatic processing by AgentCore\n", - "- **search()**: Semantic search and listing of processed memories\n", - "- **get()**: Retrieve individual memory records by ID\n", - "- **delete()**: Remove memory records\n", - "- **batch()**: Execute multiple operations efficiently\n", - "\n", - "### Memory Processing\n", - "- Automatic message processing and summarization\n", - "- Semantic search capabilities\n", - "- User preference extraction\n", - "- Long-term memory persistence\n", - "\n", - "### Integration Features\n", - "- LangGraph store interface compliance\n", - "- Support for all 
LangChain message types\n", - "- Namespace-based organization\n", - "- Configurable search parameters\n", - "\n", - "### Limitations\n", - "- No async operations support\n", - "- TTL managed by AgentCore service\n", - "- Custom indexing handled by AgentCore\n", - "- Processing delay for memory availability\n", - "\n", - "This memory store enables persistent, searchable memory for conversational AI agents with automatic processing and intelligent retrieval capabilities." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.10" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} From 81c4b1b245d9fdaa9b5760273b31edb3c2324451 Mon Sep 17 00:00:00 2001 From: Jack Gordley Date: Thu, 25 Sep 2025 21:55:21 -0700 Subject: [PATCH 4/4] Added langmem style tool factories for search and retrieval --- .../langgraph_checkpoint_aws/__init__.py | 8 + .../agentcore/__init__.py | 13 +- .../agentcore/tools.py | 453 +++++++++++++++ libs/langgraph-checkpoint-aws/pyproject.toml | 1 + .../tests/unit_tests/agentcore/test_tools.py | 523 ++++++++++++++++++ ...ntcore_memory_store_long_term_search.ipynb | 187 +++---- 6 files changed, 1073 insertions(+), 112 deletions(-) create mode 100644 libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/agentcore/tools.py create mode 100644 libs/langgraph-checkpoint-aws/tests/unit_tests/agentcore/test_tools.py diff --git a/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/__init__.py b/libs/langgraph-checkpoint-aws/langgraph_checkpoint_aws/__init__.py index 2f79b184..3d59c03f 100644 --- 
"""
Tool factories for AgentCore Memory Store operations.

This module provides tool factories for creating LangChain tools that interact with
AgentCore Memory Store, following the pattern established by langmem but adapted
for the AgentCore service specifics.
"""

import functools
import logging
import typing
import uuid
from typing import Optional, Type

from langchain_core.messages import BaseMessage
from langchain_core.tools import StructuredTool
from langgraph.store.base import BaseStore
from langgraph.utils.config import get_config, get_store

if typing.TYPE_CHECKING:
    from langchain_core.tools.base import ArgsSchema

try:
    from pydantic import ConfigDict
except ImportError:  # pydantic v1 environments have no ConfigDict
    ConfigDict = None

logger = logging.getLogger(__name__)


class NamespaceTemplate:
    """Template for namespace configuration with runtime substitution.

    Parts containing ``{placeholder}`` markers are formatted from the runnable
    config's ``configurable`` mapping at call time; static parts pass through.
    """

    def __init__(self, namespace: tuple[str, ...] | str):
        # Normalize a bare string to a one-element tuple so __call__ can
        # treat both forms uniformly.
        if isinstance(namespace, str):
            self.namespace_parts = (namespace,)
        else:
            self.namespace_parts = namespace

    def __call__(self, config: dict | None = None) -> tuple[str, ...]:
        """Format the namespace with runtime configuration.

        Args:
            config: Optional runnable config. When omitted, the active
                LangGraph config is looked up via ``get_config()``.

        Returns:
            The namespace tuple with all placeholders substituted.

        Raises:
            ValueError: If a placeholder references a key missing from
                ``config["configurable"]``.
        """
        if not config:
            try:
                config = get_config()
            except RuntimeError:
                # Outside a runnable context there is nothing to substitute;
                # return the raw template so tools can still be constructed
                # ahead of time.
                return self.namespace_parts

        configurable = config.get("configurable", {})
        formatted_parts = []

        for part in self.namespace_parts:
            if "{" in part and "}" in part:
                # Substitute placeholders from the configurable mapping.
                try:
                    formatted_parts.append(part.format(**configurable))
                except KeyError as e:
                    raise ValueError(
                        f"Missing required configurable key for namespace: {e}"
                    )
            else:
                formatted_parts.append(part)

        return tuple(formatted_parts)


def create_manage_memory_tool(
    namespace: tuple[str, ...] | str,
    *,
    instructions: str = "Proactively call this tool when you:\n\n"
    "1. Identify a new USER preference.\n"
    "2. Receive an explicit USER request to remember something or otherwise alter your behavior.\n"
    "3. Are working and want to record important context.\n"
    "4. Identify that an existing MEMORY is incorrect or outdated.\n",
    schema: Type = BaseMessage,
    actions_permitted: Optional[
        tuple[typing.Literal["create", "update", "delete"], ...]
    ] = ("create",),
    store: Optional[BaseStore] = None,
    name: str = "manage_memory",
):
    """Create a tool for managing persistent memories in AgentCore Memory.

    This function creates a tool that allows AI assistants to create memories
    that are automatically processed by AgentCore Memory service. The tool stores
    conversational events that get transformed into searchable memories.

    Args:
        namespace: The namespace structure for organizing memories. For AgentCore,
            this should be a tuple of (actor_id, session_id). Uses runtime
            configuration with placeholders like `{actor_id}` and `{thread_id}`.
        instructions: Custom instructions for when to use the memory tool.
        schema: The schema for memory content. Defaults to BaseMessage for AgentCore.
        actions_permitted: Currently only "create" is supported for AgentCore Memory.
        store: The BaseStore to use. If not provided, uses the configured store.
        name: The name of the tool.

    Returns:
        A StructuredTool that can be used for memory management.

    Example:
        ```python
        from langgraph.prebuilt import create_react_agent
        from langgraph_checkpoint_aws.agentcore import (
            AgentCoreMemoryStore,
            create_manage_memory_tool,
        )

        memory_tool = create_manage_memory_tool(
            namespace=("{actor_id}", "{thread_id}"),
        )

        store = AgentCoreMemoryStore(
            memory_id="memory_abc123",
            region_name="us-west-2"
        )

        agent = create_react_agent(
            "anthropic:claude-3-5-sonnet-latest",
            tools=[memory_tool],
            store=store,
        )
        ```
    """
    if actions_permitted != ("create",):
        logger.warning(
            "AgentCore Memory currently only supports 'create' action. "
            "Other actions will be ignored."
        )

    namespacer = NamespaceTemplate(namespace)
    initial_store = store

    def _validate_and_resolve(
        content: typing.Any,
        action: str,
        id: Optional[uuid.UUID],
    ) -> tuple[str, ...]:
        """Shared validation for the sync/async tool bodies; returns the namespace."""
        if action != "create":
            raise ValueError(
                "AgentCore Memory only supports 'create' action. "
                "Memory updates and deletions happen automatically through the service."
            )

        if id is not None:
            logger.warning(
                "AgentCore Memory does not support custom IDs. Ignoring provided ID."
            )

        resolved = namespacer()
        if len(resolved) != 2:
            raise ValueError(
                "AgentCore Memory requires namespace to be (actor_id, session_id)"
            )

        # AgentCore events must be LangChain messages so the service can
        # attribute them to a conversational role.
        if not isinstance(content, BaseMessage):
            raise ValueError("Content must be a BaseMessage for AgentCore Memory")

        return resolved

    async def amanage_memory(
        content: schema,  # type: ignore
        action: typing.Literal["create"] = "create",  # type: ignore
        *,
        id: Optional[uuid.UUID] = None,
    ):
        """Async version of manage_memory."""
        store = _get_store(initial_store)
        resolved = _validate_and_resolve(content, action, id)

        # Generate a unique key for this memory event.
        key = str(uuid.uuid4())
        await store.aput(resolved, key, {"message": content})

        return f"Created memory event {key} in AgentCore Memory"

    def manage_memory(
        content: schema,  # type: ignore
        action: typing.Literal["create"] = "create",  # type: ignore
        *,
        id: Optional[uuid.UUID] = None,
    ):
        """Sync version of manage_memory."""
        store = _get_store(initial_store)
        resolved = _validate_and_resolve(content, action, id)

        # Generate a unique key for this memory event.
        key = str(uuid.uuid4())
        store.put(resolved, key, {"message": content})

        return f"Created memory event {key} in AgentCore Memory"

    description = f"""Create a memory event in AgentCore Memory.
The memory will be automatically processed and made searchable by the AgentCore service.
{instructions}"""

    return _ToolWithRequired.from_function(
        manage_memory, amanage_memory, name=name, description=description
    )


def create_search_memory_tool(
    namespace: tuple[str, ...] | str,
    *,
    instructions: str = "Search for relevant memories and user preferences to provide context for your responses.",
    store: BaseStore | None = None,
    response_format: typing.Literal["content", "content_and_artifact"] = "content",
    name: str = "search_memory",
):
    """Create a tool for searching memories in AgentCore Memory Store.

    This function creates a tool that allows AI assistants to search through
    processed memories using semantic search powered by AgentCore Memory service.

    Args:
        namespace: The namespace for searching memories. For AgentCore, this is
            typically ("facts", "{actor_id}") for user facts/preferences.
        instructions: Custom instructions for when to use the search tool.
        store: The BaseStore to use. If not provided, uses the configured store.
        response_format: Whether to return just content or content with artifacts.
        name: The name of the tool.

    Returns:
        A StructuredTool for memory search.

    Example:
        ```python
        search_tool = create_search_memory_tool(
            namespace=("facts", "{actor_id}"),
        )
        ```
    """
    namespacer = NamespaceTemplate(namespace)
    initial_store = store

    async def asearch_memory(
        query: str,
        *,
        limit: int = 10,
        offset: int = 0,
        filter: Optional[dict] = None,
    ):
        """Async version of search_memory."""
        store = _get_store(initial_store)
        resolved = namespacer()

        memories = await store.asearch(
            resolved,
            query=query,
            filter=filter,
            limit=limit,
            offset=offset,
        )

        if response_format == "content_and_artifact":
            return _format_search_results(memories), memories
        return _format_search_results(memories)

    def search_memory(
        query: str,
        *,
        limit: int = 10,
        offset: int = 0,
        filter: Optional[dict] = None,
    ):
        """Sync version of search_memory."""
        store = _get_store(initial_store)
        resolved = namespacer()

        memories = store.search(
            resolved,
            query=query,
            filter=filter,
            limit=limit,
            offset=offset,
        )

        if response_format == "content_and_artifact":
            return _format_search_results(memories), memories
        return _format_search_results(memories)

    description = f"""Search AgentCore Memory for relevant information.
{instructions}"""

    if response_format == "content_and_artifact":
        # The tool body returns a (content, artifact) tuple in this mode, so
        # the StructuredTool must be told to unpack it; otherwise LangChain
        # would serialize the whole tuple as the tool content.
        return _SearchToolWithArtifacts.from_function(
            search_memory,
            asearch_memory,
            name=name,
            description=description,
            response_format="content_and_artifact",
        )
    else:
        return StructuredTool.from_function(
            search_memory,
            asearch_memory,
            name=name,
            description=description,
        )


def _get_store(initial_store: BaseStore | None = None) -> BaseStore:
    """Get the store instance, either from parameter or configuration.

    Raises:
        RuntimeError: If no store was supplied and none is configured in the
            current graph context.
    """
    try:
        if initial_store is not None:
            return initial_store
        else:
            return get_store()
    except RuntimeError as e:
        raise RuntimeError(
            "Could not get store. Make sure a store is configured in your graph."
        ) from e


def _format_search_results(memories: list) -> str:
    """Format search results into a numbered, human-readable listing."""
    if not memories:
        return "No memories found."

    results = []
    for i, memory in enumerate(memories, 1):
        content = memory.value.get("content", "")
        score = memory.score
        memory_id = memory.key

        result_str = f"{i}. {content}"
        if score is not None:
            result_str += f" (relevance: {score:.2f})"
        result_str += f" [id: {memory_id}]"

        results.append(result_str)

    return "\n".join(results)


class _ToolWithRequired(StructuredTool):
    """Tool whose generated JSON schema always carries a ``required`` list."""

    @functools.cached_property
    def tool_call_schema(self) -> "ArgsSchema":
        tcs = super().tool_call_schema
        try:
            if tcs.model_config:
                tcs.model_config["json_schema_extra"] = _ensure_schema_contains_required
            elif ConfigDict is not None:
                tcs.model_config = ConfigDict(
                    json_schema_extra=_ensure_schema_contains_required
                )
        except Exception:
            # Schema patching is best-effort; an unpatchable schema is still
            # usable, just without the guaranteed "required" key.
            pass
        return tcs


class _SearchToolWithArtifacts(_ToolWithRequired):
    """Search tool that returns both content and artifacts as a tuple.

    Inherits the schema patching from _ToolWithRequired; exists as a distinct
    type so artifact-returning search tools are distinguishable.
    """


def _ensure_schema_contains_required(schema: dict) -> None:
    """Ensure a generated JSON schema dict contains a ``required`` key."""
    schema.setdefault("required", [])


# Additional helper tool for direct event storage (AgentCore specific)
def create_store_event_tool(
    *,
    store: Optional[BaseStore] = None,
    name: str = "store_conversation_event",
):
    """Create a tool for storing conversation events directly in AgentCore Memory.

    This is an AgentCore-specific tool that allows storing conversation events
    that will be processed into memories by the AgentCore service.

    Args:
        store: The BaseStore to use. If not provided, uses the configured store.
        name: The name of the tool.

    Returns:
        A StructuredTool for storing conversation events.

    Example:
        ```python
        store_tool = create_store_event_tool()
        ```
    """
    initial_store = store

    async def astore_event(
        message: BaseMessage,
        actor_id: str,
        session_id: str,
    ):
        """Store a conversation event asynchronously."""
        store = _get_store(initial_store)
        namespace = (actor_id, session_id)
        key = str(uuid.uuid4())

        await store.aput(namespace, key, {"message": message})
        return f"Stored conversation event {key}"

    def store_event(
        message: BaseMessage,
        actor_id: str,
        session_id: str,
    ):
        """Store a conversation event synchronously."""
        store = _get_store(initial_store)
        namespace = (actor_id, session_id)
        key = str(uuid.uuid4())

        store.put(namespace, key, {"message": message})
        return f"Stored conversation event {key}"

    description = """Store a conversation event in AgentCore Memory.
This event will be automatically processed into searchable memories by the service."""

    return StructuredTool.from_function(
        store_event,
        astore_event,
        name=name,
        description=description,
    )
placeholders).""" + template = NamespaceTemplate(("static", "namespace")) + # Provide a config to avoid runtime context error + config = {"configurable": {}} + result = template(config) + assert result == ("static", "namespace") + + def test_call_with_placeholders(self): + """Test calling template with placeholders.""" + template = NamespaceTemplate(("facts", "{actor_id}")) + config = {"configurable": {"actor_id": "user123"}} + result = template(config) + assert result == ("facts", "user123") + + def test_call_with_multiple_placeholders(self): + """Test calling template with multiple placeholders.""" + template = NamespaceTemplate(("{actor_id}", "{thread_id}", "memories")) + config = {"configurable": {"actor_id": "user123", "thread_id": "thread456"}} + result = template(config) + assert result == ("user123", "thread456", "memories") + + def test_call_missing_placeholder_value(self): + """Test calling template with missing placeholder value.""" + template = NamespaceTemplate(("facts", "{actor_id}")) + config = {"configurable": {}} # Missing actor_id + + with pytest.raises(ValueError, match="Missing required configurable key"): + template(config) + + @patch("langgraph_checkpoint_aws.agentcore.tools.get_config") + def test_call_without_config_uses_get_config(self, mock_get_config): + """Test calling template without config uses get_config.""" + # Test successful get_config call + mock_get_config.return_value = {"configurable": {"actor_id": "user123"}} + template = NamespaceTemplate(("facts", "{actor_id}")) + result = template() + assert result == ("facts", "user123") + mock_get_config.assert_called_once() + + # Test RuntimeError handling - returns template as-is + mock_get_config.reset_mock() + mock_get_config.side_effect = RuntimeError("Not in runnable context") + template = NamespaceTemplate(("facts", "{actor_id}")) + result = template() + assert result == ("facts", "{actor_id}") # Returns template unchanged + mock_get_config.assert_called_once() + + +class 
TestCreateManageMemoryTool: + """Test suite for create_manage_memory_tool.""" + + @pytest.fixture + def mock_store(self): + """Create a mock store.""" + store = Mock() + store.put = Mock() + store.aput = AsyncMock() + return store + + def test_create_tool_basic(self, mock_store): + """Test creating a basic memory management tool.""" + tool = create_manage_memory_tool( + namespace=("{actor_id}", "{thread_id}"), + store=mock_store, + ) + + assert isinstance(tool, StructuredTool) + assert tool.name == "manage_memory" + assert "Create a memory event" in tool.description + + def test_create_tool_with_custom_params(self, mock_store): + """Test creating tool with custom parameters.""" + custom_instructions = "Custom instructions for memory tool" + tool = create_manage_memory_tool( + namespace=("memories", "{user_id}"), + instructions=custom_instructions, + name="custom_memory_tool", + store=mock_store, + ) + + assert tool.name == "custom_memory_tool" + assert custom_instructions in tool.description + + def test_tool_sync_invocation(self, mock_store): + """Test synchronous tool invocation.""" + tool = create_manage_memory_tool( + namespace=("user123", "session456"), + store=mock_store, + ) + + message = HumanMessage(content="Test message") + result = tool.invoke({"content": message}) + + assert "Created memory event" in result + mock_store.put.assert_called_once() + call_args = mock_store.put.call_args + assert call_args[0][0] == ("user123", "session456") # namespace + assert call_args[0][2]["message"] == message # value + + @pytest.mark.asyncio + async def test_tool_async_invocation(self, mock_store): + """Test asynchronous tool invocation.""" + tool = create_manage_memory_tool( + namespace=("user123", "session456"), + store=mock_store, + ) + + message = AIMessage(content="Test AI response") + result = await tool.ainvoke({"content": message}) + + assert "Created memory event" in result + mock_store.aput.assert_called_once() + call_args = mock_store.aput.call_args + assert 
call_args[0][0] == ("user123", "session456") # namespace + assert call_args[0][2]["message"] == message # value + + def test_tool_with_runtime_namespace(self, mock_store): + """Test tool with runtime namespace resolution.""" + tool = create_manage_memory_tool( + namespace=("{actor_id}", "{thread_id}"), + store=mock_store, + ) + + with patch( + "langgraph_checkpoint_aws.agentcore.tools.get_config" + ) as mock_get_config: + mock_get_config.return_value = { + "configurable": {"actor_id": "user123", "thread_id": "thread456"} + } + + message = HumanMessage(content="Test message") + result = tool.invoke({"content": message}) + + assert "Created memory event" in result + call_args = mock_store.put.call_args + assert call_args[0][0] == ("user123", "thread456") + + def test_tool_invalid_namespace_length(self, mock_store): + """Test tool with invalid namespace length.""" + tool = create_manage_memory_tool( + namespace=("single_element",), + store=mock_store, + ) + + message = HumanMessage(content="Test message") + with pytest.raises( + ValueError, match="namespace to be \\(actor_id, session_id\\)" + ): + tool.invoke({"content": message}) + + def test_tool_ignores_provided_id(self, mock_store): + """Test that tool ignores provided ID.""" + tool = create_manage_memory_tool( + namespace=("user123", "session456"), + store=mock_store, + ) + + message = HumanMessage(content="Test message") + result = tool.invoke({"content": message, "id": uuid.uuid4()}) + + assert "Created memory event" in result + # Should still work, just ignore the ID + + @patch("langgraph_checkpoint_aws.agentcore.tools.get_store") + def test_tool_without_store_uses_get_store(self, mock_get_store, mock_store): + """Test tool without store parameter uses get_store.""" + mock_get_store.return_value = mock_store + + tool = create_manage_memory_tool( + namespace=("user123", "session456"), + ) + + message = HumanMessage(content="Test message") + tool.invoke({"content": message}) + + 
mock_get_store.assert_called_once() + mock_store.put.assert_called_once() + + +class TestCreateSearchMemoryTool: + """Test suite for create_search_memory_tool.""" + + @pytest.fixture + def mock_store(self): + """Create a mock store with search capabilities.""" + store = Mock() + store.search = Mock() + store.asearch = AsyncMock() + return store + + @pytest.fixture + def sample_search_results(self): + """Create sample search results.""" + return [ + SearchItem( + namespace=("facts", "user123"), + key="mem-123", + value={"content": "User likes coffee"}, + created_at="2024-01-01T00:00:00Z", + updated_at="2024-01-01T00:00:00Z", + score=0.95, + ), + SearchItem( + namespace=("facts", "user123"), + key="mem-456", + value={"content": "User is allergic to peanuts"}, + created_at="2024-01-02T00:00:00Z", + updated_at="2024-01-02T00:00:00Z", + score=0.87, + ), + ] + + def test_create_search_tool_basic(self, mock_store): + """Test creating a basic search tool.""" + tool = create_search_memory_tool( + namespace=("facts", "{actor_id}"), + store=mock_store, + ) + + assert isinstance(tool, StructuredTool) + assert tool.name == "search_memory" + assert "Search AgentCore Memory" in tool.description + + def test_search_tool_sync_invocation(self, mock_store, sample_search_results): + """Test synchronous search tool invocation.""" + mock_store.search.return_value = sample_search_results + + tool = create_search_memory_tool( + namespace=("facts", "user123"), + store=mock_store, + ) + + result = tool.invoke({"query": "user preferences"}) + + assert "User likes coffee" in result + assert "User is allergic to peanuts" in result + assert "relevance: 0.95" in result + assert "relevance: 0.87" in result + + mock_store.search.assert_called_once_with( + ("facts", "user123"), + query="user preferences", + filter=None, + limit=10, + offset=0, + ) + + @pytest.mark.asyncio + async def test_search_tool_async_invocation( + self, mock_store, sample_search_results + ): + """Test asynchronous search tool 
invocation.""" + mock_store.asearch.return_value = sample_search_results + + tool = create_search_memory_tool( + namespace=("facts", "user123"), + store=mock_store, + ) + + result = await tool.ainvoke({"query": "user preferences", "limit": 5}) + + assert "User likes coffee" in result + mock_store.asearch.assert_called_once_with( + ("facts", "user123"), + query="user preferences", + filter=None, + limit=5, + offset=0, + ) + + def test_search_tool_with_content_and_artifact( + self, mock_store, sample_search_results + ): + """Test search tool with content_and_artifact response format.""" + mock_store.search.return_value = sample_search_results + + tool = create_search_memory_tool( + namespace=("facts", "user123"), + store=mock_store, + response_format="content_and_artifact", + ) + + # When response_format is "content_and_artifact", the tool returns a tuple + result = tool.invoke({"query": "test"}) + + # The result should be a tuple + assert isinstance(result, tuple) + assert len(result) == 2 + + content, artifacts = result + assert isinstance(content, str) + assert artifacts == sample_search_results + + def test_search_tool_empty_results(self, mock_store): + """Test search tool with empty results.""" + mock_store.search.return_value = [] + + tool = create_search_memory_tool( + namespace=("facts", "user123"), + store=mock_store, + ) + + result = tool.invoke({"query": "nonexistent"}) + assert result == "No memories found." 
+ + def test_search_tool_with_filter(self, mock_store, sample_search_results): + """Test search tool with filter parameter.""" + mock_store.search.return_value = sample_search_results + + tool = create_search_memory_tool( + namespace=("facts", "user123"), + store=mock_store, + ) + + filter_dict = {"category": "preferences"} + tool.invoke({"query": "test", "filter": filter_dict}) + + mock_store.search.assert_called_once_with( + ("facts", "user123"), + query="test", + filter=filter_dict, + limit=10, + offset=0, + ) + + def test_search_tool_with_runtime_namespace(self, mock_store): + """Test search tool with runtime namespace resolution.""" + mock_store.search.return_value = [] + + tool = create_search_memory_tool( + namespace=("facts", "{actor_id}"), + store=mock_store, + ) + + with patch( + "langgraph_checkpoint_aws.agentcore.tools.get_config" + ) as mock_get_config: + mock_get_config.return_value = {"configurable": {"actor_id": "user456"}} + + tool.invoke({"query": "test"}) + + call_args = mock_store.search.call_args + assert call_args[0][0] == ("facts", "user456") + + +class TestCreateStoreEventTool: + """Test suite for create_store_event_tool.""" + + @pytest.fixture + def mock_store(self): + """Create a mock store.""" + store = Mock() + store.put = Mock() + store.aput = AsyncMock() + return store + + def test_create_store_event_tool(self, mock_store): + """Test creating a store event tool.""" + tool = create_store_event_tool(store=mock_store) + + assert isinstance(tool, StructuredTool) + assert tool.name == "store_conversation_event" + assert "Store a conversation event" in tool.description + + def test_store_event_sync_invocation(self, mock_store): + """Test synchronous store event invocation.""" + tool = create_store_event_tool(store=mock_store) + + message = HumanMessage(content="Test message") + result = tool.invoke( + {"message": message, "actor_id": "user123", "session_id": "session456"} + ) + + assert "Stored conversation event" in result + 
mock_store.put.assert_called_once() + call_args = mock_store.put.call_args + assert call_args[0][0] == ("user123", "session456") # namespace + assert call_args[0][2]["message"] == message # value + + @pytest.mark.asyncio + async def test_store_event_async_invocation(self, mock_store): + """Test asynchronous store event invocation.""" + tool = create_store_event_tool(store=mock_store) + + message = AIMessage(content="AI response") + result = await tool.ainvoke( + {"message": message, "actor_id": "user123", "session_id": "session456"} + ) + + assert "Stored conversation event" in result + mock_store.aput.assert_called_once() + + def test_store_event_with_custom_name(self, mock_store): + """Test store event tool with custom name.""" + tool = create_store_event_tool(store=mock_store, name="custom_store_tool") + + assert tool.name == "custom_store_tool" + + +class TestGetStore: + """Test suite for _get_store helper function.""" + + def test_get_store_with_provided_store(self): + """Test _get_store returns provided store.""" + from langgraph_checkpoint_aws.agentcore.tools import _get_store + + mock_store = Mock() + result = _get_store(mock_store) + assert result == mock_store + + @patch("langgraph_checkpoint_aws.agentcore.tools.get_store") + def test_get_store_without_provided_store(self, mock_get_store): + """Test _get_store uses get_store when no store provided.""" + from langgraph_checkpoint_aws.agentcore.tools import _get_store + + mock_store = Mock() + mock_get_store.return_value = mock_store + + result = _get_store(None) + assert result == mock_store + mock_get_store.assert_called_once() + + @patch("langgraph_checkpoint_aws.agentcore.tools.get_store") + def test_get_store_runtime_error(self, mock_get_store): + """Test _get_store handles RuntimeError.""" + from langgraph_checkpoint_aws.agentcore.tools import _get_store + + mock_get_store.side_effect = RuntimeError("No store configured") + + with pytest.raises(RuntimeError, match="Could not get store"): + 
_get_store(None) + + +class TestFormatSearchResults: + """Test suite for _format_search_results helper function.""" + + def test_format_empty_results(self): + """Test formatting empty search results.""" + from langgraph_checkpoint_aws.agentcore.tools import _format_search_results + + result = _format_search_results([]) + assert result == "No memories found." + + def test_format_single_result(self): + """Test formatting single search result.""" + from langgraph_checkpoint_aws.agentcore.tools import _format_search_results + + memory = SearchItem( + namespace=("facts", "user123"), + key="mem-123", + value={"content": "Test content"}, + created_at="2024-01-01T00:00:00Z", + updated_at="2024-01-01T00:00:00Z", + score=0.95, + ) + + result = _format_search_results([memory]) + assert "1. Test content" in result + assert "relevance: 0.95" in result + assert "id: mem-123" in result + + def test_format_multiple_results(self): + """Test formatting multiple search results.""" + from langgraph_checkpoint_aws.agentcore.tools import _format_search_results + + memories = [ + SearchItem( + namespace=("facts", "user123"), + key="mem-123", + value={"content": "First memory"}, + created_at="2024-01-01T00:00:00Z", + updated_at="2024-01-01T00:00:00Z", + score=0.95, + ), + SearchItem( + namespace=("facts", "user123"), + key="mem-456", + value={"content": "Second memory"}, + created_at="2024-01-02T00:00:00Z", + updated_at="2024-01-02T00:00:00Z", + score=None, # No score + ), + ] + + result = _format_search_results(memories) + assert "1. First memory" in result + assert "2. 
Second memory" in result + assert "relevance: 0.95" in result + assert "relevance:" not in result.split("\n")[1] # No score for second item diff --git a/samples/memory/agentcore_memory_store_long_term_search.ipynb b/samples/memory/agentcore_memory_store_long_term_search.ipynb index b215a1d2..4f33978a 100644 --- a/samples/memory/agentcore_memory_store_long_term_search.ipynb +++ b/samples/memory/agentcore_memory_store_long_term_search.ipynb @@ -33,7 +33,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -41,24 +41,26 @@ "from langchain.chat_models import init_chat_model\n", "from langgraph.prebuilt import create_react_agent\n", "from langchain_core.messages import HumanMessage, AIMessage\n", - "from langchain_core.tools import tool\n", "from langchain_core.runnables import RunnableConfig\n", "from langgraph.store.base import BaseStore\n", "import uuid\n", "import logging\n", - "logging.getLogger().setLevel(logging.DEBUG)\n", - "\n", - "from langgraph.utils.config import get_store" + "logging.getLogger().setLevel(logging.DEBUG)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# Import the AgentCoreMemoryStore that we will use as a store\n", - "from langgraph_checkpoint_aws import AgentCoreMemoryStore\n", + "from langgraph_checkpoint_aws import (\n", + " AgentCoreMemoryStore,\n", + " create_search_memory_tool\n", + ")\n", + "\n", + "from langgraph.checkpoint.memory import InMemorySaver\n", "\n", "# For this example, we will just use an InMemorySaver to save context.\n", "# In production, we highly recommend the AgentCoreMemorySaver as a checkpointer\n", @@ -80,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ @@ -101,38 +103,27 @@ "source": [ "### Define our long term memory retrieval tools\n", "\n", - "In LangChain, tools can be injected at 
runtime with a Store argument [InjectedStore](https://langchain-ai.github.io/langgraph/reference/agents/#langgraph.prebuilt.tool_node.InjectedStore) to ensure that we can dynamically use the store to access different namespaces. In our case, this means we can pass the `actor_id` to the tool through the `config` (`RunnableConfig`). This is filled in when the tool is called so the tool can only access that actor_id's memories. \n", + "We will use a tool factory built for the AgentCore memory store so we can define tools for any namespace that we want to search over based on the strategies created for our memory resource. These tool factories take care of the logic under the hood for loading `actor_id` and `thread_id` from the runtime configuration, ensuring that a tool is only searching over the namespaces for that current user and session. \n", + "\n", + "To accomplish this, similar to how AgentCore memory implements namespace placeholders for `{actor_id}` and `{session_id}`, we will also provide these to the tool factory so that it knows how to inject these arguments at runtime.\n", "\n", "The tool will search the namespace we specify, in this case the `/facts/{actor_id}` namespace which is a semantic memory namespace we specified above (at the top of the notebook). As the memories are extracted over time, these will be available to the agent through this tool." 
] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ - "@tool\n", - "def retrieve_user_facts_from_past_conversations(\n", - " query: str,\n", - " config: RunnableConfig,\n", - " limit: int = 3,\n", - ") -> str:\n", - " \"\"\"Retrieve facts about the user that might be helfpul in answering vague questions\"\"\"\n", - " \n", - " # Actor ID comes from the runtime config we specify when invoking the agent\n", - " actor_id = config[\"configurable\"][\"actor_id\"]\n", - "\n", - " # Namespace we defined, where semantic facts are extracted for the user across\n", - " # sessions. This is combined under the hood with `/` to match AgentCore namespaces\n", - " search_namespace = (\"facts\", actor_id)\n", - "\n", - " store = get_store()\n", - " \n", - " result = store.search(search_namespace, query=query, limit=limit)\n", - " return result\n", + "# Create the memory search tool using the factory\n", + "retrieve_past_conversation_facts = create_search_memory_tool(\n", + " namespace=(\"facts\", \"{actor_id}\"), # Placeholder for actor ID\n", + " instructions=\"Retrieve facts and user preferences about the user that might be helpful in answering vague questions\",\n", + " name=\"get_past_conversation_facts\",\n", + ")\n", "\n", - "tools = [retrieve_user_facts_from_past_conversations]" + "tools = [retrieve_past_conversation_facts]" ] }, { @@ -152,7 +143,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -215,21 +206,9 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 20, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAPUAAAG/CAIAAAAPZwaMAAAQAElEQVR4nOydB1wTZx/Hn7uEhL1FGbJUXCi4Wmutozhate7WbdVaV7W1rrp3Kw6s63XVqnXvPeu27oF74EAEVESGssm6958chABJzkAS7i7PVz7xcs/d5cnld8/9nv8zTkhRFMJgeIoQYTD8Besbw2ewvjF8Busbw2ewvjF8Busbw2ewvk1Idja6e+5DQkxmbpZCJlVIc1ShWIJCFEERyv8RvBKIUqhWkwULACxrrlG+FcDavDVqSCFBKSj1yrwtyUK75x2TpCgZoc5A/v6QhULHFFoTAgEhtiHLVbQJ/dzJ3k2AuAyB49+mYOei+JS3EmmuQmwjEIoIkbVAQFKSHJWOSAIpKJWuKZW+lQJFSgmqF5QvsFxU36RSikX0LbBCCjkqpm9697wD0gcnBJRCSr9RXifq9Uip7wINiGwEchklyaVyM2UK5WEplwri9oO87RxJxEGwvo3MpjmxqYm5tg7C6g2cGn3jijjO1aMpD6+mZXyQOjhb9Zvmj7gG1rfROL8n6d6l9y7lRD1/80W8Y3tEXGJ8TpVQx6++L4+4A9a3cdg2Py4tVdr1Z1/XCryt0sjlaN3UFyIbsu9kP8QRsL6NwIlNiQkx2X2486uXhp0LX1NI/t2oiogLYH2Xlk1/xEL1rO8kHnoSXWxf8CorQ9p/uj9iPZysFLOHPcteK+SURYkb6DbGGyrQW+bGIdaD9V1you9lJbzM6TvFImxJEbqN9kl/L718OBWxG6zvknN8U0Ld5i7IUmnZo0LkmWTEbrC+S8ipzYnQRNOwjeXqO7C2rY2dcPeSV4jFYH2XkCd30ms1slxx03zevlxCbA5iMVjfJeFpZBYlh1/XrPresWPHtGnTkOGMHz9+//79yARUrWcntCKuHGGvC8f6Lgm3zibbu1gh8/Lw4UNUIkq848fg4iF6eisNsRUc/y4Jaya/CKzl8GU3d2QCYmJiVq5cefPmTfhpateu3bdv39DQ0EGDBkVGRtIbbNq0qVq1atu3b//vv//u378vFovr1q37008/+fj4QOq4ceMEAoGnp+eGDRvmzZsHb+m97O3tz549i4zNrdMfrh5PHjI3ELESXH6XBEmOIrCWHTIBEokEpAwCXbp06YoVK4RC4a+//pqTk7N69erg4OC2bdveuHEDxH379u358+eHhIQsWLBgxowZKSkpkydPpo9gZWX1TMXChQvr1Klz8eJFWDllyhRTiBuo09RJLlMgtoL7f5cEKFn9a9ggE/Dy5UsQa48ePUDE8DY8PByKbZlMVmSzWrVqgR339fWFCwDeSqVSuAw+fPjg5OREEMTr1683btxobW0NSbm5ucikCBBJErGPc3yrWSP2gfVtMEmxEsJktz2QrIuLy/Tp09u0aVOvXj0ooevXr198Myjg4+PjIyIiwJ9kZmbSK+HCAH3DQkBAAC1uM0Gi9ym5voiN+sb+xGBkyuEBBDINYKb/+uuvxo0bb9my5YcffujYseORI0eKb3bu3LlRo0bVqFEDNr5+/fqyZcuKHASZGTliJ1jfBuNRQYQUJqyU+/v7jxw58tChQ2CgK1euPHXq1MePHxfZZu/evVDphDplUFAQGJL09HRUhigIBxcRYiVY3wZDqn7KN89N4msheHLgwAFYAIPRpEmTuXPngsN+9OhRkc3Aant4eKjfnj59GpUdcrkiIMgktZHSg/VdEkgBEXXLJEUmCHfmzJmLFi2Ki4uDuua6deugcgkuHJIqVqwIbhvcCPhsKLavXLkCsRRI3bx5M73vmzdvih8QvApcCeqNkbG5dymdALPG0uIb67tEOLgI455kIRMAUp44ceLRo0c7derUpUuXW7duQSw8MFAZXe7cuTNYEfAkT58+HTZsWKNGjcCCf/bZZwkJCRAiBC/+888/Hzt2rPgxBwwYAFfF6NGjs7OzkbF5cj3Nxp69Y+xx+05JuH32/cWDST9FVEY
Wz4pxz4PqOoZ1L4dYCS6/S0JoM2coFu7+x952afOQGCeRyyjWihvh+HeJ8Qq0uXI0qfYXjro2GDx4cFRUVPH1crkc7pl0u0xx9u3b5+zsjEwANHlCWEZrEmSJhEYaQnvQ8+TJk7py+++mBLcKZo9FGgL2JyVn2ahnX/X2rFxXe0P9u3fvoFlRaxK0KeoKUXt5eSGTAe2ayHB0ZSnzg2Lt9OgRf7LapOHyu+TUaeZ6cntC5bqVtKaWK8e6u7ZxL55Nc2MCgh0Qu8H+u+R83t7V1kG4YyEHhtkanSN/J1gJyXY/sH2uH6zvUtF3st+HZNmhNQnIkri0PzX2adaAmf6I9WD/bQQ2/vESCvIuI7yRBXBya2LMg6yBs/0RF8D6Ng5rp8UIRajvJH/Ea7YuiE9PkQz6g6WjGYqD9W00di15lfgyp0qIQ8u+Hoh3nNuVdO/ye7fy4h7juDEzGw3WtzF59TT3yD9vZBK5u5d1864e7j7mHqNpdNKS5ae3v331PEtoRTbuUK7mZ2wPmBQB69v4PLyScflIUnamTEASNg4CO0crWweBSEDlSAtOtdAKyTSD4wQiSeVM9ZoIhMrJ6hWKoithDVVsRJiARHJtw8RIIaGQafmJhSJSJtGyg5WIRBSR/l6anirNzpRTcmRjL6j9hXODVpycDAPr24REnvoQG5WZniqTShQKuUIqKUgSWCF5ocYfChoQi0pZoJJy4d8HLgNYQ6+kVEsEqXrggwBRxQYZQLKg2GF1ZCAPoRgJYR8S2TkJK1a1a/gVt+d4wfrmMOvXr8/IyBg+fDjC6AC3X3IYmUymq2cIhgafHQ6D9c0Ibr/kMFjfjOCzw2GkUqmVFedDkCYF65vD4PKbEXx2OAzWNyP47HAYrG9G8NnhMKBv7L/1g/XNYXD5zQg+OxwG65sRfHY4DNY3I/jscBisb0bw2eEw0L6D9a0ffHY4DC6/GcFnh8NgfTOCzw6HwfpmBJ8dDoP7VzGC9c1hcPnNCD47HAbrmxF8djgM1jcj+OxwGKxvRvDZ4TC4fskI1jeHweU3I/jscBhPT0+BgL3PLmMDePw8h0lMTJRIJAijG1x+cxgwJ6Z4ZCufwPrmMFjfjGB9cxisb0awvjkM1jcjWN8cBuubEaxvDoP1zQjWN4fB+mYE65vDYH0zgvXNYbC+GcH65jCgb7lcjjC6wfrmMLj8ZgTrm8NgfTOC9c1hsL4ZwfrmMFjfjGB9cxisb0awvjkM1jcj+PnF3KNly5bJyckEQdBv6V+wcuXKO3bsQJjC4PE73AP0Da9EPiRJWltbd+vWDWGKgfXNPXr16uXt7a25xsfHp3PnzghTDKxv7gHiDgsLU78VCATt2rVT2xWMJljfnKRv374VK1akl0HuXbt2RRhtYH1zEldX19atWyOVC4cFW1tbhNGGpcRPYh9lP72dmZVReDYF5T2dQqoTQJJIoaBXql41zgpBEpSCojenz5Z6DSAQEnKZKpVElCJvF/pohLL0KNhSY71ypeb29JHVa9QLmtvnH4FQKChYL5HKIiNvKeSyunXriUQi9cYFX0QDrUmaGSh0BvLfUlSBPOjPLboZUXCi1BsU/2iKICg5VfjjiEKnOB9rO6vAWnaVahntcrUIfa+b+jI3V24lIiU5RX55SqUs5VLB2S+mb+VNTp2UtzFFKfL8LilECjoGTcClkrcy72iEaqWiwBnT6wnlWScK/cT0svoI+Z+Yt73Gx+WvUb4qfzsCEgSaB1F/EQVBkZSWvQoyQ9B5KZQ3zayCXLWkqs8GUmZW1+6FvjKl/F66NtBEbENKcimhFdF/qr9AhEoP//W9avwL3+oOjTu6IwxHiDyZ+uja+wHTA0Q2qJTwXN9/TYqp/qlbSFMHhOEUb55Izux+NTg8AJUOPtcvLx5IhtszFjcX8QwSgVc5+nciKh187n8S+yTbzhl3sOEqTu6ipNfZqHTwufyWZMoYKzQY1kIKUHZOaX8/PhdvMogO4OGJnEUulyu
kWN8YjG6wvjEshUBG6FOD9Y1hKRQyQuwa6xvDZ7C+MXyGz/omSdwpmsMQBEmUOnzN5/i3QoEHl3IYilKUvvmCz+W3QKjsvIow3IQgjVB+81nfchm0geECnKsY5eaL65cYlkIao/KE9Y1hKQqFEfw3n+uXJIF4Hz85c/ZE87D679+n6t+sY+cWGzau0b/NocN74VBGnBBr2vRxo8cMRWUKn8tvBYVw/MTCwf4Ew1II1b9SgvVdiHbtm/bs0T8q6uH5/07b2dnVqlVn4oRZDvYO0dHPfvix+5zfFy1YONvZ2WXN6q2w8bHjBw8c3P3ixbOAgMpfNm/VpXMPxvYk8An9vh8cHx+7e89WOM5nDb8Y/tOYP8KnXLx4rmJFv949B7Rq1ZbeEtb8s2H1y9gXTk7OlStX/WXEb+XLV6CTVq5a/O+Jw7Y2tmFhX/n4+KkPDtbi77XLr1y9kJiYEBwc2qnDdw0bNkYGkpycNOv3iQ8e3PXx8e3erW/bNh0Z86MnSfOwQ4b1CQmpN3ni7I/LiGpig1LffvH8J4UQCIQ7d21u167z6ZPX54Uvi42NWbpsPqy3srKC1w2b1nT7rs/oUZNh+eSpY3PnzQiqUm3LpgMDf/hp1+4ty5ZHMB4fjrNt+z++vv7Hj16CvY4eO/DrqEFhX3514viV5s1azo+YlZ6RDpvduHl16vSxoPUd245MmxL+9u2bRUvC6SPsP7Br/4Gdv/z82/LlGzw9vTds/Et98CVL50E2OnXstmXzwaZNwqbNGHfu/ClkCEKhcMmyeX16D1wYsbJatZqLFsNHJ+jPj54kNdnZ2ePGD3dzdR83ZioyL1jfRalcKahB/YZQEteoUatD+65nz56QSqV0wQzrv+3aq3q1mrB85Mi+2rXrjPxlvIuLa906Dfp/P2Tfvh2pqSmMx69SuVr7b7qIRKJmTZXTZNasWRuUDcJq3qwVFMCxL1/AyrXrVjT54suuXXpCiQgbDBs66sqVC4+jHkLSnr3bmjZpAfJ1dHD8qvU38NH0YXNzc4//e6hnj35wcCdHpzZfd4DLRlP9HwNkoP03XT/9pFGd0Ppwn4G3jx7f158fPUk0crl8ytTRWZmZ4XOW0PO0fCyUEYIDvI6fkMoQiqHAHVa97O1VEcT9+nU8/TaoSnV6AUJX9x/caVD/M/WWdeo0gJV3791iPD4U3vQC+B949fevRL+1sVFOapOengav0dFPq6muIpqqQTXg9fHjB9Dk8epVnL9/oDopKCgvS0+ePJJIJJpZCg2pB7bqQ9oHZAghtevSC85OLvCam5OjJz/6k+jpbectmPk46sG8ucvAjyGDIHD7jl4UCqQw/BSJxdbqZWsb5QQcmZkZjo5OsCASi+n1oCTQPZhd+NPc92PK7yIenSSLFjEZGRlQGGtmg55+LQvKwMxMKA7pKyEvh9Y2+Xspjc2IX34ocrTUlGQnVeY/EriTFMmnnvzoSUKqBsg7dyPhJgAVGM1tPhJl+7wAlRJcvywKqFm9nJOtHL+tfHj6vgAAEABJREFU1pAaa2tr+CFbtWzbpEmY5novTx9UauDgyo/OKRg6nqmSC/hXKPIFAkFubo46KTs7i15wcy8Hr6NHTfL2rqh5NA+PCshk+dGTRL+1s7OfPnVuxJ+/h8+dFrFghUH9OSlo3yn16Fms76LcuXNTvfz0WRSUZ6CYd+/eFtmsUqUgqAuCT6XfQnH+5s0rD4/yqNTAJ1YNqg4RDPUaejmwUhXQR/nynsq33+YlQbSEXvDx9hWrbi/qLMHNBErQ0k+9qSc/epLot5UCq4SG1psxbd7gob03b1nXu9cAZF54Xb8sUf3kXVIihFDABkDw5NDhPc2btxLn2xJNfvxh+MWLZ48c3Q+2+9692zNnTRg1Zgj4FmQMIAZy4eLZ3bu3pqWn3bp9Y/mKhVCPrKKqGEBlFGKX0GwJy1u3/fPw4T16F9Ax1AihQgmZgWxA5GTMuGEQAEHGQE9+9CSpCQys/OPA4ev
/WQXlBTIvvC6/S1Q/ade2ExRCy1f8CcvwU40YPlbrZrVqha5euRnKpFWrl8ANumaN2rNnLdR6JZQACLfBZbZ950aIOUIsuX69hqAPOql3rx+gNR6ilnBFQR4gXvH7H5PprnYQroa7ypZt6yMjr4ExgCyNHj0ZGQM9+dGTpMl33/a+du3SH3OmrPvbrA8J4vP8g39Pi7ESE51+8vv4XTp0CoNmmr59BiJMWXNiU3ziy9wh8yqhUoD9N4al4PZ51gHed+KkkbpSN23cB40gqOyYMGnk/Xu3tSa1adNx6JCRiE0YpX2e1+OLCYMLgP17DWvQLoLSlK/eoiu1bMUNjBk1WSLVXgO2tWHdE04IApff+iEUROlbCAzEs4IXYitublya5F/11AdUSvjdflno2TcYjkEgXH5j+AuFcPmtD8ICxqfxGKPM78NnfVN4fBqXwfP7YDAM8NyfYCwcnvsTjIWD/QmGz2B9Y/gMn/UttiaFYjyAmquIxFYi29JOpsXnn9/eSZibhT04V8lIk9nalrZ7BZ/1/UUHj6w04wyowZifD+8kNRuVtkcan/Xt5i308LHdueAlwnCNPYvi4PZbq7EDKh18Hr9Dc+lQyoPLaZ4Btr5V7eUKBj9HEIVOCKV6r+zlQ68jmHpEEJCu2iX/cAVBSj370oF61ZZFMqB3R4JA2rtIazkIvZ4kKUVekyBBFcyfo2v7vPwX//raslRkXcFX15p/HV+KJK1eP894/TzTO8jmqz5GGKzNf30D14+/f3DlQ06WXCYxtMGXUdFG2UW/+rUnUXp615UoC7op9lEfcXwq//ozTN8iZG0tDAx2bPatKzIGFqHvUpKTk/PXX39t3bp1+vTprVq1QqxhxIgRmZmZa9euRaYEvvL79+9dXFyEQqGnp2flypUDAwN9fHwaNWqEWA+Of+sD1APK3rVr16BBgy5duoTYxK1bt6KiokiSvHHjRv369ZHJ6NSp0/r165OTk2H5zZs3kZGRIHRXV1cHB4edO3cidoPDw9pJT09fuHBhmzZt3N3dL1y40LdvX8Qy/v7775SUlKSkJFOLrHv37lBs08ukCoVCASU6+8WNsL6LA7/c/Pnz27dvX6FChXPnzvXu3Ruxj8uXLz948IBehoX79+8jkwHO5KuvvlLPS4hUI8euXLmCuADWdwFwC547d27Xrl19fX3PnDnTs2dPxFbANcEdhl5OSEiAugEyJVCEw9WufisQCMCoIC6A9a3k3bt3c+bMAUFDzenkyZPdunVDLObEiRNPnz7VXHPv3r0nT54gk+Hs7AxFOMgalh0dHffu3QsVkgMHDiDWY+n6fvv27ezZs8FeV61a9fjx499++y1iPVDbg4qv5pq4uLjt27cjU9KnTx9vb2+Q+OnTp2Hh4MGDd+7cmTJlCmI3lhsffP369Zo1a65duzZw4MCOHTsi7tCyZUuo5MnlcghcQlUPWmdkMhkUsXB9IvNy9OjRxYsXr1ixIiAgALESS9R3fHw8+FeIr4GyoR6JMKUAAjhDhw6F+953332H2Idl6fvly5dQZoNb/fHHH9u2bYs4zt27dz08PDRrfmXFvHnzQOjwiliGpfjvFy9eTJo0adSoUdDqtm/fPh6IG9i8ebM6Sli2jBs3DiqgzZo1e/z4MWIT/C+/nz9/vnr16ujoaCizWdW6XnpA33Xq1KlRowZiBxkZGUOGDIGTzJ7mMD7rG0Jm4LMhtgDKDgsLQxizsGTJkmfPnsErYgH81DfcJUHZ0AYBym7evDniKTdv3vT393dzc0Ms49KlS6NHj4a4SmhoKCpT+KZv8KOgbKjrQANEkyZNEK8ZPHgwfM169eoh9iGVSiGu8umnn0IRg8oO/ugboiKg7A8fPsAJbdzY4OeucxGoV7Ru3drPzw+xlVWrVkVGRi5fvpxu+zQ/fNA3NKTBL52VlQXK5kSnZIsCTNSwYcPAjkNZjswOt/UN5w7i2XArBGWXyekrWy5fvhwcHOzgUNpBimbgp59
+ql69+vDhw5F54aq+r1+/DsqGBWiDbNCgAbJIoMkwPDw8MDAQcYH169efOXMGKp2lf+Tsx8O98TtXr14Fn21lZcXaqpXZaNiwoZOTAc+WL1v69esHJRE0A82ePdtsVX8uld8QdYIy28bGBtxImQeeMCUGQoeenp5jxoxBpocb+r5w4QIo29HREdxI7dq1EUbF+fPnodZhrIcmm5Nt27bt27cPvIqLiwsyJWzXN/yE4EagCQPK7Jo1ayKMBtASDkJxdTXOVApm5vnz59CYP3bsWJN2mmCv/wafDUGlChUqTJw4EareCFOML774wpx1NeNSqVKlEydOTJo0CUIF8IpMA0vLbwhp/+9//wOjVrVqVYThNVCKQS35+++/RyaApf1jExMTwZNgcevn1KlTMllpZxAucyC+GR0djUwDS/UtFAp58MuZmhkzZkgkfJgglyRNpUOW+m+s748hLCxMc1oSTHGwvjnMtGnTEEYv2J9wGH74b5OC9c1heOO/TQf2JxwG+29GsL45DPbfjGB/wmGw/2YE65vDYP/NCPYnHAb7b0awvjkM9t+MYH/CYbD/ZgTrm8Ng/80IS/2JlZWVVCpFGL1g/80I9t8cBvtvRrA/4TDYfzPCrvJ75MiR586dI4i8p0Grp3+4efMmwhQD/PexY8ewRdEDu8rvRYsW+fn5kfkQKipWrBgXF4cwxcD+mxHW+ZPPP/9cc0goLDds2BAkjjDFAP8tEokQRjes03evXr38/f3Vb0HZ7HxwERvA/psR1unby8sLbrv0dLoKhaJOnTpcmV/P/OD4NyNsjJ/06NHD19cXqQpvTjxwtazA/puRUp2duKjs7AwoZOWF1kK1UEFRBIWovDAI/Kcxx0r+OwiSgM+mXwuSkDKVELZo+P3pzFPVq1QnM32irqdRhGovzZla6OOrQi1qv05Qyn/0kYj8qV0Kf7oSsVgcUJt7c5oVB8e/GSmhvg+sTHgTnQUCk0kVyJAJgmhV6kwuEGO1L6pWQ3J0cksihbTsULBSQ7+FtsxfX3x3oRUJF6Czq6jnBG5XW8F/N23aFBfheijJqTm67u2HZGmzb328grhaeZdkojM7E9ZOiRkwyx9xFhz/ZsRg/70j4tXrF7kdh1fkrrgBkR1q3a+CZ6DDmkkvEGfB/psRw/QtyUBJCTnfjfZFvKBxZzeCRKe3JSFuguPfjBim7/8OJItteFVguJa3fvU0C3ETHP9mxDB9p6flEgZVJ1kPKUa5Eq52xMXxb0YMK4xluQopBEx4hFyikHFWIdh/M4LPDsXdGxKOfzPC0v7fZkPZQZGz5wD7b0YsXd/Q3ERx1nBh/82IYf4kr/WbRyjdCYE4CvbfjBh2dgSqAQeIRyi/DWe/EPbfjBjmTxQKxOnn1WuFu/ck7L8ZsXj/zeXyG/tvRizdvVEKirv1S+y/GbH0s8PpCgX234wY5k8IkuJX9ZIOCXH1K2H/zYhh+qYUBO+qlxTibAMm9t+MWLz/5nL7PPbfjPAqfjJj5vgjR/cbtIvSfXPZf+P+3/rhlb6joh4iA1GG83H8m78YeHcjKUMLuxcvnh84uCvy1vWEhNf+foFt2nTs0L4rnZSamjInfOqDh3d9K/p36PBtfHzsfxfO/LNuFyTBz/b32uVXrl5ITEwIDg7t1OG7hg0b00cbMLDb8v/9s2XLugsXz5Yr59G8WatBP44QCATNw+rDBvMXzFqx8s+D+89+ZPZIASkQcfUix+MvGTEwfkIZfDf/3/KI69cv//Lzb+FzloC4Fy+Ze+XqRTpp3oKZsXEx8+ctnz1r4dWrF+GPJPPys2TpvF27t3Tq2G3L5oNNm4RNmzHu3PlTSDUvOLxGLJwdFvbVv8cuT5owe8fOTWfOnoCVx44oDzt2zJSPFzegkCvkEq4GwLH/ZsTA+AmFDI2fTJkyZ/785XXrNKgTWh9K7qpB1a9dvwTrP3x4f+XKhe++7VOjerCbm/voUZOhgKd3yc3NPf7voZ49+rX/pou
To1ObrzuEffnVho1/qY/ZtEmLZk1bgNZDQup6eXo/efIIWSTYfzNi2NVPkMjg+DdF7dmz7eq1i3FxL+kVnp7e8Po8+im8BgeH0Cvt7e3r1v0EinNYBr1C2KtB/c/UxwgNqXf02IEPaR/ot0FB1dVJ9vYOGRnpqKSoJgJCHOXMmTNNmjSh57LjLlDFN91dyMD+sQrDym+FQjF+4i9SqeTHgcNDQ+s72DuM+OUHOik9PQ1e7ezs1Rs7OjrRC7Re1VuqSU1Jpk+E2saUHk7HB6H8Bv9ta2uLuAxU8U1XSzbwujGwrx1UBx8/frBg/vJ6dT+h14B2y7l7IOUkadbwKtVonkh9n0IvuLmXg9fRoyZ5exeaX8rDo0JKipHnciAIASHkagGO/TcjBvoTA5vn09KVjoIWNBATEw1/Af6VkHLuTD94fRHz3N9fOT1sRkZGZOS18uU9YdnH21csVs4PCJad3hEiLXCVQ0GVkoKMC0XJKRlXC3Dc/4QRA+uXBrZlV/TxgwJm+46NaelpsbExS5fNb1C/YcLbN5Dk7eXj5xfwz4bVr17Hg7gXLZ5D+3IAdNzv+8FQobx37zYYcYicjBk3bNHicP2fBZcEhAtv3Lhy6/YN/nVS1wqOfzNi2tCvu3u5SRNnP3x0r0PHLydO/nXgDz+1b9/10aP73/dXhsDHjZkKTrpP306/jhoEVcbgmiFWQit6x+7d+o4dM3XLtvXfdGgGIUUvT5/RoyczflyvngMg0D5l6uiP1zen65e4/wkjhro3g3sjQSAP/jTXnDl1g14Aez3n98Xly1eg306YNJL2JzRQ0sNfkaP5+Piqd6dZtXKTehnij+rGo48E9z/hN4b2jyWNOJ3CjJnjoeSGNkuIhW/c9PfNm1fbG6jO0qN8jJWAqwU4jn8zYmj/WMqI1nbatLmBlar8tWZZ957tLl48O21KePEC29QolNPzc7UAx/6bkbK8u0Hb5OyZEaiM4fB4Ddz/hBF8amTtoTIAABAASURBVDgcacH+m5EStM/zLPTG4e+D49+MGFhZVCBE8Wt+HxKRnO0Dj/03I6Zt3+EAXL5ccfybEYsffwkhITz/CX/BZ4fDYP/NiKHtO+BWeeW/4etwt30H+29GDG3fgQYRXjlw+Drcbd/B/psR7E84DPbfjOCzw2Gw/2bEMH9iJSZFnJ1NQSsisVBkw9Xxi9h/M2KYWB1cRQpePR4Q5WQqRGKu1i+x/2bEMH1/+Z17brYc8YgPiRK/qg6Im2D/zYjBZsOnsu22eTGIFxz/+zVpRXzRxRVxE9z/mxGD9d1+iGeVEPvdf768f7Hks46UObGPc/eviM9Il/af7oc4C/bfjJTk7tbsO/eTW6h7F5JunUmUyyitT6SHKLn2hiDlDG9atqcoonjPRK0rleuVswwV21hbV26tWxICUiAg3L3FXcdzWNwI9//+CEp4alr0LIdQObkEZX+QF/LjRH4PLCJfcZRGCi1AqtjGquf0UZrrERoxfMSUqZM9PMoXOiANqerJqPkpKP/I+Zvl/a/5iflJIpHAxgnxAOy/GSnV2RGIkH05UwXXktJinNyFTu7cnnzMpOD4NyPsDWaDs8SFk36w/2aEvfqWSqX0bMgYXeD4NyPsLSBx+c0I9t+MsFrfuPzWD/bfjLDXn8jlcpK7QyPNAvbfjLBUQApVNxesb/1g/80IS/0JNt8fA/bfjGB9cxjsvxlhqQHAwcGPAftvRliqb1x+fwzYfzOC/QmHwf6bEaxvDoP9NyPYf3MY7L8Zwf6bw2D/zQj2JxwG+29GsL45DPbfjLDXn2D/zQj234xg/81hsP9mBPsTDoP9NyNY3xwG+29G2Bv/xvpmBPtvRrD/5jDYfzPCXg15e3sjjF46d+7MgyiTWCx2c3NDpoGl5XfLli1fvnx55swZhNHGrVu34PWXX37hgb5zc3OTk5ORaWDvALD58+dPmjQJ33+Lc/ny5a1btyLlpF+8ehaSKWD1AMclS5b8/PPPCFO
Y9PT0efPmIcxHwGp9169fv3r16hs3bkQYFRMmTIDXVq1aIczHwfYB6mAxDx48GB0djSyecePGDRo0CGEMgQMTMCxevBhUjiwYujb5xx9/BAQEIIwhcEDfnp6e/fr1mzNnDrJI9u3bd+3aNVjADQIlgBsT6HTp0uXdu3fnz59HlodCoRg8eDDClAjOTBC1cOHCsWPHyuW8erqVHiAqDF8ZqRpxEKakcGkCNIsy4t27d//+++8RpnRwSd8NGzYMDAzcsmUL4jUPHjyA171795qu1dpy4NgElqNGjdq9ezc03SOesnTpUh5/O/PDvQla+e1SPDw82rRpgzBGgnv69vHx6dGjx/z58xGPSE5Opn1Xt27dEMZ4cHKCbRBBfHz8pUuXEC+AoFDPnj0hBoowxoarE8jzxqU8e/ZMIpEcP35cLBYjjLHh8AMSFi1apClxLtrW2bNnZ2Zm2tjYIIxp4LC+P//8c29v7x07dsByu3btEhISoFBH3CExMTE4ODgkJARhTAa3H3Azbty4zZs3N27cGMRNEASYcsQFYmNjz58/7+zs3LFjR4QxJdzWd6dOneLi4nJycmCZoihQOWI9Hz58GDlyJFyTIpEIYUwMh7ukhYWFgVbUz1iDhdTU1IyMDHt7e8RWwJNkZWXt2bMHYcwCh8tvFxcXgUCguQYCbVCcI7by22+/wU3G398fYcwFh/W9a9cuuNFDc4+6CIfCGwpIxEquXbvWqlWr8uXLI4wZ4bb/hobMffv2DR48mFY53PpjYmIQy4iKioKKL4RKwFAhjHlh8N9ndryLvpchyVHIZQo9m1EURC8oLesRomcwUDBcSbAzRZQwFWjSIqApClBmIP02Wnr7abGNCVVetBxZVS/VnnlVtglS+45KKEQQOlILJ8FXz0boVX4SIj72aET+5lSxjbUchFIdQhNSKBAKkJu3dZcRXsgi0afv0zuSo+9mBgY7VavnRBUyugWKoxfgj1To1iChOvVI+xHoDYrrpGADQnV96D64bgUqUwmFKp0ofEyNXWFNcXmrhK9SJqUjzzouu7xjastvXk6LZ1jXPtrkTajWFD8fFKH8V+QTSYHg1eP0x9ffr5v+sv90P2R56NT3jj9fZX2QdxvrjzBcpmpDB/i7vCd1zeSYgbP9kYWh3TUkx8qT3+R2+dUXYXjBZ51dhELyyN/vkIWhXd8XDyfaOuDR2rzCK9A24WUGsjC06zs7Uy604nZoBVMEB3craS6FLAzthXROtkyhQBg+IZXKpVKL+1GxCcHwGaxvDJ/Rrm+BkEByi/NqPIfQ3YbAX7TrWy6jsP/mGxSBLK/Iwv7EUlD2QcDlNw1+7gX/oCiEy+88CIIgcfibX1hmmaVd3woK+2++QVlkvECH/8axEwwvwPVLS0HZC5jA7fMYnkIhLR3EeY92fZMCZInBJF5DkIQF/qTaoyQKOaIUlujBz5w90Tys/vv3qfo3mz7jtzFjh+nf5tDhvXAomUyGjMS06eNGjxmKSoryB7W8n5SlUcAZM8cfObofYTClg6X6jop6iDBGhcD9Twow/ES0a9+0Z4/+oMvz/522s7OrVavOxAmzHOwd6NQNG9cc//dQUlKih0eF0JB6v46cQE9acuXqxe3bNzyOeuDq6h4cHDJo4Ag3N3e4rUPS/AWzVqz88+D+s3o+FIp5aIr6rOEX8yNmCQSCalVrTp82d9/+nf9sWO3o6NS6Vbshg38hVA0bsbExixaHP3n6SCAQ+vsH9vt+cJ3Q+vRBVq5a/O+Jw7Y2tmFhX/n4FBqEe+z4wQMHd7948SwgoPKXzVt16dyDMLCZJDk5adbvEx88uOvj49u9W9+2bfImHNSTHz1JmocdMqxPzRq14fsiA7A4g6K9/CYFBCEw7IeEH2Pnrs3t2nU+ffL6vPBl8CMtXZb3iIV161fu279j6OCRu3Ye/2HAsLPnTsCWsP7J08cTJv5Sp06D9Wt3/Txi3PPnT+bOmw7rjx25CK9jx0zRL26keuT
p/Qd34G/n9qMrl2+EhV9+/VGhkB86cG7a1PAdOzddvao8VGpqyvAR/eHSWr1qy/+WrnNxdp01e2JWVhYk7T+wa/+Bnb/8/Nvy5Rs8Pb03bPxLffCTp47NnTcjqEq1LZsODPzhp127tyxbHoEMAbK3ZNm8Pr0HLoxYWa1aTVDt27cJ+vOjJ0lNdnb2uPHD3VzdoQT56LzQ7fMWV4DrqF/KKMrw/rGVKwU1qN8QSrgaNWp1aN/17NkTUqk0PSN967Z/4Ddu3LgZFOfNmrbo1LHbps1/Q9L9e7etra179xpQvnyFTz9pFDF/RY8e/ZCBSCSS4T+NcXJy9vMLCAyoDKV4/35DbG1tocxzdnZ5Hv0UtoHLSSQWjxk92cvTG8rRsWOmZmdngawhac/ebU2btGjaJMzRwfGr1t/UrdNAfeQjR/bVrl1n5C/jXVxcYX3/74fs27cD9PfxeYPKZftvusJXg8xAMQxvHz2+rz8/epJo5HL5lKmjszIzw+cswTN0MmJM/125clX1srdXRVDw69fxcXEvYaF69WB1UlBQ9YyMjFev4oJrhebk5EyYNBJ+1PhXcaDR4jdiRry9K1pZWdHLNra2/n6B6iQ7W7uMjHRYiH7xrEqVaurnW4N9qujj9+TJI4qiIBvgATTzRi8oFAq4GzSo/5k6Ce4zsPLuvVvIEEJq16UXnJ1c4DVXNdWtrvzoTyJUzFswE+zcvLnL4OpFvACKJNPN8G/M9h2x2Fq9bK3KcWZmRkpqsvKtRpKNjS1S3mSzQPRQCJ0/f2r1X0uXr/izXt1PoJADF44MgSzcEYzU1i8sJTkJLgPNNZC9rGwoBDOhOKTzk7feOu9Ew20BLsu/1y6HP80dDSq/kcZD4zWNu6786E+Cq/HO3Ui4CcBtUPNUfyQkW+uX8BOA40KmwZj6BjWrl3NUOQa52NkpZyvOzin4AllZmfAKFUp4hXs3/IGjuHnz6u49WydOGrln9wlkbGzt7HJyczTXZGdl+Xj7QukIhUeuRlJ2dp7TBeMEJqdVy7ZNmhSaNNDL0weZLD/6k5CyOLefPnVuxJ+/h8+dFrFghUGVXcoSwyc6/IlASJACg8/GnTs31ctPn0VB0QVFUaVKQaChBw/uqJMePboPJVC5ch63b9+8ek35DDR393KtW7f7adhoMOsJb98gY1M1qAZ8KJTH9Nu09LSXsS8CAiqBPsqX94TghnrLK1cvqJch55AfsEz0X3DNEKjSeXgYYQJYXfnRn6TMUmCV0NB6M6bNu3f/9uYt65AhQP3SArsQate3cnya4fXLd0mJ4KThdgPBk0OH9zRv3kosFkO9rWWLNps2r7106Tz8Wv/+e3jvvu1du/YCIwEGd/qMcQcP7YH2woeP7kNVD4Reobwn7AXqv3Hjyq3bN4zS/vfNN13g3hKx8HcIX8TERM8Jnwp+qc3XylBd82YtIaAJzZawDPXghw/vqff68YfhFy+ehWYmsN337t2eOWvCqDFDwLegUqMnP3qS1AQGVv5x4PD1/6yCABTC6MWY9ct2bTtBWdii1aff9+/q5xswYvhYej0UzJ83agph4C5dW23eug7C5D1VcZLvvu3dtk2nZf9b0KlLy19HDbK1tftz4WrasPbqOSDy1nUIFGgamxLj410RwoUQxu7es93IUYNgzeJFa8CcwELvXj9ATBpCmRB0v3zlv2FDRyGV04XXWrVCV6/cfPfuLcjemHHDQHazZy00ylP89ORHT5ImcOqgGWHmzPEIoxeC0nbT+mdWjEKBuo70Rx9Nh05h0PzRt89AhGElt8+l3jmXPDyiMmIZhw4dunnz5rRp05AJ0F6/pBAe4sA3KERZYPuOjvlPyLxZs8ucb9o305X022/TG3/eDJUdW7au37p1vdYkP//AZUvWItaBxzeoUCgMHn+5f+8pZAK2bDmoK8nGuowf+wt+DKqDWpPY2NUaj79kIeoeWixErAJhWAwen2Yx4PkhNLHAsXr8hrDE6qVufVvgWGv
eY4Ellq7xxZY4FpXfWGbMV0f8RF6S9nkMm1F2xsL+pAA8xyb/wO07GL5CWeQEsljfGD6jo33eiiSx/+YXApIUCC1u0mvtX9jWXkQgAcLwCHkOZSXC+lYRGGyXmWGEjvwY9hD3PMPJ3QpZGNr1HdrMQWQtOLMtEWF4gUSC0lKk3470RhaGzhtW/2l+KW+yj64x/mhIjJm5diRlx/znPcf7IctDX/yk33S/TX/EbZz9HOol0lx58Q1IgXKm2eIQJKK0da8lBIjStr3yUAShKDaSSBWC1z7CSPUphM5JbkkC6Z7/VteO9Ax9lO6OwVp3hL0gg1rzX5AdEmn2N6Z3KUjVcRqL7ktoie/pOaWASCxQyCihNdlnbKC9qyU2aDDEB3tPrIjk6OaZtKyMnOKpun5UnQIiSZ39yotIIG+H/J9E66cgkkLaj0YQpGq4inbBnfvvvwYN6tsWn1NGOX+OvomhKdW8/H0GAAAQAElEQVQX1pIR5Wzp2q/pN28SXr9+XbdBvUI7Fha4vtOCGAROCEhKrnNfK6GVfy27Cn6WO83VR8S/BaheC0eEHBEvSE5O/n3lX5MjuiAz4bFp092gT+QVKlRAGLNjcQEjsVi8dq1ZR4717t3bxcXlxo0bz549QxjzYnH6tre39/LyQuYFLqq6detOnjz57du3CGNGLE7fY8aMefLkCTI7JElu27YtLS0tNTUVYcyFZelboVCcP38+KCgIlRFVqlQRiUTgWCjLfNyq2bEsfYOqTp8+jcoUOzu7KVOmbN++HWFMj2XpWyAQgP9GZU3VqlW7d+8OC+BYEMaUWJa+J06cCP4EsYb4+PjDhw8jjMmwLH3fvn07ODgYsQao7Pr7+yPlnOhZCGMCLEvfR44ccXV1RWyiZs2a8Prjjz/i6LgpsCB9y2SynJwcxEo2b97877//IoyxsSB9R0REHDx4ELGVYcOUD/xes2YNwhgPC9J3XFxc/foGP5/NzHzyySeDBg1CGCNhQfpetmxZQEAAYje1a9eeO1f5SGJsx42Cpeg7Ozv7zRtujNVwcVE+2BJCPatWrUKY0mEp+t60adOBAwcQd+jatSt+PHHpsRR9Z2RkNGrUCHGK/v37w+uOHTugGQhhSoSl6PvXX3+tVasW4iAdOnQYPnx4bm4uwhiORegbzPe9e/cQNxGLxfv27YPI/fPnzxHGQCxC38eOHeOW+S6Ok5MTCH3gQPz4RcOwCH3Dzb1169aI4/j4+IBRiYyMxF7l47GI+TXpzqg8IDQ0lKKoV69eXbt2rXPnzgjDBP/Lb6lUeuqUSZ5dWCYQBAEF+ePHj+/cuYMwTPBf31euXLl+/TriFxMnTnR2duZHr1qoVzg4mOopkPzXt6ura9WqVRHv8PPzA2WEhYXJZDLEZSAu5Ohoqtl1+K/vmjVrdurUCfERgUCwZ88eiB4qDH3aNJt4+vRplSpVkGngv77fvHkDtTHEUyBuCC35EOC/cOEC4ibPnj2rXLkyMg381zecvq1btyJeY2dnt2vXrqioKMQ14MpMSUnx9jbVxM3817eXl9cnn3yC+M6iRYvS0tIgWIQ4hUnNCbIEfVeqVKlHjx7IAmjQoAFEDydMmIC4A9xdsb5LRVJS0sWLF5FlIBQKv/zyy6NHjyKOAPqGAgiZDP7rOz4+ft26dchiaNmyJfixnJwcTsQNsT8pLeXKlfv888+RJeHm5gah8caNG6enpyN2g/VdWqBuTg8UsCjAiEPD7ZkzZ+RyOWIrb9++tVeBTAb/9f3hw4ezZ88ii6R9+/bgUljbNxgKb9NFvmkson65cuVKZKmAUblz505kZCRiH6Y2J8gS9O3s7Ny0aVNkwUyZMgXiKiysbpq05ZKG//qGytbQoUORZVO7dm2SJAcPHozYBNa3EcjKysJT+yHVA1IGDRoEzfiIHVAUFR0dbdLgN7IEfUOMbPHixQiDUL169b7++muJRMKGqY7MYL6RJegbwk/Q5IEwKuzs7EQiERTkZf4kN1O
3zNPwX9/wi44cORJhNDh48CBEVIr0GoeiHZkRU7fM01jE+MsjR44gTGFAzaBv9aNuIcQEDQXHjh1D5gL7E+MA+g4PD0eYYkDQMDc39/Lly61bt87MzITlvXv3InNhHn3zf34IaOBo06YNwmgDIqdQkCcnJyNVk35MTIx5ZAf3CojHu7u7IxPD//JbIBCMHz8eYbTRuXPnd+/eqd9CW+/+/fuR6TFDyzyNRcxfZZ7fjItABFrzLRTh58+fN8P8WOa5SyAL0fcff/zB5m50ZUi1atV8fX1tbW3VsRQows0wPMIMLZc0hCU8CH3OnDnjxo0Do4IwxYCiNCoq6vr16w8fPoS6eIj7UFdHH7GVjUymIKCVUbUNQUBzIyryFiAJpNBcr7GACq8kVKvUWoPLiSRIejv1XqoDEor8jdSfotqG0NhbidhaaOsgaNzW3a+WDdINn/UdEhKiqWlCdY579OgxZswYhCmGJJtaMzXawdnKK8BOaE1IpTINHStlR1KEgqCQhgrhlIJ+BIiUI1Xxn78DyJFSLhesJFULClCyxgHpzyUIkqLy7h6a6zWXVdcHhTSkKhBaJcVmJSdImnRyr9FQ5/RXfI6fBAYGxsXFaa7x9vbu06cPwhTjzXPJvpXx3UZWEplwsIHRUT6oaEv4i/in2a36eGjdgs/+G8KCJFnoC37xxRfly5dHmGIcWvuqRiMXTok7j57jA57dzUAS7al81nfv3r2h8qR+6+Xl1a1bN4QpRsxdiVxK1f3SBXETGwfh4fXau9PwWd82NjYQ31Vb8IYNG1asWBFhihH/PFMgJBBnEVuTaanaY5o8jw/27NnTx8cHFjw9PXHhrYtcCfzjcPw0N0eWk6s9TMKu+mVOJoq+m5H6TiKTUjJZwVRj+fEpQl2FJpU197waNUkSCgVF1+U1doH3UKunvq435o7VHahZxtywi72VqJAXnWoVPDqEqwiSoBT5NXpV4Erjs0hFfgVfQAhIIbJ3FvkF2br5WCEMu2GFvi/sT358Iy0nUw6KIgQqaRKEXKYhxLwgqkaktPiy5hqkGYMtF+QZBvGqR1czNEVcsCEd1Cq+u7bPUsapCOVBLqmuK5GNwCvAps0AXGdlKWWs730rXsc/ywI1i+2svKq5ulbkUgU+K1WSFPsh7mn2/0Y/cy4n7jWeq+aeFJCkgMP+W0AiXY04Zabvh1czzu9JhGx5Vy3nwilZq7F1Efm6lIMFhQTF3Hq9bNQz/xr27QZWQFwDPJtCzuFmPuWNXkf2y0bfu5e9ehOd7Vm1nJsvJ5VdBFKEAj/1goWo87F/TYr+8fdAhDEjlO6nV5RB/OTwurdJryTBLQP4IW5NqjbxFdlZrxgXjbgFSSAO2xN9mFvfG+fExkZlgQ4QT/GrU97exW7FWE5JXEEhLvdCUnbU0tGNyqz63rPsdXaGonpT3oqbpmKou1MF+1UTOCNxgiQ5XX4re36R2r+A+fT94FJ6wsucoMYW0YLoVcNNYCXcHvEKcQFKoeB0+Q3+W1cvWPPp+7997zyruCGLofJn3klvcmLuZyNM2WEmfR9ekwANfxyNA5YYRze7U9sTEcbE0GMntCaZSd8vozLLV7agwpumYmi53GzZk8gMxG4EQpLT/atA4Lp0bA59XzqUDC2Uzl62iJVkZKaOmfLp7XsnkQmwdhBfP56K2I1cppDLzG3Ad+/Z1qLVp8goEKoQpzbMoe+ntzOt7cXIIikX4PwhRYJ4x4sXz7v3bIfYAdQvdT2g3Bz6zkqTOXs5IovEoZwNVO2f38lC/CLqyUPEGiC8SeoQssnb5zNTIPpEuXibypykpScfPLooJu6uRJJTtUrDFk0HeJTzg/Vv3j6PWNbz58FrT5//5/6jc06OHqG1WrZp+RM93OHW3X+PnVqVnZ1Wo9oXTT/vhUwJuNund9IqhbDUnpWAw0f2LYiYDQvNw+oPG/rrt117ZWVlLVz0x+3bN9LT0/z9Ar/+ukPHDt/SG8fGxixaHP7k6SO
BQOjvH9jv+8F1QusXOSBss279ytt3blIUVbNm7e7f9a1VKxR9NMrgoI42epOX3y+jMuiB66ZALpevXDvseUxkl2/Gjx6+xd7OdcnqAUnJ8ZAkFCg7Z+/cP6dO7dbh0y707Drj3MXNdx4oTfabt8+27Jpav06b8SN31w9tu/9wBDIlAqEgLYnVowcEApI0pH7Ztk3H7t36li9f4cypGyBuWDN+4s+vX8fPmhmxY9uRJk3CFi+Z++jxA1ifmpoyfER/D48Kq1dt+d/SdS7OrrNmT4SLQfNoEolk5KhBUO7MDV8aMX+FUCCcNPnXnJycj84OUo0E0K5kk+v7fbKUMNmHvIi9nZgU06PrjGpBnzk6uH3z1c92ts7/Xd6m3iCk5pchwWFCoVWlgLpuLt7xrx7DyktXdzs7VWjZ7AdbW8fKgfU+rd8RmRKSIHKyWf2oVblcoShF/fLK1Yv37t0eO3pK9Wo1nZyce/XsD6XvPxtWQ9LOXZtFYvGY0ZO9PL19fHzHjpmanZ21/8BOzd3j4l7CZdClc4+gKtUqVaoybWr4jBnzDXpaEKG7+4zJ9Z2bLdfTvauUxLy8IxBYVQnMu9/BjQJ0HB1zS72Bj1d19bK1tUN2jvJ5p0kpcRXKF3Txq+hdA5kUAsmkvJ1kBinrms+sra0DAgom8w6qUj0qSmnQo188q1KlmlCYZ4Pt7Owq+vg9efJIc3fQvbOzS/i86Zs2r71//w5JkmBgDHooJkWVXf9vO3th8SEzxiI7J0Mul0J0T3OlvV3BOHBC270jKyvN3a2gm4BIpG8CpNIDhaOY1wPZkpOTrK0LnUNbW1sop2EhJTnJ27tQjwxrG5us7EL+RCwWL/7zL/D0u3Zv+Xvtci8vn359B7VsacCUv3r8r8n17eYtVphM3w72bqDOAb0KGWiSZLgpgS2RSgvsXW5uJjIllFxh5yhC/AVK5ZycQt0QMrMy3d2UIz9sISm3kJPOzsry8S7awc7X13/okJH9+w2JjLx29NiBP8Kn+vkHgl1BHwmhmidOGyb3J5VqK+MGctOEgL09gySSbGfn8mCj6T8X5wrenlX17+Xi7Bkb/0A9o+TDqAvIlCjklKe/aW8RpUQ5Pk1YciVUDaoB1cGnz6LUax49uu+vsiuQBMtSad5Q8bT0tJexLzSdDFIFT0DTSGkgrRs1ajJ92lzwM0U8jH4oBT07ohbMEf8WConEaJO04VWp1KBalc927vs99X1CRub7i1d3LV7Z71rkQf17hdRsAW2W+w5HQDTqWfTNS1dN+8g8mVQR0swJsRjl+DSZYZUkMM1gSy5cOAu1w08+aQSmYuHC3x9HPUxJSQaPAZru9q1yHrxvvumSmZkRsfD3t28TYmKi54RPtRZbt/m6UIU+Le3DvPkzV6xcFP8qDo62ecs6qFwG1wxBxsAc+nYpL05LMpUHGNB7Ye2aYZt2TJ4e3vrClR11Q7764jOGeU6qVvm0XesRUU8vj53acNuemd27TFWtNomJevs0VSQW2Dvxberahp82rhUcOmXamFOnj0NxO3tmhKOj07Cfvu/Zu/3NyGuzZi6gA9g+3hUhHgIVUGjshCAgrFm8aA34Gc1DBQeHjPp14slTR/v07dS3X5d7924tjFgJkXJkDMwxf2zso9yDa+JrtvBHlseTC/Ee3lYdh3khFnNqe8Ljaxl9p5pjQm5TsGdpDEUR/ab4FU8yR/ntW10sshbE30tCloYCSbJlLBc3D1AG6ORlOn/VJ61dLx4Afet8ntCU31toDWIqFHKI8elqAYUGSHs7Z2Qk/t446kXsHa1JtjaOWdlpWpNmTTypK3tPL8e7eXIgckIQ5ijmTIeycqmj/6CZ9B3SxPHmqdQXN94G1Nc+1dPYEduQ4RhR3ED3LtPkGpPCaSKVSayE2pWqS9xZyRJJjrTHbC7c9AnE6fGXBGLB/D4DZvgtG/UsuCtpVgAAAwFJREFU673E1lmLUBwdTf6oOEbsbI0Z5Xhx+3XDNh6
IC3B+/KXuJLPemDoOrhh9nRtDbkvJk4uvfCrb1vvSQnsFswez6tunqrjTUN8Hp14iXvPwdEylWjYdhngirqDsoMRhg0IQFFG24y/VeFcRdRzidf/ki+QXbB+VWDIenX7pFWAb1q0c4hDKDkocNiiEasphrUllUHH2rmzda1xAYkxS1H9xiEe8vJV4/8SLqvUdOg7jTsmtguvzx+qpH5fN/Jou5QVD51XatiD+3r/R1nYiz2ru9m7WiLOAsjNSssQ2guELOdlEwvn5Y3X3LSjL+b+7j/FBcrR5XmxM5BuIcVtZC63txfbONlb2IkKESB0d3CmCDnjmRYUojUtXAWFQKm8bIv/3ooo/bjT/CPnvlTsK6PA7pTFbfuF3ea9ww5MpcnNk2WmS3IwcSaZMJlXYOgibf1e+xidcnd2F0DdCgNuU9fMbBKjXBGVvyev/vn8SmZb5ISszJQuKE6X8NK5KTb3SfQrUdgs21QjtFzxBVyVHlWJVDy7J21hBkCQtYyIvNW8p723+J+Udh16vQASZL2/VU1KUn0gKSXsngW+IfYuenLLa2qD0jRDgNmx5/k6DVs7whzAYo8Ln5xdjPhKBQECQHG6iFxKkAmkfwY31jUE2tlZCKy4/f0dEikVlNH4ew34+beEsl5hsELjpyU6TeQfYaU3C+sYgJEKObqKja18jDvLoSpqcUnzeUfvDxc0xvgHDCTb+EWtlJWw7iEu91e+ceX//UsqPcwIFOgZIYX1jCtjwe1xWmlRsQ0IAVnNGWUL9fJvCXVFVj4wm6JVEkWfgFNoyr0Eh7znU2p6Wk/+UXUrVPIG0fHRhwHPLcuSkAA2cEYB0j/7D+sYUIi4q997F95npEk1Hrn7uc5EHQBMCgpJTytYBRdEkpfNV5KuczJu/gRAgSl54y/zLQPNQmmh95DTg4CD2qW4b0pShhybWN4bP4Pgghs9gfWP4DNY3hs9gfWP4DNY3hs9gfWP4zP8BAAD//7fyBTEAAAAGSURBVAMAcOA1/oyLBv8AAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "graph = create_react_agent(\n", " llm, \n", @@ -238,9 +217,7 @@ " checkpointer=InMemorySaver(),\n", " pre_model_hook=pre_model_hook,\n", " post_model_hook=post_model_hook\n", - ")\n", - "\n", - "graph" + ")" ] }, { @@ -259,7 +236,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -282,7 +259,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -298,42 +275,43 @@ "\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "[{'type': 'text', 'text': \"I'd be happy to help you enhance your salmon dish for your upcoming weightlifting competition! 
Let me see if I have some additional information about your dietary preferences or nutritional goals from our past conversations that might help me give more personalized suggestions.\"}, {'type': 'tool_use', 'name': 'retrieve_user_facts_from_past_conversations', 'input': {'query': 'dietary preferences weightlifting nutrition protein requirements supplements'}, 'id': 'tooluse_rjCYWu9BS_SleO9B7iRJUg'}]\n", + "[{'type': 'text', 'text': \"I'd be happy to suggest additions to your salmon dish that could enhance flavor and nutritional content, especially for your weightlifting competition prep. Let me check if I have any information about your dietary preferences or previous food discussions to give you more personalized suggestions.\"}, {'type': 'tool_use', 'name': 'get_past_conversation_facts', 'input': {'query': 'food preferences salmon protein weightlifting diet nutrition'}, 'id': 'tooluse_oc_P4LhVS1ejZV7ZcelT5w'}]\n", "Tool Calls:\n", - " retrieve_user_facts_from_past_conversations (tooluse_rjCYWu9BS_SleO9B7iRJUg)\n", - " Call ID: tooluse_rjCYWu9BS_SleO9B7iRJUg\n", + " get_past_conversation_facts (tooluse_oc_P4LhVS1ejZV7ZcelT5w)\n", + " Call ID: tooluse_oc_P4LhVS1ejZV7ZcelT5w\n", " Args:\n", - " query: dietary preferences weightlifting nutrition protein requirements supplements\n", + " query: food preferences salmon protein weightlifting diet nutrition\n", "=================================\u001b[1m Tool Message \u001b[0m=================================\n", - "Name: retrieve_user_facts_from_past_conversations\n", + "Name: get_past_conversation_facts\n", "\n", - "[]\n", + "No memories found.\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "Since I don't have specific information about your dietary preferences from previous conversations, I'll provide some general recommendations to enhance your salmon, rice, and veggies meal:\n", + "Based on your meal of salmon with rice and veggies, here are some 
suggestions to boost both flavor and nutrition while supporting your weightlifting competition goals:\n", "\n", - "For better taste:\n", - "- Add fresh herbs like dill, parsley, or cilantro\n", - "- Use a squeeze of lemon or lime juice\n", - "- Try a light glaze with honey and soy sauce (or coconut aminos)\n", - "- Incorporate minced garlic and ginger\n", - "- Season with various spices like paprika, cumin, or a spice blend\n", + "To enhance flavor:\n", + "- Fresh herbs like dill, parsley, or cilantro (adds brightness and complements salmon well)\n", + "- Lemon or lime zest and juice (acidity helps cut through the richness of salmon)\n", + "- A light marinade with soy sauce, ginger and honey\n", + "- Garlic and/or ginger (for more aromatic flavor)\n", + "- Spice blends like cajun seasoning or herbs de provence\n", "\n", - "For more protein:\n", - "- Mix in some quinoa with your rice (it's a complete protein)\n", - "- Add edamame to your veggie mix\n", - "- Include a side of Greek yogurt (can be made into a sauce with herbs)\n", - "- Sprinkle hemp seeds or chopped nuts (walnuts are great with salmon)\n", - "- Consider a side of egg whites\n", + "To boost protein content:\n", + "- Add a side of Greek yogurt (high protein and can be used as a sauce base)\n", + "- Mix in some edamame with your rice\n", + "- Add quinoa to your rice (or replace part of the rice) for a complete protein\n", + "- Include a sprinkle of hemp seeds or chopped nuts (walnuts pair well with salmon)\n", + "- Egg whites (could be incorporated into a sauce or served as a side)\n", "\n", - "For more vitamins and minerals:\n", - "- Add leafy greens like spinach or kale to your vegetable mix\n", - "- Include colorful bell peppers for vitamin C\n", - "- Add avocado for healthy fats and vitamins\n", - "- Mix in some broccoli for vitamins A, C, and K\n", - "- Consider sweet potato instead of or with rice for vitamin A and fiber\n", + "For additional vitamins and minerals:\n", + "- Spinach or kale (can be 
lightly wilted with your other veggies)\n", + "- Avocado slices (healthy fats and great texture)\n", + "- Roasted sweet potatoes (excellent source of vitamins A and C)\n", + "- Broccoli or brussels sprouts (high in vitamins K and C)\n", + "- Bell peppers (vitamin C)\n", + "- Chia seeds (omega-3s, similar to what's in salmon)\n", "\n", - "These additions would maintain the healthy profile of your meal while boosting both flavor and the nutritional content that's valuable for your weightlifting competition. Is there any particular aspect of your diet I should focus on more specifically?\n" + "Would you like more specific suggestions based on your particular weightlifting goals or any flavor preferences you have?\n" ] } ], @@ -378,7 +356,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -409,6 +387,8 @@ "source": [ "### Agent access to the store\n", "\n", + "**Note** - since AgentCore memory processes these events in the background, it may take a few seconds for the memory to be extracted and embedded to long term memory retrieval.\n", + "\n", "Great! Now we have seen that long term memories were extracted to our namespaces based on the earlier messages in the conversation.\n", "\n", "Now, let's start a new session and ask about recommendations for what to cook for dinner. The agent can use the store to access the long term memories that were extracted to make a recommendation that the user will be sure to like." 
@@ -416,7 +396,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -428,56 +408,41 @@ "Today's a new day, what should I make for dinner tonight?\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "[User Context: {'content': '{\"context\":\"User is preparing for a weightlifting competition and is concerned about meal\\'s nutritional value\",\"preference\":\"Interested in meals that support athletic performance and nutrition\",\"categories\":[\"fitness\",\"nutrition\",\"sports\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}, {'content': '{\"context\":\"User mentioned cooking salmon with rice and vegetables, indicating a focus on healthy eating for a weightlifting competition\",\"preference\":\"Follows a health-conscious diet with high-protein, nutritious meals\",\"categories\":[\"food\",\"nutrition\",\"fitness\",\"diet\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}]\n", + "[User Context: {'content': '{\"context\":\"User mentioned cooking salmon with rice and veggies, and preparing for a weightlifting competition\",\"preference\":\"Interested in healthy, nutritionally balanced meals that support athletic performance\",\"categories\":[\"food\",\"nutrition\",\"fitness\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-12345']}, {'content': '{\"context\":\"User is focused on meal preparation that supports weightlifting competition\",\"preference\":\"Prioritizes meals with good macronutrient balance\",\"categories\":[\"nutrition\",\"fitness\",\"diet\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-12345']}, {'content': '{\"context\":\"User specifically mentioned salmon as one of their favorite meals\",\"preference\":\"Enjoys salmon as 
a protein source\",\"categories\":[\"food\",\"protein\",\"seafood\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-12345']}]\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "[{'type': 'text', 'text': \"\\n\\nI'd be happy to suggest some dinner options for you today! To provide recommendations that match your preferences, let me first check some information about your dietary needs and preferences.\"}, {'type': 'tool_use', 'name': 'retrieve_user_facts_from_past_conversations', 'input': {'query': 'dietary preferences, fitness goals, meal preferences', 'limit': 3}, 'id': 'tooluse_mlZ_AmFESOmY2nKRbtIaIw'}]\n", + "[{'type': 'text', 'text': \"\\n\\nLet me help you decide on a dinner option for tonight. To provide a more personalized suggestion, I'll check what I know about your food preferences.\"}, {'type': 'tool_use', 'name': 'get_past_conversation_facts', 'input': {'query': 'food preferences dinner meal protein'}, 'id': 'tooluse_-ozxj2RPTJGpq61-h46q6A'}]\n", "Tool Calls:\n", - " retrieve_user_facts_from_past_conversations (tooluse_mlZ_AmFESOmY2nKRbtIaIw)\n", - " Call ID: tooluse_mlZ_AmFESOmY2nKRbtIaIw\n", + " get_past_conversation_facts (tooluse_-ozxj2RPTJGpq61-h46q6A)\n", + " Call ID: tooluse_-ozxj2RPTJGpq61-h46q6A\n", " Args:\n", - " query: dietary preferences, fitness goals, meal preferences\n", - " limit: 3\n", + " query: food preferences dinner meal protein\n", "=================================\u001b[1m Tool Message \u001b[0m=================================\n", - "Name: retrieve_user_facts_from_past_conversations\n", + "Name: get_past_conversation_facts\n", "\n", - "[Item(namespace=['facts', 'user-1'], key='mem-c040719d-81a4-41eb-9b8a-6d4848d75dcf', value={'content': 'The user is focused on maintaining good macronutrient balance in their diet to support their weightlifting training.', 'memory_strategy_id': 'memory_semantic_ghc4p-SLjZ3l87ji', 
'namespaces': ['/facts/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.4182649), Item(namespace=['facts', 'user-1'], key='mem-596ad8e5-f561-4bea-861a-c498276e92f8', value={'content': 'The user is preparing for a weightlifting competition and is cooking a healthy meal of salmon, rice, and vegetables.', 'memory_strategy_id': 'memory_semantic_ghc4p-SLjZ3l87ji', 'namespaces': ['/facts/user-1']}, created_at='2025-09-24T17:55:09-07:00', updated_at='2025-09-24T17:55:09-07:00', score=0.38666412)]\n", + "1. The user is preparing for a weightlifting competition and is cooking a healthy meal of salmon with rice and vegetables. (relevance: 0.38) [id: mem-12f0bcaf-94cf-47d4-8c3c-da17fffc6bdd]\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", - "[User Context: {'content': '{\"context\":\"User is preparing for a weightlifting competition and is concerned about meal\\'s nutritional value\",\"preference\":\"Interested in meals that support athletic performance and nutrition\",\"categories\":[\"fitness\",\"nutrition\",\"sports\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}, {'content': '{\"context\":\"User mentioned cooking salmon with rice and vegetables, indicating a focus on healthy eating for a weightlifting competition\",\"preference\":\"Follows a health-conscious diet with high-protein, nutritious meals\",\"categories\":[\"food\",\"nutrition\",\"fitness\",\"diet\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-1']}]\n", + "[User Context: {'content': '{\"context\":\"User mentioned cooking salmon with rice and veggies, and preparing for a weightlifting competition\",\"preference\":\"Interested in healthy, nutritionally balanced meals that support athletic performance\",\"categories\":[\"food\",\"nutrition\",\"fitness\"]}', 'memory_strategy_id': 
'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-12345']}, {'content': '{\"context\":\"User is focused on meal preparation that supports weightlifting competition\",\"preference\":\"Prioritizes meals with good macronutrient balance\",\"categories\":[\"nutrition\",\"fitness\",\"diet\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-12345']}, {'content': '{\"context\":\"User specifically mentioned salmon as one of their favorite meals\",\"preference\":\"Enjoys salmon as a protein source\",\"categories\":[\"food\",\"protein\",\"seafood\"]}', 'memory_strategy_id': 'memory_preference_ghc4p-zjtCX1D9Eo', 'namespaces': ['/preferences/user-12345']}]\n", "==================================\u001b[1m Ai Message \u001b[0m==================================\n", "\n", "\n", "\n", - "Based on what I know about your dietary preferences and fitness goals, I have some dinner suggestions that would support your weightlifting training and competition preparation:\n", - "\n", - "### High-Protein Dinner Options:\n", + "Based on what I know about your preferences, I have a dinner suggestion for tonight:\n", "\n", - "1. **Herb-Crusted Chicken Breast with Sweet Potato and Roasted Vegetables**\n", - " - Lean protein from chicken\n", - " - Complex carbs from sweet potato\n", - " - Nutrients from colorful vegetables\n", - " - Season with herbs for flavor without excess sodium\n", + "Since you're preparing for a weightlifting competition and prioritize nutritionally balanced meals with good macronutrients, how about trying a different protein source tonight? \n", "\n", - "2. **Turkey and Vegetable Stir-Fry with Quinoa**\n", - " - Lean protein from turkey\n", - " - Complete protein and complex carbs from quinoa\n", - " - Variety of vegetables for micronutrients\n", - " - Light stir-fry sauce with ginger and garlic\n", + "**Dinner Suggestion: Baked Chicken Thighs with Sweet Potatoes and Roasted Broccoli**\n", "\n", - "3. 
**Baked White Fish with Lemon, Brown Rice and Steamed Broccoli**\n", - " - Similar to your salmon meal but with variety\n", - " - Clean protein source with minimal fat\n", - " - Complex carbs from brown rice\n", - " - Broccoli for fiber and micronutrients\n", + "This meal would:\n", + "- Provide a good balance of protein from the chicken thighs\n", + "- Offer complex carbohydrates from sweet potatoes for energy\n", + "- Include fiber and micronutrients from the broccoli\n", + "- Support your athletic performance needs\n", "\n", - "4. **Greek Yogurt Marinated Chicken Skewers with Mediterranean Vegetables and Farro**\n", - " - Protein-rich and tender chicken\n", - " - Ancient grain for sustained energy\n", - " - Vegetables for vitamins and minerals\n", - " - Good balance of macronutrients\n", + "It's a nice alternative to salmon (which I know you enjoy), while still meeting your nutritional goals for your weightlifting competition preparation.\n", "\n", - "Any of these options would provide a good balance of macronutrients to support your weightlifting training. Would you like me to provide a more detailed recipe for any of these suggestions?\n" + "Would you like a simple recipe for preparing this, or would you prefer a different suggestion?\n" ] } ],