diff --git a/src/llm_interface.py b/src/llm_interface.py
index dd3c650..2f098cf 100644
--- a/src/llm_interface.py
+++ b/src/llm_interface.py
@@ -32,6 +32,8 @@ def generate_response(self, message_history, system_message=None):
             "stream": False
         }
 
+        print(data)
+
         response = self._post_request(data)
         if response:
             return response.json().get("message", {}).get("content", "Error: No content received.")
@@ -46,6 +48,8 @@ def generate_streaming_response(self, message_history):
             "stream": True
         }
 
+        print(data)
+
         full_response = []
 
         response = self._post_request(data)
diff --git a/src/memory_manager.py b/src/memory_manager.py
index d4724d8..9d5ea1c 100644
--- a/src/memory_manager.py
+++ b/src/memory_manager.py
@@ -59,10 +59,13 @@ def get_active_memories(self):
     def remember(self, text, category="general", priority=3):
         """Summarizes the text using the LLM and stores it as a memory."""
         # Prepare the message history for summarization
-        message_history = [{"role": "user", "content": text}]
+        messages = [
+            {"role": "user", "content": self.llm.settings.get("system_message_how_to_remember_information_in_prompt")},
+            {"role": "user", "content": text}
+        ]
         # Generate the summary or relevant info from the LLM
         summary = self.llm.generate_response(
-            message_history,
+            messages,
             system_message=self.llm.settings.get("system_message_how_to_remember_information_in_prompt")
         )
 
@@ -73,8 +76,12 @@ def summarize_and_save(self, conversation, conversation_file):
         """Summarize the conversation and save it to memories."""
         conversation_text = "\n".join([entry["content"] for entry in conversation])
+        messages = [
+            {"role": "user", "content": self.llm.settings.get("system_message_how_to_extract_relevant_info_for_memory")},
+            {"role": "user", "content": conversation_text}
+        ]
         summary = self.llm.generate_response(
-            [{"role": "user", "content": conversation_text}],
+            messages,
             system_message=self.llm.settings.get("system_message_how_to_extract_relevant_info_for_memory")
         )