Commit
Add system messages for memories as a user prompt as well as a system message, which makes the performance much better.
ejhusom committed Nov 19, 2024
1 parent 62755a4 commit 3508783
Showing 2 changed files with 14 additions and 3 deletions.
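
To make the change concrete: the commit's pattern is to pass the same instruction string to the model twice, once through the `system_message` argument and once as a leading user message. A minimal sketch of that pattern follows; the helper `build_memory_messages` is hypothetical and exists only for illustration, while the settings key mirrors the one used in `src/memory_manager.py` below.

def build_memory_messages(instruction: str, text: str) -> list[dict]:
    """Hypothetical helper: repeat the instruction as the first user turn."""
    return [
        {"role": "user", "content": instruction},  # instruction repeated as a user message
        {"role": "user", "content": text},         # the content to remember or summarize
    ]

# The same instruction is then also supplied as the system message, e.g.:
# instruction = llm.settings.get("system_message_how_to_remember_information_in_prompt")
# summary = llm.generate_response(
#     build_memory_messages(instruction, text),
#     system_message=instruction,
# )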
4 changes: 4 additions & 0 deletions src/llm_interface.py
@@ -32,6 +32,8 @@ def generate_response(self, message_history, system_message=None):
"stream": False
}

print(data)

response = self._post_request(data)
if response:
return response.json().get("message", {}).get("content", "Error: No content received.")
@@ -46,6 +48,8 @@ def generate_streaming_response(self, message_history):
"stream": True
}

print(data)

full_response = []
response = self._post_request(data)

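
The added `print(data)` calls dump the full request payload just before it is posted. The payload construction is not shown in this diff, but the `response.json().get("message", {}).get("content", ...)` access pattern suggests an Ollama-style chat endpoint; under that assumption, and with purely illustrative model name, instruction text, and message contents, the printed dictionary would look roughly like this:

# Illustrative shape only; the real keys and values come from code not shown in this diff.
data = {
    "model": "llama3.1",  # hypothetical model name
    "messages": [
        {"role": "system", "content": "How to remember information ..."},  # the system_message (assumed placement)
        {"role": "user", "content": "How to remember information ..."},    # the same instruction as a user turn
        {"role": "user", "content": "Text the assistant was asked to remember."},
    ],
    "stream": False,
}
print(data)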
13 changes: 10 additions & 3 deletions src/memory_manager.py
@@ -59,10 +59,13 @@ def get_active_memories(self):
     def remember(self, text, category="general", priority=3):
         """Summarizes the text using the LLM and stores it as a memory."""
         # Prepare the message history for summarization
-        message_history = [{"role": "user", "content": text}]
+        messages = [
+            {"role": "user", "content": self.llm.settings.get("system_message_how_to_remember_information_in_prompt")},
+            {"role": "user", "content": text}
+        ]
         # Generate the summary or relevant info from the LLM
         summary = self.llm.generate_response(
-            message_history,
+            messages,
             system_message=self.llm.settings.get("system_message_how_to_remember_information_in_prompt")
         )
 
@@ -73,8 +76,12 @@ def remember(self, text, category="general", priority=3):
     def summarize_and_save(self, conversation, conversation_file):
         """Summarize the conversation and save it to memories."""
         conversation_text = "\n".join([entry["content"] for entry in conversation])
+        messages = [
+            {"role": "user", "content": self.llm.settings.get("system_message_how_to_extract_relevant_info_for_memory")},
+            {"role": "user", "content": conversation_text}
+        ]
         summary = self.llm.generate_response(
-            [{"role": "user", "content": conversation_text}],
+            messages,
             system_message=self.llm.settings.get("system_message_how_to_extract_relevant_info_for_memory")
         )
 
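
For illustration, a sketch of how the updated methods would be exercised; the `MemoryManager` class name, its constructor, and the file path are assumptions not shown in this diff, and `llm` stands for an already configured instance of the interface in `src/llm_interface.py`.

# Hypothetical wiring; the real constructor and settings are not shown in the diff.
manager = MemoryManager(llm=llm)

# remember(): the instruction from settings now reaches the model twice,
# once via system_message and once as the first user turn in `messages`.
manager.remember("The user prefers concise answers.", category="preferences", priority=2)

# summarize_and_save(): the conversation text gets the same treatment with the
# "system_message_how_to_extract_relevant_info_for_memory" instruction.
conversation = [
    {"role": "user", "content": "Call me Alex."},
    {"role": "assistant", "content": "Got it, Alex."},
]
manager.summarize_and_save(conversation, conversation_file="memories/session_001.json")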
