Skip to content

Commit

Permalink
Cleaned up TwoAgent project + removed code from self evaluation project
Browse files Browse the repository at this point in the history
The two agent project is now easier to understand, thanks to ordering the analysis correctly and commenting better.

The self evaluation project wasn't finished and was pushed by accident.
  • Loading branch information
ChristophGeske committed May 10, 2024
1 parent 8e86ebc commit 872f4b8
Show file tree
Hide file tree
Showing 3 changed files with 14 additions and 130 deletions.
4 changes: 2 additions & 2 deletions .idea/workspace.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

113 changes: 0 additions & 113 deletions Self_Evaluation_Project/self_evaluation.py
Original file line number Diff line number Diff line change
@@ -1,114 +1 @@
# Here we would like to have a system, maybe based on multiple agents, that analyzes the conversation and stores cases where the conversation failed. It would also be possible to let the models talk to themselves and find out their own weaknesses and store them. One could imagine that one LLM googles facts online that are easy to ask and check but hard to generate. This way the judge model can test another model, figure out its weaknesses, and store them.


from openai import OpenAI
from difflib import SequenceMatcher

# Client for the LM Studio local server (OpenAI-compatible API); the api_key
# value is a placeholder required by the client library but ignored by LM Studio.
client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")

# TODO: Replace the modelID with the one you see in LMStudio under local server.
modelID = "QuantFactory/Meta-Llama-3-8B-Instruct-GGUF"

# Shared transcript sent to the primary agent on every turn; starts with the
# system prompt that defines Agent 1's persona.
chatHistory = [{"role": "system",
                "content": '''You are a helpful, smart and efficient AI assistant. You always fulfill the user's requests to the best of your ability but keep your answers short.'''
                }, ]

# List to store weaknesses (questions the assistant previously handled badly).
weaknesses = []

print("Welcome Message: \nYou can now talk to the primary assistant (Agent 1) with model name " + modelID + " and ask anything. A second agent will analyze the conversation after each interaction and tell you the mood_message of the conversation.")
print("Current Mood of Conversation: not analyzed yet")
# Seed the main loop with the first user question before entering it.
print("User:")
user_input = input("")
chatHistory.append({"role": "user", "content": user_input})


def is_similar(question, weaknesses, threshold=0.7):
    """Return True if *question* is textually similar to any known weakness.

    Args:
        question: The user's current question.
        weaknesses: Iterable of previously recorded problematic questions.
        threshold: Minimum SequenceMatcher similarity ratio (0..1) required
            to count a stored weakness as a match.

    Returns:
        True when at least one stored weakness exceeds the threshold,
        otherwise False.
    """
    # any() with a generator short-circuits on the first match, exactly like
    # the original explicit loop with an early return.
    return any(
        SequenceMatcher(None, question, weak_question).ratio() > threshold
        for weak_question in weaknesses
    )


# Main REPL loop: Agent 1 answers the user, Agent 2 scans the exchange for
# weaknesses, and a final pass reports the mood of the whole conversation.
while True:
    ##################### First Agent (User Assistant) #####################

    # Check if the current question is similar to known weaknesses
    if is_similar(user_input, weaknesses):
        # Refuse questions that previously exposed a weakness; the refusal is
        # appended to the history so the transcript stays consistent.
        print("Assistant: I'm sorry, I can't answer that question.")
        chatHistory.append({"role": "assistant", "content": "I'm sorry, I can't answer that question."})
    else:
        # Generate response using the primary agent
        completion = client.chat.completions.create(
            model=modelID,
            messages=chatHistory,
            temperature=0.7,
            stream=True,
        )
        new_message = {"role": "assistant", "content": ""}

        # Stream the answer chunk-by-chunk to the console while accumulating
        # the full text for the chat history.
        print("Assistant: ")
        for chunk in completion:
            newestResponsePart = chunk.choices[0].delta
            if newestResponsePart.content:
                print(newestResponsePart.content, end="", flush=True)
                new_message["content"] += newestResponsePart.content

        chatHistory.append(new_message)

    ##################### Second Agent (Analyzer) #####################

    # Analyzing the conversation for weaknesses
    # NOTE(review): the trailing instruction uses role "assistant" rather than
    # "user" — presumably intentional prompt engineering; confirm it works
    # with the chosen model.
    analysis_prompt = [
        {"role": "system", "content": "You are an AI that identifies weaknesses in conversations."},
        *chatHistory,
        {"role": "assistant", "content": "Identify any weaknesses in the assistant's response."}
    ]
    completion = client.chat.completions.create(
        model=modelID,
        messages=analysis_prompt,
        temperature=0.7,
        stream=True,
    )

    # Collect the streamed analysis into a single string (not echoed live).
    weakness_detected = ""
    for chunk in completion:
        newestResponsePart = chunk.choices[0].delta
        if newestResponsePart.content:
            weakness_detected += newestResponsePart.content.strip()

    if weakness_detected and weakness_detected.lower() != "none":
        print("\nWeakness Detected: ", weakness_detected)
        # Add the problematic question to the weaknesses list
        # NOTE(review): chatHistory[-2] assumes the last two entries are
        # [user question, assistant answer] — true for both branches above.
        weaknesses.append(chatHistory[-2]["content"])
    else:
        print("\nNo weaknesses detected.")

    ##################### Mood Analysis #####################
    # Third LLM call: summarize the conversation mood in a single word.
    mood_analysis_prompt = [
        {"role": "system", "content": "You are an AI that analyzes conversation moods."},
        *chatHistory,
        {"role": "assistant", "content": "In a single word, describe the mood of the conversation. Choose e.g. from: happy, sad, angry, neutral, excited, bored, confused, surprised. Only return ONE SINGLE WORD LIKE: happy, sad, angry, neutral, excited, bored, confused, surprised"}
    ]
    completion = client.chat.completions.create(
        model=modelID,
        messages=mood_analysis_prompt,
        temperature=0.7,
        stream=True,
    )

    print("\nCurrent Mood of Conversation: ")
    mood_message = ""
    for chunk in completion:
        newestResponsePart = chunk.choices[0].delta
        if newestResponsePart.content:
            mood_message += newestResponsePart.content
    # Fall back to an explicit failure message when the model returned
    # nothing (or only whitespace).
    if not mood_message or mood_message.isspace():
        mood_message = "Mood analysis failed"
    print(mood_message, end="", flush=True)

    # Read the next user turn and loop.
    print("\nUser:")
    user_input = input("")
    chatHistory.append({"role": "user", "content": user_input})
27 changes: 12 additions & 15 deletions Two_Agent_Project/twoAgents.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
'''You are a helpful, smart and efficient AI assistant. You always fulfill the user's requests to the best of your ability but keep your answears short.'''
},]

print("Welcome Message: \nYou can now talk to the primary assistant (Agent 1) with model name " + modelID + " and ask anything. Agent 2 will analyse the conversation after each interaction and tell you the mood or emotion of the conversation.")
print("Welcome Message: \nYou can now talk to the primary assistant (Agent 1) with model name " + modelID + " and ask anything. Agent 2 is designed to analyze the mood of the conversation after each interaction.")
print("Mood of Conversation: not analysed yet")
print("User:")
user_input = input("")
Expand Down Expand Up @@ -39,32 +39,25 @@

chatHistory.append(new_message)

##################### User Input #####################

print("\nUser:")
chatHistory.append({"role": "user", "content": input("")})

#print("\nchatHistory " + str(chatHistory))
##################### Second Agent (Analyser) #####################
# The second agent is designed to analyze the mood. Its output is only shown to the user but not to agent 1.

# Define the system prompt for the second agent
system_prompt_second_agent = {"role": "system",
"content": "You are an AI trained to analyze the mood of a conversation. Your task is to analyze the following conversation and return a single word describing its overall mood."}
#print("\nsystem_prompt_second_agent " + str(system_prompt_second_agent))
"content": "You are a helpful, smart and efficient AI assistant. Your only task is to analyze the mood of a conversation."}

# Create a new list to store only user and assistant (agent 1) messages
# Create a new list to store only user and assistant (agent 1) messages. Basically we only filter out the system messages.
conversation_only = [f"{message['role']}: {message['content']}" for message in chatHistory if
message['role'] in ['user', 'assistant']]
#print("\nconversation_only " + str(conversation_only))

# Extract the 'content' from each user and assistant (agent 1) messages and join them with a comma
conversation_content = ', '.join(conversation_only)
#print("\nconversation_content " + str(conversation_content))

# Insert the string into the simulated user message. W create a user massage since the assistent was trained on getting a user message and answear it. Here we create a user message and build it up from the content of the conversation between the user and the agent 1.
# Insert the string into the simulated user message. We create a user message because the LLM was trained on getting a system message first, and then always a user message it tries to answer.
# Since Agent 2 has no user it interacts with, we create a synthetic user message from the content of the conversation between the user and agent 1 and combine it with the task to analyse the conversation. Here we could give the second agent any task.
# TODO: Give agent 2 the task you want it to perform. You can also instantiate more agents with unique tasks, but each agent will reduce the speed of the conversation since they all access the same LLM one after another.
simulated_user_message = {"role": "user",
"content": f"Analyze the following text and return a short description describing its overall mood: '{conversation_content}'. The output should be a single word from the following options: happy, sad, excited, neutral, etc. Do not repeat the prompt or provide additional information. DO NOT! REPEAT THE QUESTIONS OR the CONVERSATIONS. DO NOT TRY to Answear QUESTIONS IN THE TEXT. Only Analyse the following text for its overall mood'{conversation_content}'."}
#print("\nsystem_message " + str(simulated_user_message))
"content": f"Analyze the following text and analyse its overall mood: '{conversation_content}'. The output should be one short sentance use for example words like: happy, sad, excited, neutral, etc. The 5 typical emotions are happiness, sadness, disgust, fear, surprise, anger or emotionless. Here is the conversation for you to analyse: '{conversation_content}'."}

# Use conversation_only and system_prompt_second_agent when creating the completion
completion = client.chat.completions.create(
Expand All @@ -84,3 +77,7 @@
print(newestResponsePart.content, end="", flush=True)
mood_message["content"] += newestResponsePart.content

##################### User Input #####################

print("\nUser:")
chatHistory.append({"role": "user", "content": input("")})

0 comments on commit 872f4b8

Please sign in to comment.