From 21ccaf2ce892aab71d54649846aee6768f4e7403 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 1/6] Refactor variable names and remove unnecessary blank lines in __main__.py

---
 autogpt/__main__.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index 29ccddbfc0d2..7fe6aec35ee9 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -3,13 +3,10 @@
 
 from colorama import Fore
 
 from autogpt.agent.agent import Agent
 from autogpt.args import parse_arguments
-
 from autogpt.config import Config, check_openai_api_key
 from autogpt.logs import logger
 from autogpt.memory import get_memory
-
 from autogpt.prompt import construct_prompt
-
 # Load environment variables from .env file
@@ -21,13 +18,13 @@ def main() -> None:
     parse_arguments()
     logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
     ai_name = ""
-    prompt = construct_prompt()
+    master_prompt = construct_prompt()
     # print(prompt)
     # Initialize variables
     full_message_history = []
     next_action_count = 0
     # Make a constant:
-    user_input = (
+    triggering_prompt = (
         "Determine which next command to use, and respond using the"
         " format specified above:"
     )
@@ -43,8 +40,8 @@ def main() -> None:
         memory=memory,
         full_message_history=full_message_history,
         next_action_count=next_action_count,
-        prompt=prompt,
-        user_input=user_input,
+        master_prompt=master_prompt,
+        triggering_prompt=triggering_prompt,
     )
     agent.start_interaction_loop()

From b50259c25daac4de70378309b619d9ff693dd0cc Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 2/6] Update variable names, improve comments, and modify input handling in agent.py

---
 autogpt/agent/agent.py | 45 +++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 32d982e52a4b..3be17a896474 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -19,9 +19,18 @@ class Agent:
         memory: The memory object to use.
         full_message_history: The full message history.
         next_action_count: The number of actions to execute.
-        prompt: The prompt to use.
-        user_input: The user input.
-
+        master_prompt: The master prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
+        Currently, the dynamic and customizable information in the master prompt are ai_name, description and goals.
+
+        triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
+            Determine which next command to use, and respond using the format specified above:
+            The triggering prompt is not part of the master prompt because between the master prompt and the triggering
+            prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
+            MASTER PROMPT
+            CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
+            TRIGGERING PROMPT
+
+        The triggering prompt reminds the AI about its short term meta task (defining the next task)
     """
 
     def __init__(
@@ -30,15 +39,15 @@
         self,
         memory,
         full_message_history,
         next_action_count,
-        prompt,
-        user_input,
+        master_prompt,
+        triggering_prompt,
     ):
         self.ai_name = ai_name
         self.memory = memory
         self.full_message_history = full_message_history
         self.next_action_count = next_action_count
-        self.prompt = prompt
-        self.user_input = user_input
+        self.master_prompt = master_prompt
+        self.triggering_prompt = triggering_prompt
 
     def start_interaction_loop(self):
         # Interaction Loop
@@ -62,8 +71,8 @@ def start_interaction_loop(self):
             # Send message to AI, get response
             with Spinner("Thinking... "):
                 assistant_reply = chat_with_ai(
-                    self.prompt,
-                    self.user_input,
+                    self.master_prompt,
+                    self.triggering_prompt,
                     self.full_message_history,
                     self.memory,
                     cfg.fast_token_limit,
@@ -88,7 +97,7 @@ def start_interaction_loop(self):
                 ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                 # Get key press: Prompt the user to press enter to continue or escape
                 # to exit
-                self.user_input = ""
+                user_input = ""
                 logger.typewriter_log(
                     "NEXT ACTION: ",
                     Fore.CYAN,
@@ -106,14 +115,14 @@ def start_interaction_loop(self):
                         Fore.MAGENTA + "Input:" + Style.RESET_ALL
                     )
                     if console_input.lower().rstrip() == "y":
-                        self.user_input = "GENERATE NEXT COMMAND JSON"
+                        user_input = "GENERATE NEXT COMMAND JSON"
                         break
                     elif console_input.lower().startswith("y -"):
                         try:
                             self.next_action_count = abs(
                                 int(console_input.split(" ")[1])
                             )
-                            self.user_input = "GENERATE NEXT COMMAND JSON"
+                            user_input = "GENERATE NEXT COMMAND JSON"
                         except ValueError:
                             print(
                                 "Invalid input format. Please enter 'y -n' where n is"
@@ -122,20 +131,20 @@ def start_interaction_loop(self):
                             continue
                         break
                     elif console_input.lower() == "n":
-                        self.user_input = "EXIT"
+                        user_input = "EXIT"
                         break
                     else:
-                        self.user_input = console_input
+                        user_input = console_input
                         command_name = "human_feedback"
                         break
 
-                if self.user_input == "GENERATE NEXT COMMAND JSON":
+                if user_input == "GENERATE NEXT COMMAND JSON":
                     logger.typewriter_log(
                         "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                         Fore.MAGENTA,
                         "",
                     )
-                elif self.user_input == "EXIT":
+                elif user_input == "EXIT":
                     print("Exiting...", flush=True)
                     break
             else:
@@ -153,7 +162,7 @@ def start_interaction_loop(self):
                     f"Command {command_name} threw the following error: {arguments}"
                 )
             elif command_name == "human_feedback":
-                result = f"Human feedback: {self.user_input}"
+                result = f"Human feedback: {user_input}"
             else:
                 result = (
                     f"Command {command_name} returned: "
@@ -165,7 +174,7 @@ def start_interaction_loop(self):
             memory_to_add = (
                 f"Assistant Reply: {assistant_reply} "
                 f"\nResult: {result} "
-                f"\nHuman Feedback: {self.user_input} "
+                f"\nHuman Feedback: {user_input} "
             )
 
             self.memory.add(memory_to_add)

From b5e0127b16bb88f6b6e18ada0efabc1422c9f3de Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 3/6] Only print JSON object validation message in debug mode

---
 autogpt/json_validation/validate_json.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py
index 127fcc17f4de..440c3b0b9199 100644
--- a/autogpt/json_validation/validate_json.py
+++ b/autogpt/json_validation/validate_json.py
@@ -24,7 +24,7 @@ def validate_json(json_object: object, schema_name: object) -> object:
             for error in errors:
                 logger.error(f"Error: {error.message}")
-    else:
+    elif CFG.debug_mode:
         print("The JSON object is valid.")
 
     return json_object

From 3b80253fb36b9709d48313aec5f407cc83e8c22d Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:16:48 -0700
Subject: [PATCH 4/6] Update process creation in benchmark script

---
 benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
index d6cae972d6ab..f7f1dac9dd31 100644
--- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
+++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
@@ -73,9 +73,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user():
 Not helpful.
 Needs improvement.
 Not what I need.'''
 
+    # TODO: add questions above, to distract it even more.
+
     command = f'{sys.executable} -m autogpt'
 
-    process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                               shell=True)
 
     stdout_output, stderr_output = process.communicate(input_data.encode())

From 89e0e8992795accfc41183723064dcdab9719f8e Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Sun, 16 Apr 2023 14:22:58 -0700
Subject: [PATCH 5/6] change master prompt to system prompt

---
 autogpt/__main__.py    |  4 ++--
 autogpt/agent/agent.py | 14 +++++++-------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index 7fe6aec35ee9..5f4622347d9a 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -18,7 +18,7 @@ def main() -> None:
     parse_arguments()
     logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
     ai_name = ""
-    master_prompt = construct_prompt()
+    system_prompt = construct_prompt()
     # print(prompt)
     # Initialize variables
     full_message_history = []
@@ -40,7 +40,7 @@ def main() -> None:
         memory=memory,
         full_message_history=full_message_history,
         next_action_count=next_action_count,
-        master_prompt=master_prompt,
+        system_prompt=system_prompt,
         triggering_prompt=triggering_prompt,
     )
     agent.start_interaction_loop()

diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 3be17a896474..9853f6a0b153 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -19,14 +19,14 @@ class Agent:
         memory: The memory object to use.
         full_message_history: The full message history.
         next_action_count: The number of actions to execute.
-        master_prompt: The master prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
-        Currently, the dynamic and customizable information in the master prompt are ai_name, description and goals.
+        system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
+        Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
 
         triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
             Determine which next command to use, and respond using the format specified above:
-            The triggering prompt is not part of the master prompt because between the master prompt and the triggering
+            The triggering prompt is not part of the system prompt because between the system prompt and the triggering
             prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
-            MASTER PROMPT
+            SYSTEM PROMPT
             CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
             TRIGGERING PROMPT
 
         The triggering prompt reminds the AI about its short term meta task (defining the next task)
@@ -39,14 +39,14 @@ def __init__(
         self,
         memory,
         full_message_history,
         next_action_count,
-        master_prompt,
+        system_prompt,
         triggering_prompt,
     ):
         self.ai_name = ai_name
         self.memory = memory
         self.full_message_history = full_message_history
         self.next_action_count = next_action_count
-        self.master_prompt = master_prompt
+        self.system_prompt = system_prompt
         self.triggering_prompt = triggering_prompt
 
     def start_interaction_loop(self):
@@ -71,7 +71,7 @@ def start_interaction_loop(self):
             # Send message to AI, get response
             with Spinner("Thinking... "):
                 assistant_reply = chat_with_ai(
-                    self.master_prompt,
+                    self.system_prompt,
                     self.triggering_prompt,
                     self.full_message_history,
                     self.memory,

From 4f33e1bf89e580355dfcf6890779799c584e9563 Mon Sep 17 00:00:00 2001
From: k-boikov
Date: Sun, 16 Apr 2023 18:38:08 +0300
Subject: [PATCH 6/6] add utf-8 encoding to file handlers for logging

---
 autogpt/logs.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/autogpt/logs.py b/autogpt/logs.py
index f18e21402c61..c1e436db97fc 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -46,7 +46,9 @@ def __init__(self):
         self.console_handler.setFormatter(console_formatter)
 
         # Info handler in activity.log
-        self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
+        self.file_handler = logging.FileHandler(
+            os.path.join(log_dir, log_file), 'a', 'utf-8'
+        )
         self.file_handler.setLevel(logging.DEBUG)
         info_formatter = AutoGptFormatter(
             "%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
@@ -54,7 +56,9 @@ def __init__(self):
         self.file_handler.setFormatter(info_formatter)
 
         # Error handler error.log
-        error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
+        error_handler = logging.FileHandler(
+            os.path.join(log_dir, error_file), 'a', 'utf-8'
+        )
         error_handler.setLevel(logging.ERROR)
         error_formatter = AutoGptFormatter(
             "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
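
The docstring introduced in PATCH 2 and renamed in PATCH 5 describes a three-layer message layout: system prompt, then contextual information, then the triggering prompt. The sketch below is illustrative only and is not part of the patches above; it assumes an OpenAI-style chat message format and uses a hypothetical helper name to show how such a layout could be assembled before each model call.

# Illustrative sketch only -- not code from the patches above.
# Assumes an OpenAI-style chat message format; build_messages is a hypothetical helper.
def build_messages(system_prompt, context_chunks, triggering_prompt):
    """Assemble SYSTEM PROMPT + CONTEXTUAL INFORMATION + TRIGGERING PROMPT."""
    messages = [{"role": "system", "content": system_prompt}]
    # Contextual information (memory, previous conversation) sits in the middle,
    for chunk in context_chunks:
        messages.append({"role": "system", "content": chunk})
    # and the triggering prompt is the last thing the model sees before answering.
    messages.append({"role": "user", "content": triggering_prompt})
    return messages

With this layout, distracting context never displaces the short-term instruction, because the triggering prompt always closes the conversation.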