Merge branch 'Significant-Gravitas:master' into 0xf333_branch
0xf333 authored Apr 16, 2023
2 parents b0accbf + 4f33e1b commit 30e7693
Showing 5 changed files with 42 additions and 29 deletions.
11 changes: 4 additions & 7 deletions autogpt/__main__.py
@@ -3,13 +3,10 @@
from colorama import Fore
from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments

from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory

from autogpt.prompt import construct_prompt

# Load environment variables from .env file


@@ -21,13 +18,13 @@ def main() -> None:
parse_arguments()
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
prompt = construct_prompt()
system_prompt = construct_prompt()
# print(prompt)
# Initialize variables
full_message_history = []
next_action_count = 0
# Make a constant:
user_input = (
triggering_prompt = (
"Determine which next command to use, and respond using the"
" format specified above:"
)
@@ -43,8 +40,8 @@ def main() -> None:
memory=memory,
full_message_history=full_message_history,
next_action_count=next_action_count,
prompt=prompt,
user_input=user_input,
system_prompt=system_prompt,
triggering_prompt=triggering_prompt,
)
agent.start_interaction_loop()

45 changes: 27 additions & 18 deletions autogpt/agent/agent.py
@@ -19,9 +19,18 @@ class Agent:
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
prompt: The prompt to use.
user_input: The user input.
system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
Currently, the dynamic and customizable information in the system prompt are ai_name, description and goals.
triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
Determine which next command to use, and respond using the format specified above:
The triggering prompt is not part of the system prompt because between the system prompt and the triggering
prompt we have contextual information that can distract the AI and make it forget that its goal is to find the next task to achieve.
SYSTEM PROMPT
CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
TRIGGERING PROMPT
The triggering prompt reminds the AI about its short term meta task (defining the next task)
"""

def __init__(
@@ -30,15 +39,15 @@ def __init__(
memory,
full_message_history,
next_action_count,
prompt,
user_input,
system_prompt,
triggering_prompt,
):
self.ai_name = ai_name
self.memory = memory
self.full_message_history = full_message_history
self.next_action_count = next_action_count
self.prompt = prompt
self.user_input = user_input
self.system_prompt = system_prompt
self.triggering_prompt = triggering_prompt

def start_interaction_loop(self):
# Interaction Loop
@@ -62,8 +71,8 @@ def start_interaction_loop(self):
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat_with_ai(
self.prompt,
self.user_input,
self.system_prompt,
self.triggering_prompt,
self.full_message_history,
self.memory,
cfg.fast_token_limit,
@@ -88,7 +97,7 @@ def start_interaction_loop(self):
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
self.user_input = ""
user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
@@ -106,14 +115,14 @@ def start_interaction_loop(self):
Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
if console_input.lower().rstrip() == "y":
self.user_input = "GENERATE NEXT COMMAND JSON"
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(
int(console_input.split(" ")[1])
)
self.user_input = "GENERATE NEXT COMMAND JSON"
user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print(
"Invalid input format. Please enter 'y -n' where n is"
@@ -122,20 +131,20 @@ def start_interaction_loop(self):
continue
break
elif console_input.lower() == "n":
self.user_input = "EXIT"
user_input = "EXIT"
break
else:
self.user_input = console_input
user_input = console_input
command_name = "human_feedback"
break

if self.user_input == "GENERATE NEXT COMMAND JSON":
if user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
)
elif self.user_input == "EXIT":
elif user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
@@ -153,7 +162,7 @@ def start_interaction_loop(self):
f"Command {command_name} threw the following error: {arguments}"
)
elif command_name == "human_feedback":
result = f"Human feedback: {self.user_input}"
result = f"Human feedback: {user_input}"
else:
result = (
f"Command {command_name} returned: "
@@ -165,7 +174,7 @@ def start_interaction_loop(self):
memory_to_add = (
f"Assistant Reply: {assistant_reply} "
f"\nResult: {result} "
f"\nHuman Feedback: {self.user_input} "
f"\nHuman Feedback: {user_input} "
)

self.memory.add(memory_to_add)
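The updated docstring describes a three-part message layout: the system prompt first, contextual information in the middle, and the triggering prompt last. As a rough illustration, here is a minimal sketch of how such a message list could be assembled for an OpenAI-style chat call; build_messages and the message contents are illustrative assumptions, not the repository's actual chat_with_ai implementation.

def build_messages(system_prompt: str, context: list, triggering_prompt: str) -> list:
    # Order: SYSTEM PROMPT, then CONTEXTUAL INFORMATION, then TRIGGERING PROMPT.
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend(context)  # memory snippets, prior turns, command results, ...
    # The triggering prompt goes last so the model stays focused on choosing the next command.
    messages.append({"role": "user", "content": triggering_prompt})
    return messages

messages = build_messages(
    system_prompt="You are EntrepreneurGPT. Goals: ...",  # illustrative system prompt
    context=[{"role": "system", "content": "Relevant memory: ..."}],  # illustrative context
    triggering_prompt=(
        "Determine which next command to use, and respond using the"
        " format specified above:"
    ),
)

Keeping the triggering prompt out of the system prompt, as the docstring explains, means the reminder about the short-term meta task always appears after whatever context was injected.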
2 changes: 1 addition & 1 deletion autogpt/json_validation/validate_json.py
@@ -24,7 +24,7 @@ def validate_json(json_object: object, schema_name: object) -> object:

for error in errors:
logger.error(f"Error: {error.message}")
else:
elif CFG.debug_mode:
print("The JSON object is valid.")

return json_object
8 changes: 6 additions & 2 deletions autogpt/logs.py
@@ -46,15 +46,19 @@ def __init__(self):
self.console_handler.setFormatter(console_formatter)

# Info handler in activity.log
self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
self.file_handler = logging.FileHandler(
os.path.join(log_dir, log_file), 'a', 'utf-8'
)
self.file_handler.setLevel(logging.DEBUG)
info_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
)
self.file_handler.setFormatter(info_formatter)

# Error handler error.log
error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
error_handler = logging.FileHandler(
os.path.join(log_dir, error_file), 'a', 'utf-8'
)
error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
5 changes: 4 additions & 1 deletion benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
@@ -73,9 +73,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user():
Not helpful.
Needs improvement.
Not what I need.'''
# TODO: add questions above, to distract it even more.

command = f'{sys.executable} -m autogpt'

process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)

stdout_output, stderr_output = process.communicate(input_data.encode())
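For context, the benchmark launches autogpt as a subprocess and pipes the scripted "difficult user" replies to it over stdin. A minimal sketch of that pattern, using a placeholder module and placeholder input so it runs without Auto-GPT installed:

import subprocess
import sys

# Scripted replies fed to the child process's stdin (placeholder content).
input_data = "y\nNot helpful.\nNeeds improvement.\nn\n"

# The real benchmark runs f'{sys.executable} -m autogpt'; timeit is a stand-in here.
command = f"{sys.executable} -m timeit"

process = subprocess.Popen(
    command,
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    shell=True,
)
stdout_output, stderr_output = process.communicate(input_data.encode())
print(stdout_output.decode(errors="replace"))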
