Convert to python module named autogpt.

Also fixed the Dockerfile.
Converting to module makes development easier.
Fixes coverage script in CI and test imports.
This commit is contained in:
Dino Hensen 2023-04-14 18:28:58 +02:00 committed by Merwane Hamadi
parent a17a850b25
commit d64f866bfa
45 changed files with 352 additions and 90 deletions

View File

@ -32,11 +32,11 @@ jobs:
- name: Lint with flake8
continue-on-error: false
run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
- name: Run unittest tests with coverage
run: |
coverage run --source=scripts -m unittest discover tests
coverage run --source=autogpt -m unittest discover tests
- name: Generate coverage report
run: |

8
.gitignore vendored
View File

@ -1,7 +1,7 @@
scripts/keys.py
scripts/*json
scripts/node_modules/
scripts/__pycache__/keys.cpython-310.pyc
autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
package-lock.json
*.pyc
auto_gpt_workspace/*

View File

@ -17,7 +17,7 @@ COPY --chown=appuser:appuser requirements.txt .
RUN pip install --no-cache-dir --user -r requirements.txt
# Copy the application files
COPY --chown=appuser:appuser scripts/ .
COPY --chown=appuser:appuser autogpt/ .
# Set the entrypoint
ENTRYPOINT ["python", "main.py"]
ENTRYPOINT ["python", "-m", "autogpt"]

View File

@ -119,11 +119,11 @@ pip install -r requirements.txt
## 🔧 Usage
1. Run the `main.py` Python script in your terminal:
1. Run the `autogpt` Python module in your terminal:
_(Type this into your CMD window)_
```
python scripts/main.py
python -m autogpt
```
2. After each of action, enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter additional feedback for the AI.
@ -136,7 +136,21 @@ You will find activity and error logs in the folder `./output/logs`
To output debug logs:
```
python scripts/main.py --debug
python -m autogpt --debug
```
### Docker
You can also build this into a docker image and run it:
```
docker build -t autogpt .
docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt
```
You can pass extra arguments, for instance, running with `--gpt3only` and `--continuous` mode:
```
docker run -it --env-file=./.env -v $PWD/auto_gpt_workspace:/app/auto_gpt_workspace autogpt --gpt3only --continuous
```
### Command Line Arguments
Here are some common arguments you can use when running Auto-GPT:
@ -152,7 +166,7 @@ Here are some common arguments you can use when running Auto-GPT:
Use this to use TTS for Auto-GPT
```
python scripts/main.py --speak
python -m autogpt --speak
```
## 🔍 Google API Keys Configuration
@ -328,10 +342,10 @@ Continuous mode is not recommended.
It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise.
Use at your own risk.
1. Run the `main.py` Python script in your terminal:
1. Run the `autogpt` python module in your terminal:
```
python scripts/main.py --continuous
python -m autogpt --speak --continuous
```
@ -342,7 +356,7 @@ python scripts/main.py --continuous
If you don't have access to the GPT4 api, this mode will allow you to use Auto-GPT!
```
python scripts/main.py --gpt3only
python -m autogpt --speak --gpt3only
```
It is recommended to use a virtual machine for tasks that require high security measures to prevent any potential harm to the main computer's system and data.
@ -415,8 +429,8 @@ This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We
To run the linter, run the following command:
```
flake8 scripts/ tests/
flake8 autogpt/ tests/
# Or, if you want to run flake8 with the same configuration as the CI:
flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
```

View File

@ -1,16 +1,16 @@
import json
import random
import commands as cmd
import utils
from memory import get_memory, get_supported_memory_backends
import chat
from autogpt import commands as cmd
from autogpt import utils
from autogpt.memory import get_memory, get_supported_memory_backends
from autogpt import chat
from colorama import Fore, Style
from spinner import Spinner
from autogpt.spinner import Spinner
import time
import speak
from config import Config
from json_parser import fix_and_parse_json
from ai_config import AIConfig
from autogpt import speak
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.ai_config import AIConfig
import traceback
import yaml
import argparse

View File

@ -1,4 +1,4 @@
from llm_utils import create_chat_completion
from autogpt.llm_utils import create_chat_completion
next_key = 0
agents = {} # key, (task, full_message_history, model)

View File

@ -1,7 +1,7 @@
from typing import List
import json
from config import Config
from call_ai_function import call_ai_function
from autogpt.config import Config
from autogpt.call_ai_function import call_ai_function
cfg = Config()

View File

@ -1,8 +1,8 @@
import requests
from bs4 import BeautifulSoup
from memory import get_memory
from config import Config
from llm_utils import create_chat_completion
from autogpt.memory import get_memory
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from urllib.parse import urlparse, urljoin
cfg = Config()

View File

@ -1,8 +1,7 @@
from config import Config
from autogpt.config import Config
cfg = Config()
from llm_utils import create_chat_completion
from autogpt.llm_utils import create_chat_completion
# This is a magic function that can do anything with no-code. See

View File

@ -1,10 +1,10 @@
import time
import openai
from dotenv import load_dotenv
from config import Config
import token_counter
from llm_utils import create_chat_completion
from logger import logger
from autogpt.config import Config
from autogpt import token_counter
from autogpt.llm_utils import create_chat_completion
from autogpt.logger import logger
import logging
cfg = Config()

View File

@ -1,15 +1,15 @@
import browse
from autogpt import browse
import json
from memory import get_memory
from autogpt.memory import get_memory
import datetime
import agent_manager as agents
import speak
from config import Config
import ai_functions as ai
from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from execute_code import execute_python_file, execute_shell
from json_parser import fix_and_parse_json
from image_gen import generate_image
import autogpt.agent_manager as agents
from autogpt import speak
from autogpt.config import Config
import autogpt.ai_functions as ai
from autogpt.file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

View File

@ -1,8 +1,8 @@
import argparse
import logging
from config import Config
from memory import get_memory
from file_operations import ingest_file, search_files
from autogpt.config import Config
from autogpt.memory import get_memory
from autogpt.file_operations import ingest_file, search_files
cfg = Config()

View File

@ -2,7 +2,7 @@ import requests
import io
import os.path
from PIL import Image
from config import Config
from autogpt.config import Config
import uuid
import openai
from base64 import b64decode

View File

@ -1,9 +1,9 @@
import json
from typing import Any, Dict, Union
from call_ai_function import call_ai_function
from config import Config
from json_utils import correct_json
from logger import logger
from autogpt.call_ai_function import call_ai_function
from autogpt.config import Config
from autogpt.json_utils import correct_json
from autogpt.logger import logger
cfg = Config()

View File

@ -1,6 +1,6 @@
import re
import json
from config import Config
from autogpt.config import Config
cfg = Config()

View File

@ -1,7 +1,7 @@
import time
import openai
from colorama import Fore
from config import Config
from autogpt.config import Config
cfg = Config()

View File

@ -8,9 +8,9 @@ from colorama import Fore
from colorama import Style
import speak
from config import Config
from config import Singleton
from autogpt import speak
from autogpt.config import Config
from autogpt.config import Singleton
cfg = Config()

View File

@ -1,5 +1,5 @@
from memory.local import LocalCache
from memory.no_memory import NoMemory
from autogpt.memory.local import localcache
from autogpt.memory.no_memory import NoMemory
# List of supported memory backends
# Add a backend to this list if the import attempt is successful

View File

@ -1,6 +1,6 @@
"""Base class for memory providers."""
import abc
from config import AbstractSingleton, Config
from autogpt.config import AbstractSingleton, Config
import openai
cfg = Config()

View File

@ -3,7 +3,7 @@ import orjson
from typing import Any, List, Optional
import numpy as np
import os
from memory.base import MemoryProviderSingleton, get_ada_embedding
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
EMBED_DIM = 1536

View File

@ -1,3 +1,4 @@
from autogpt.config import Config, Singleton
import pinecone

View File

@ -6,8 +6,8 @@ from redis.commands.search.query import Query
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
import numpy as np
from memory.base import MemoryProviderSingleton, get_ada_embedding
from logger import logger
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
from autogpt.logger import logger
from colorama import Fore, Style

View File

@ -1,7 +1,7 @@
import os
from playsound import playsound
import requests
from config import Config
from autogpt.config import Config
cfg = Config()
import gtts
import threading

View File

@ -8,7 +8,7 @@ services:
- redis
build: ./
volumes:
- "./scripts:/app"
- "./autogpt:/app"
- ".env:/app/.env"
profiles: ["exclude-from-up"]

View File

@ -1 +1 @@
from scripts.main import main
from autogpt import main

245
scripts/agent.py Normal file
View File

@ -0,0 +1,245 @@
import commands as cmd
import json
import traceback
from tkinter.ttk import Style
from colorama import Fore
import chat
from config import Config
from logger import logger
import speak
from spinner import Spinner
class Agent:
"""Agent class for interacting with Auto-GPT.
Attributes:
ai_name: The name of the agent.
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
prompt: The prompt to use.
user_input: The user input.
"""
def __init__(self,
ai_name,
memory,
full_message_history,
next_action_count,
prompt,
user_input):
self.ai_name = ai_name
self.memory = memory
self.full_message_history = full_message_history
self.next_action_count = next_action_count
self.prompt = prompt
self.user_input = user_input
def start_interaction_loop(self):
# Interaction Loop
cfg = Config()
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
if cfg.continuous_mode and cfg.continuous_limit > 0 and loop_count > cfg.continuous_limit:
logger.typewriter_log("Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}")
break
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai(
self.prompt,
self.user_input,
self.full_message_history,
self.memory,
cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply))
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
if not cfg.continuous_mode and self.next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
self.user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
print(
f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {self.ai_name}...",
flush=True)
while True:
console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
if console_input.lower().rstrip() == "y":
self.user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(int(console_input.split(" ")[1]))
self.user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.")
continue
break
elif console_input.lower() == "n":
self.user_input = "EXIT"
break
else:
self.user_input = console_input
command_name = "human_feedback"
break
if self.user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"")
elif self.user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
# Print command
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command
if command_name is not None and command_name.lower().startswith("error"):
result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback":
result = f"Human feedback: {self.user_input}"
else:
result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
if self.next_action_count > 0:
self.next_action_count -= 1
memory_to_add = f"Assistant Reply: {assistant_reply} " \
f"\nResult: {result} " \
f"\nHuman Feedback: {self.user_input} "
self.memory.add(memory_to_add)
# Check if there's a result from the command append it to the message
# history
if result is not None:
self.full_message_history.append(chat.create_chat_message("system", result))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
self.full_message_history.append(
chat.create_chat_message(
"system", "Unable to execute command"))
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
cfg = Config()
if cfg.speak_mode and cfg.debug_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
try:
# Use regex to search for JSON objects
import regex
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
json_match = json_pattern.search(json_string)
if json_match:
# Extract the valid JSON object from the string
json_string = json_match.group(0)
logger.typewriter_log(title="Apparently json was fixed.", title_color=Fore.GREEN)
if cfg.speak_mode and cfg.debug_mode:
speak.say_text("Apparently json was fixed.")
else:
raise ValueError("No valid JSON object found")
except (json.JSONDecodeError, ValueError) as e:
if cfg.speak_mode:
speak.say_text("Didn't work. I will have to ignore this response then.")
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
json_string = {}
return json_string
def print_assistant_thoughts(assistant_reply):
"""Prints the assistant's thoughts to the console"""
global ai_name
global cfg
cfg = Config()
try:
try:
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
except json.JSONDecodeError as e:
logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
# Check if assistant_reply_json is a string and attempt to parse it into a JSON object
if isinstance(assistant_reply_json, str):
try:
assistant_reply_json = json.loads(assistant_reply_json)
except json.JSONDecodeError as e:
logger.error("Error: Invalid JSON\n", assistant_reply)
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply_json)
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
assistant_thoughts = assistant_reply_json.get("thoughts", {})
assistant_thoughts_text = assistant_thoughts.get("text")
if assistant_thoughts:
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
assistant_thoughts_plan = assistant_thoughts.get("plan")
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
assistant_thoughts_speak = assistant_thoughts.get("speak")
logger.typewriter_log(f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text)
logger.typewriter_log("REASONING:", Fore.YELLOW, assistant_thoughts_reasoning)
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split('\n')
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log("CRITICISM:", Fore.YELLOW, assistant_thoughts_criticism)
# Speak the assistant's thoughts
if cfg.speak_mode and assistant_thoughts_speak:
speak.say_text(assistant_thoughts_speak)
return assistant_reply_json
except json.decoder.JSONDecodeError as e:
logger.error("Error: Invalid JSON\n", assistant_reply)
if cfg.speak_mode:
speak.say_text("I have received an invalid JSON response from the OpenAI API. I cannot ignore this response.")
# All other errors, return "Error: + error message"
except Exception as e:
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)

View File

@ -1,8 +1,8 @@
import unittest
if __name__ == "__main__":
# Load all tests from the 'scripts/tests' package
suite = unittest.defaultTestLoader.discover('scripts/tests')
# Load all tests from the 'autogpt/tests' package
suite = unittest.defaultTestLoader.discover('autogpt/tests')
# Run the tests
unittest.TextTestRunner().run(suite)

View File

@ -3,16 +3,15 @@ import random
import string
import sys
from pathlib import Path
# Add the parent directory of the 'scripts' folder to the Python path
sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
from config import Config
from memory.local import LocalCache
from autogpt.config import Config
from autogpt.memory.local import LocalCache
class TestLocalCache(unittest.TestCase):
def random_string(self, length):
return ''.join(random.choice(string.ascii_letters) for _ in range(length))
return ''.join(
random.choice(string.ascii_letters) for _ in range(length))
def setUp(self):
cfg = cfg = Config()

View File

@ -1,8 +1,7 @@
import os
import sys
# Probably a better way:
sys.path.append(os.path.abspath('../scripts'))
from memory.local import LocalCache
from autogpt.memory.local import LocalCache
def MockConfig():

View File

@ -3,9 +3,7 @@ import unittest
import sys
import os
# Add the path to the "scripts" directory to import the PromptGenerator module
sys.path.append(os.path.abspath("../scripts"))
from promptgenerator import PromptGenerator
from autogpt.promptgenerator import PromptGenerator
# Create a test class for the PromptGenerator, subclassed from unittest.TestCase

View File

@ -1,5 +1,5 @@
import unittest
from scripts.config import Config
from autogpt.config import Config
class TestConfig(unittest.TestCase):

View File

@ -1,7 +1,7 @@
import unittest
import tests.context
from scripts.json_parser import fix_and_parse_json
from autogpt.json_parser import fix_and_parse_json
class TestParseJson(unittest.TestCase):

View File

@ -1,9 +1,9 @@
import unittest
import os
import sys
# Probably a better way:
sys.path.append(os.path.abspath('../scripts'))
from json_parser import fix_and_parse_json
from autogpt.json_parser import fix_and_parse_json
class TestParseJson(unittest.TestCase):
@ -108,6 +108,13 @@ class TestParseJson(unittest.TestCase):
# Assert that this raises an exception:
self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)
def test_that_apologies_containing_multiple_json_get_the_correct_one(self):
bad_json = 'I apologize once again for the error. Here is the corrected format to run the tests: ``` { "name": "execute_python_file", "args": { "file": "<test_file_location>" } } ``` Where `<test_file_location>` should be replaced with the file path to the test file you created in the previous step. For example: ``` { "name": "execute_python_file", "args": { "file": "tests/test_addition.py" } } ``` This will execute the tests for the `add_numbers` function in `tests/test_addition.py`. Please let me know if you have any further questions.'
actual_json = fix_and_parse_json(bad_json, try_to_fix_with_gpt=True)
expected_json = { "name": "execute_python_file", "args": { "file": "tests/test_addition.py" } }
self.assertEqual(actual_json, expected_json)
# TODO come back to fix this test after fixing imports
if __name__ == '__main__':
unittest.main()

View File

@ -3,7 +3,7 @@
import requests
from scripts.browse import scrape_text
from autogpt.browse import scrape_text
"""
Code Analysis