Mirror of https://github.com/Significant-Gravitas/Auto-GPT.git, synced 2025-01-09 04:19:02 +08:00
Rearrange tests & fix CI (#4596)
* Rearrange tests into unit/integration/challenge categories
* Fix linting + `tests.challenges` imports
* Fix obscured duplicate test in test_url_validation.py
* Move VCR conftest to tests.vcr
* Specify tests to run & their order (unit -> integration -> challenges) in CI
* Fail Docker CI when tests fail
* Fix import & linting errors in tests
* Fix `get_text_summary`
* Fix linting errors
* Clean up pytest args in CI
* Remove bogus tests from GoCodeo
This commit is contained in:
parent 8a881f70a3
commit dafbd11686
9 .github/workflows/ci.yml (vendored)
@@ -5,7 +5,7 @@ on:
     branches: [ master, ci-test* ]
     paths-ignore:
       - 'tests/Auto-GPT-test-cassettes'
-      - 'tests/integration/challenges/current_score.json'
+      - 'tests/challenges/current_score.json'
   pull_request:
     branches: [ stable, master ]
   pull_request_target:
@@ -148,8 +148,9 @@ jobs:

       - name: Run pytest with coverage
         run: |
-          pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
-          python tests/integration/challenges/utils/build_current_score.py
+          pytest -n auto --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
+            tests/unit tests/integration tests/challenges
+          python tests/challenges/utils/build_current_score.py
         env:
           CI: true
           PROXY: ${{ secrets.PROXY }}
@@ -179,7 +180,7 @@ jobs:
       - name: Push updated challenge scores
         if: github.event_name == 'push'
         run: |
-          score_file="tests/integration/challenges/current_score.json"
+          score_file="tests/challenges/current_score.json"

           if ! git diff --quiet $score_file; then
             git add $score_file
11 .github/workflows/docker-ci.yml (vendored)
@@ -5,7 +5,7 @@ on:
     branches: [ master ]
     paths-ignore:
       - 'tests/Auto-GPT-test-cassettes'
-      - 'tests/integration/challenges/current_score.json'
+      - 'tests/challenges/current_score.json'
   pull_request:
     branches: [ master, stable ]

@@ -108,15 +108,18 @@ jobs:
           set +e
           test_output=$(
             docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
-              pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
+              pytest -n auto --cov=autogpt --cov-branch --cov-report term-missing \
+                tests/unit tests/integration 2>&1
           )
           test_failure=$?

           echo "$test_output"

           cat << $EOF >> $GITHUB_STEP_SUMMARY
           # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
           \`\`\`
           $test_output
           \`\`\`
           $EOF

           exit $test_failure
2 .github/workflows/pr-label.yml (vendored)
@@ -6,7 +6,7 @@ on:
     branches: [ master ]
     paths-ignore:
       - 'tests/Auto-GPT-test-cassettes'
-      - 'tests/integration/challenges/current_score.json'
+      - 'tests/challenges/current_score.json'
   # So that the `dirtyLabel` is removed if conflicts are resolved
   # We recommend `pull_request_target` so that github secrets are available.
   # In `pull_request` we wouldn't be able to change labels of fork PRs
@@ -142,7 +142,7 @@ def get_text_summary(url: str, question: str, config: Config) -> str:
     Returns:
         str: The summary of the text
     """
-    text = scrape_text(url)
+    text = scrape_text(url, config)
     summary, _ = summarize_text(text, question=question)

     return f""" "Result" : {summary}"""
@@ -70,7 +70,7 @@ def kubernetes_agent(
 ```

 ## Creating your challenge
-Go to `tests/integration/challenges` and create a file that is called `test_your_test_description.py` and add it to the appropriate folder. If no category exists you can create a new one.
+Go to `tests/challenges` and create a file that is called `test_your_test_description.py` and add it to the appropriate folder. If no category exists you can create a new one.

 Your test could look something like this

@@ -84,7 +84,7 @@ import yaml

 from autogpt.commands.file_operations import read_file, write_to_file
 from tests.integration.agent_utils import run_interaction_loop
-from tests.integration.challenges.utils import run_multiple_times
+from tests.challenges.utils import run_multiple_times
 from tests.utils import requires_api_key
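As a companion to the guide above, a minimal sketch of the shape such a test file might take after the move; the `workspace` fixture and the file contents are illustrative assumptions, not taken from this diff:

```python
from autogpt.commands.file_operations import read_file, write_to_file
from tests.utils import requires_api_key


@requires_api_key("OPENAI_API_KEY")
def test_your_test_description(workspace) -> None:
    # Hypothetical body: a real challenge drives the agent through an
    # interaction loop, then inspects the files it produced.
    file_path = str(workspace.get_path("output.txt"))
    write_to_file(file_path, "Hello World")
    assert "Hello World" in read_file(file_path)
```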
@@ -5,7 +5,7 @@
 **Command to try**:

 ```
-pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py --level=2
+pytest -s tests/challenges/information_retrieval/test_information_retrieval_challenge_a.py --level=2
 ```

 ## Description
@@ -5,7 +5,7 @@
 **Command to try**:

 ```
-pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_b.py
+pytest -s tests/challenges/information_retrieval/test_information_retrieval_challenge_b.py
 ```

 ## Description
@@ -4,7 +4,7 @@

 **Command to try**:
 ```
-pytest -s tests/integration/challenges/memory/test_memory_challenge_b.py --level=3
+pytest -s tests/challenges/memory/test_memory_challenge_b.py --level=3
 ```

 ## Description
@@ -41,4 +41,3 @@ Write all the task_ids into the file output.txt. The file has not been created yet.
 ## Objective

 The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task IDs throughout the process. The agent successfully completed this challenge if it wrote the task ids in a file.
@@ -4,7 +4,7 @@

 **Command to try**:
 ```
-pytest -s tests/integration/challenges/memory/test_memory_challenge_c.py --level=2
+pytest -s tests/challenges/memory/test_memory_challenge_c.py --level=2
 ```

 ## Description
2 mypy.ini
@@ -2,7 +2,7 @@
 follow_imports = skip
 check_untyped_defs = True
 disallow_untyped_defs = True
-files = tests/integration/challenges/**/*.py
+files = tests/challenges/**/*.py

 [mypy-requests.*]
 ignore_missing_imports = True
@@ -1,10 +1,8 @@
 import pytest

 from autogpt.agent import Agent
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key

 CYCLE_COUNT = 2
@@ -4,10 +4,8 @@ from pytest_mock import MockerFixture
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key

 CYCLE_COUNT = 3
@@ -4,11 +4,9 @@ from typing import Any, Callable, Optional

 import pytest

-from tests.integration.challenges.challenge_decorator.challenge import Challenge
-from tests.integration.challenges.challenge_decorator.challenge_utils import (
-    create_challenge,
-)
-from tests.integration.challenges.challenge_decorator.score_utils import (
+from tests.challenges.challenge_decorator.challenge import Challenge
+from tests.challenges.challenge_decorator.challenge_utils import create_challenge
+from tests.challenges.challenge_decorator.score_utils import (
     get_scores,
     update_new_score,
 )
@@ -1,7 +1,7 @@
 import os
 from typing import Any, Callable, Dict, Optional, Tuple

-from tests.integration.challenges.challenge_decorator.challenge import Challenge
+from tests.challenges.challenge_decorator.challenge import Challenge

 CHALLENGE_PREFIX = "test_"
@@ -2,7 +2,7 @@ import json
 import os
 from typing import Any, Dict, Optional, Tuple

-from tests.integration.challenges.challenge_decorator.challenge import Challenge
+from tests.challenges.challenge_decorator.challenge import Challenge

 CURRENT_SCORE_LOCATION = "../current_score"
 NEW_SCORE_LOCATION = "../new_score"
@@ -5,9 +5,8 @@ from _pytest.config import Config
 from _pytest.config.argparsing import Parser
 from _pytest.fixtures import FixtureRequest

-from tests.integration.challenges.challenge_decorator.challenge import Challenge
-from tests.integration.conftest import BASE_VCR_CONFIG
-from tests.vcr.vcr_filter import before_record_response
+from tests.challenges.challenge_decorator.challenge import Challenge
+from tests.vcr import BASE_VCR_CONFIG, before_record_response


 def before_record_response_filter_errors(
@@ -7,10 +7,8 @@ from autogpt.agent import Agent
 from autogpt.commands.execute_code import execute_python_file
 from autogpt.commands.file_operations import append_to_file, write_to_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key

 CYCLE_COUNT = 5
@@ -3,10 +3,8 @@ from pytest_mock import MockerFixture

 from autogpt.commands.file_operations import read_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key

 CYCLE_COUNT = 3
@@ -6,10 +6,8 @@ from pytest_mock import MockerFixture
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key

 CYCLE_COUNT = 3
@@ -5,10 +5,8 @@ from pytest_mock import MockerFixture
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key

 CYCLE_COUNT = 3
@@ -4,10 +4,8 @@ from pytest_mock import MockerFixture
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file, write_to_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key

@@ -4,10 +4,8 @@ from pytest_mock import MockerFixture
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file, write_to_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import generate_noise, run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import generate_noise, run_interaction_loop
 from tests.utils import requires_api_key

 NOISE = 1000
@@ -4,10 +4,8 @@ from pytest_mock import MockerFixture
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file, write_to_file
 from autogpt.config import Config
-from tests.integration.challenges.challenge_decorator.challenge_decorator import (
-    challenge,
-)
-from tests.integration.challenges.utils import generate_noise, run_interaction_loop
+from tests.challenges.challenge_decorator.challenge_decorator import challenge
+from tests.challenges.utils import generate_noise, run_interaction_loop
 from tests.utils import requires_api_key

 NOISE = 1000
@@ -26,12 +26,8 @@ def recursive_sort_dict(data: dict) -> dict:


 cwd = os.getcwd()  # get current working directory
-new_score_filename_pattern = os.path.join(
-    cwd, "tests/integration/challenges/new_score_*.json"
-)
-current_score_filename = os.path.join(
-    cwd, "tests/integration/challenges/current_score.json"
-)
+new_score_filename_pattern = os.path.join(cwd, "tests/challenges/new_score_*.json")
+current_score_filename = os.path.join(cwd, "tests/challenges/current_score.json")

 merged_data: Dict[str, Any] = {}
 for filename in glob.glob(new_score_filename_pattern):
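To make the surrounding snippet concrete: the script globs every per-run `new_score_*.json`, merges them into one dict, and writes the result as the new `current_score.json`. A minimal sketch under those assumptions; the real script's merge and output details are not fully shown in this hunk:

```python
import glob
import json
import os
from typing import Any, Dict


def recursive_sort_dict(data: dict) -> dict:
    # Sort keys at every nesting level so the merged score file is deterministic.
    return {
        k: recursive_sort_dict(v) if isinstance(v, dict) else v
        for k, v in sorted(data.items())
    }


cwd = os.getcwd()
merged_data: Dict[str, Any] = {}
for filename in glob.glob(os.path.join(cwd, "tests/challenges/new_score_*.json")):
    with open(filename) as f:
        merged_data.update(json.load(f))  # assumed merge-by-update semantics

with open(os.path.join(cwd, "tests/challenges/current_score.json"), "w") as f:
    json.dump(recursive_sort_dict(merged_data), f, indent=4)
```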
@@ -1,4 +1,3 @@
-import os
 from pathlib import Path

 import pytest
@@ -8,15 +7,11 @@ from autogpt.config.config import Config
 from autogpt.llm.api_manager import ApiManager
 from autogpt.workspace import Workspace

-pytest_plugins = ["tests.integration.agent_factory", "tests.integration.memory.utils"]
-
-PROXY = os.environ.get("PROXY")
-
-
-@pytest.fixture()
-def vcr_cassette_dir(request):
-    test_name = os.path.splitext(request.node.name)[0]
-    return os.path.join("tests/Auto-GPT-test-cassettes", test_name)
+pytest_plugins = [
+    "tests.integration.agent_factory",
+    "tests.integration.memory.utils",
+    "tests.vcr",
+]


 @pytest.fixture()
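Registering `tests.vcr` in `pytest_plugins` makes the fixtures it defines (`vcr_config`, `vcr_cassette_dir`, `patched_api_requestor`) available to every test without an explicit import. A minimal sketch of a consumer, with a hypothetical test name:

```python
import pytest


@pytest.mark.vcr  # pytest-recording reads the session-scoped vcr_config fixture
def test_calls_openai(patched_api_requestor) -> None:
    # patched_api_requestor is injected by pytest because tests/vcr is
    # registered as a plugin in tests/conftest.py; no import is needed here.
    ...
```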
@@ -1,56 +0,0 @@
-import os
-
-import openai.api_requestor
-import pytest
-from pytest_mock import MockerFixture
-
-from tests.conftest import PROXY
-from tests.vcr.vcr_filter import before_record_request, before_record_response
-
-BASE_VCR_CONFIG = {
-    "record_mode": "new_episodes",
-    "before_record_request": before_record_request,
-    "before_record_response": before_record_response,
-    "filter_headers": [
-        "Authorization",
-        "X-OpenAI-Client-User-Agent",
-        "User-Agent",
-    ],
-    "match_on": ["method", "body"],
-}
-
-
-@pytest.fixture(scope="session")
-def vcr_config():
-    # this fixture is called by the pytest-recording vcr decorator.
-    return BASE_VCR_CONFIG
-
-
-def patch_api_base(requestor):
-    new_api_base = f"{PROXY}/v1"
-    requestor.api_base = new_api_base
-    return requestor
-
-
-@pytest.fixture
-def patched_api_requestor(mocker: MockerFixture):
-    original_init = openai.api_requestor.APIRequestor.__init__
-    original_validate_headers = openai.api_requestor.APIRequestor._validate_headers
-
-    def patched_init(requestor, *args, **kwargs):
-        original_init(requestor, *args, **kwargs)
-        patch_api_base(requestor)
-
-    def patched_validate_headers(self, supplied_headers):
-        headers = original_validate_headers(self, supplied_headers)
-        headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
-        headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
-        return headers
-
-    if PROXY:
-        mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init)
-        mocker.patch.object(
-            openai.api_requestor.APIRequestor,
-            "_validate_headers",
-            new=patched_validate_headers,
-        )
@@ -1,32 +0,0 @@
-"""Unit tests for the commands module"""
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from autogpt.app import list_agents, start_agent
-from tests.utils import requires_api_key
-
-
-@pytest.mark.vcr
-@pytest.mark.integration_test
-@requires_api_key("OPENAI_API_KEY")
-def test_make_agent(patched_api_requestor, config) -> None:
-    """Test that an agent can be created"""
-    # Use the mock agent manager to avoid creating a real agent
-    with patch("openai.ChatCompletion.create") as mock:
-        response = MagicMock()
-        # del response.error
-        response.choices[0].messages[0].content = "Test message"
-        response.usage.prompt_tokens = 1
-        response.usage.completion_tokens = 1
-        mock.return_value = response
-        start_agent(
-            "Test Agent", "chat", "Hello, how are you?", config, "gpt-3.5-turbo"
-        )
-        agents = list_agents(config)
-        assert "List of agents:\n0: chat" == agents
-        start_agent(
-            "Test Agent 2", "write", "Hello, how are you?", config, "gpt-3.5-turbo"
-        )
-        agents = list_agents(config)
-        assert "List of agents:\n0: chat\n1: write" == agents
@@ -1,74 +0,0 @@
-# Date: 2023-5-13
-# Author: Generated by GoCodeo.
-import pytest
-
-from autogpt.commands.analyze_code import analyze_code
-from autogpt.config import Config
-
-
-@pytest.fixture
-def mock_call_ai_function(mocker):
-    return mocker.patch("autogpt.commands.analyze_code.call_ai_function")
-
-
-class TestAnalyzeCode:
-    def test_positive_analyze_code(self, mock_call_ai_function):
-        # Positive Test
-        mock_call_ai_function.return_value = ["Suggestion 1", "Suggestion 2"]
-        code = "def example_function():\n    pass"
-        config = Config()
-        result = analyze_code(code, config)
-        assert result == ["Suggestion 1", "Suggestion 2"]
-        mock_call_ai_function.assert_called_once_with(
-            "def analyze_code(code: str) -> list[str]:",
-            [code],
-            "Analyzes the given code and returns a list of suggestions for improvements.",
-            config=config,
-        )
-
-    def test_negative_analyze_code(
-        self,
-        mock_call_ai_function,
-        config: Config,
-    ):
-        # Negative Test
-        mock_call_ai_function.return_value = []
-        code = "def example_function():\n    pass"
-        result = analyze_code(code, config)
-        assert result == []
-        mock_call_ai_function.assert_called_once_with(
-            "def analyze_code(code: str) -> list[str]:",
-            [code],
-            "Analyzes the given code and returns a list of suggestions for improvements.",
-            config=config,
-        )
-
-    def test_error_analyze_code(self, mock_call_ai_function, config: Config):
-        # Error Test
-        mock_call_ai_function.side_effect = Exception("Error occurred")
-        code = "def example_function():\n    pass"
-        with pytest.raises(Exception):
-            result = analyze_code(code, config)
-            mock_call_ai_function.assert_called_once_with(
-                "def analyze_code(code: str) -> list[str]:",
-                [code],
-                "Analyzes the given code and returns a list of suggestions for improvements.",
-                config=config,
-            )
-
-    def test_edge_analyze_code_empty_code(
-        self,
-        mock_call_ai_function,
-        config: Config,
-    ):
-        # Edge Test
-        mock_call_ai_function.return_value = ["Suggestion 1", "Suggestion 2"]
-        code = ""
-        result = analyze_code(code, config)
-        assert result == ["Suggestion 1", "Suggestion 2"]
-        mock_call_ai_function.assert_called_once_with(
-            "def analyze_code(code: str) -> list[str]:",
-            [code],
-            "Analyzes the given code and returns a list of suggestions for improvements.",
-            config=config,
-        )
@@ -1,56 +0,0 @@
-# Date: 2023-5-13
-# Author: Generated by GoCodeo.
-import json
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from autogpt.commands.audio_text import read_audio
-
-
-class TestReadAudio:
-    @patch("requests.post")
-    def test_positive_read_audio(self, mock_post, config):
-        # Positive Test
-        audio_data = b"test_audio_data"
-        mock_response = MagicMock()
-        mock_response.content.decode.return_value = json.dumps(
-            {"text": "Hello, world!"}
-        )
-        mock_post.return_value = mock_response
-
-        config.huggingface_api_token = "testing-token"
-        result = read_audio(audio_data, config)
-        assert result == "The audio says: Hello, world!"
-        mock_post.assert_called_once_with(
-            f"https://api-inference.huggingface.co/models/{config.huggingface_audio_to_text_model}",
-            headers={"Authorization": f"Bearer {config.huggingface_api_token}"},
-            data=audio_data,
-        )
-
-    @patch("requests.post")
-    def test_negative_read_audio(self, mock_post, config):
-        # Negative Test
-        audio_data = b"test_audio_data"
-        mock_response = MagicMock()
-        mock_response.content.decode.return_value = json.dumps({"text": ""})
-        mock_post.return_value = mock_response
-        config.huggingface_api_token = "testing-token"
-        result = read_audio(audio_data, config)
-        assert result == "The audio says: "
-        mock_post.assert_called_once_with(
-            f"https://api-inference.huggingface.co/models/{config.huggingface_audio_to_text_model}",
-            headers={"Authorization": f"Bearer {config.huggingface_api_token}"},
-            data=audio_data,
-        )
-
-    def test_error_read_audio(self, config):
-        # Error Test
-        config.huggingface_api_token = None
-        with pytest.raises(ValueError):
-            read_audio(b"test_audio_data", config)
-
-    def test_edge_read_audio_empty_audio(self, config):
-        # Edge Test
-        with pytest.raises(ValueError):
-            read_audio(b"", config)
@@ -1,55 +0,0 @@
-# Date: 2023-5-13
-# Author: Generated by GoCodeo.
-
-
-from unittest.mock import mock_open, patch
-
-import pytest
-
-from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.config import Config
-
-
-@pytest.fixture
-def mock_read_audio(mocker):
-    return mocker.patch("autogpt.commands.audio_text.read_audio")
-
-
-class TestReadAudioFromFile:
-    def test_positive_read_audio_from_file(self, mock_read_audio):
-        # Positive test
-        mock_read_audio.return_value = "This is a sample text."
-        mock_file_data = b"Audio data"
-        m = mock_open(read_data=mock_file_data)
-
-        with patch("builtins.open", m):
-            result = read_audio_from_file("test_audio.wav", Config())
-            assert result == "This is a sample text."
-            m.assert_called_once_with("test_audio.wav", "rb")
-
-    def test_negative_read_audio_from_file(self, mock_read_audio):
-        # Negative test
-        mock_read_audio.return_value = "This is a sample text."
-        mock_file_data = b"Audio data"
-        m = mock_open(read_data=mock_file_data)
-
-        with patch("builtins.open", m):
-            result = read_audio_from_file("test_audio.wav", Config())
-            assert result != "Incorrect text."
-            m.assert_called_once_with("test_audio.wav", "rb")
-
-    def test_error_read_audio_from_file(self):
-        # Error test
-        with pytest.raises(FileNotFoundError):
-            read_audio_from_file("non_existent_file.wav", Config())
-
-    def test_edge_empty_audio_file(self, mock_read_audio):
-        # Edge test
-        mock_read_audio.return_value = ""
-        mock_file_data = b""
-        m = mock_open(read_data=mock_file_data)
-
-        with patch("builtins.open", m):
-            result = read_audio_from_file("empty_audio.wav", Config())
-            assert result == ""
-            m.assert_called_once_with("empty_audio.wav", "rb")
24 tests/unit/test_make_agent.py (Normal file)
@@ -0,0 +1,24 @@
+from unittest.mock import MagicMock
+
+from pytest_mock import MockerFixture
+
+from autogpt.app import list_agents, start_agent
+from autogpt.config import Config
+
+
+def test_make_agent(config: Config, mocker: MockerFixture) -> None:
+    """Test that an agent can be created"""
+    mock = mocker.patch("openai.ChatCompletion.create")
+
+    response = MagicMock()
+    # del response.error
+    response.choices[0].messages[0].content = "Test message"
+    response.usage.prompt_tokens = 1
+    response.usage.completion_tokens = 1
+    mock.return_value = response
+    start_agent("Test Agent", "chat", "Hello, how are you?", config, "gpt-3.5-turbo")
+    agents = list_agents(config)
+    assert "List of agents:\n0: chat" == agents
+    start_agent("Test Agent 2", "write", "Hello, how are you?", config, "gpt-3.5-turbo")
+    agents = list_agents(config)
+    assert "List of agents:\n0: chat\n1: write" == agents
@@ -49,25 +49,17 @@ def test_url_validation_succeeds(url):
     assert dummy_method(url) == url


-bad_protocol_data = (
-    ("htt://example.com"),
-    ("httppp://example.com"),
-    (" https://example.com"),
+@pytest.mark.parametrize(
+    "url,expected_error",
+    [
+        ("htt://example.com", "Invalid URL format"),
+        ("httppp://example.com", "Invalid URL format"),
+        (" https://example.com", "Invalid URL format"),
+        ("http://?query=q", "Missing Scheme or Network location"),
+    ],
 )
-
-
-@pytest.mark.parametrize("url", bad_protocol_data)
-def test_url_validation_fails_bad_protocol(url):
-    with raises(ValueError, match="Invalid URL format"):
-        dummy_method(url)
-
-
-missing_loc = (("http://?query=q"),)
-
-
-@pytest.mark.parametrize("url", missing_loc)
-def test_url_validation_fails_bad_protocol(url):
-    with raises(ValueError, match="Missing Scheme or Network location"):
+def test_url_validation_fails_invalid_url(url, expected_error):
+    with raises(ValueError, match=expected_error):
         dummy_method(url)
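This hunk is the "obscured duplicate test" from the commit message: both removed functions were named `test_url_validation_fails_bad_protocol`, so the second definition rebound the name and pytest never collected the first. A minimal sketch of the pitfall, with illustrative names:

```python
def test_example():  # first definition: silently shadowed below
    assert 1 + 1 == 2


def test_example():  # same name rebinds the module attribute, so pytest
    assert 2 + 2 == 4  # collects only this second version; the first never runs
```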
@@ -1,7 +1,6 @@
 import os
 from unittest.mock import patch

-import pytest
 import requests

 from autogpt.utils import (
@@ -151,7 +150,3 @@ def test_get_current_git_branch_failure(mock_repo):
     branch_name = get_current_git_branch()

     assert branch_name == ""
-
-
-if __name__ == "__main__":
-    pytest.main()
@@ -0,0 +1,61 @@
+import os
+
+import openai.api_requestor
+import pytest
+from pytest_mock import MockerFixture
+
+from .vcr_filter import PROXY, before_record_request, before_record_response
+
+BASE_VCR_CONFIG = {
+    "record_mode": "new_episodes",
+    "before_record_request": before_record_request,
+    "before_record_response": before_record_response,
+    "filter_headers": [
+        "Authorization",
+        "X-OpenAI-Client-User-Agent",
+        "User-Agent",
+    ],
+    "match_on": ["method", "body"],
+}
+
+
+@pytest.fixture(scope="session")
+def vcr_config():
+    # this fixture is called by the pytest-recording vcr decorator.
+    return BASE_VCR_CONFIG
+
+
+@pytest.fixture()
+def vcr_cassette_dir(request):
+    test_name = os.path.splitext(request.node.name)[0]
+    return os.path.join("tests/Auto-GPT-test-cassettes", test_name)
+
+
+def patch_api_base(requestor):
+    new_api_base = f"{PROXY}/v1"
+    requestor.api_base = new_api_base
+    return requestor
+
+
+@pytest.fixture
+def patched_api_requestor(mocker: MockerFixture):
+    original_init = openai.api_requestor.APIRequestor.__init__
+    original_validate_headers = openai.api_requestor.APIRequestor._validate_headers
+
+    def patched_init(requestor, *args, **kwargs):
+        original_init(requestor, *args, **kwargs)
+        patch_api_base(requestor)
+
+    def patched_validate_headers(self, supplied_headers):
+        headers = original_validate_headers(self, supplied_headers)
+        headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
+        headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
+        return headers
+
+    if PROXY:
+        mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init)
+        mocker.patch.object(
+            openai.api_requestor.APIRequestor,
+            "_validate_headers",
+            new=patched_validate_headers,
+        )
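As a usage note on the `vcr_cassette_dir` fixture above: each test gets its own folder inside the `tests/Auto-GPT-test-cassettes` submodule. A small sketch of the mapping, with an illustrative test name:

```python
import os

# request.node.name is the pytest test name; splitext strips any trailing
# extension-like suffix before the name is used as a folder name.
test_name = os.path.splitext("test_get_text_summary")[0]
cassette_dir = os.path.join("tests/Auto-GPT-test-cassettes", test_name)
assert cassette_dir == "tests/Auto-GPT-test-cassettes/test_get_text_summary"
```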
@@ -1,8 +1,9 @@
 import json
+import os
 import re
 from typing import Any, Dict, List

-from tests.conftest import PROXY
+PROXY = os.environ.get("PROXY")

 REPLACEMENTS: List[Dict[str, str]] = [
     {