From 4ada7d1784e49ecaa8cd831d5676239501cdc41d Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 22 Jul 2023 14:58:46 -0700 Subject: [PATCH 01/24] Integrate Auto-GPT with Auto-GPT-Benchmarks (#4987) * WIP Signed-off-by: Merwane Hamadi * WIP Signed-off-by: Merwane Hamadi * Update config for benchmark changes (#4883) * Add Helicone * Add reports, consolidate, update benchmark files (#4941) * updating config * add reports, consolidate, update benchmark files * Update benchmarks.py * Change entrypath and add __init__.py * Remove Helicone integration because we now have proxy at the system level * Support more regression tests * Fix Auto-GPT/benchmark integration Signed-off-by: Merwane Hamadi * Remove cutoff * Install agbenchmark and make continuous mode dynamic Signed-off-by: Merwane Hamadi --------- Signed-off-by: Merwane Hamadi Co-authored-by: Silen Naihin --- agbenchmark/__init__.py | 0 benchmarks.py => agbenchmark/benchmarks.py | 22 +++++++++++++++----- agbenchmark/config.json | 4 ++++ agbenchmark/regression_tests.json | 24 ++++++++++++++++++++++ requirements.txt | 5 +++-- tests/challenges/utils.py | 4 ++-- 6 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 agbenchmark/__init__.py rename benchmarks.py => agbenchmark/benchmarks.py (73%) create mode 100644 agbenchmark/config.json create mode 100644 agbenchmark/regression_tests.json diff --git a/agbenchmark/__init__.py b/agbenchmark/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/benchmarks.py b/agbenchmark/benchmarks.py similarity index 73% rename from benchmarks.py rename to agbenchmark/benchmarks.py index 589b3f7571c9..e8675f92a244 100644 --- a/benchmarks.py +++ b/agbenchmark/benchmarks.py @@ -1,4 +1,6 @@ +import sys from pathlib import Path +from typing import Tuple from autogpt.agents import Agent from autogpt.app.main import run_interaction_loop @@ -12,14 +14,15 @@ PROJECT_DIR = Path().resolve() -def run_task(task) -> None: - agent = bootstrap_agent(task) +def run_specific_agent(task, continuous_mode=False) -> Tuple[str, int]: + agent = bootstrap_agent(task, continuous_mode) run_interaction_loop(agent) -def bootstrap_agent(task): +def bootstrap_agent(task, continuous_mode) -> Agent: config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR) - config.continuous_mode = False + config.debug_mode = True + config.continuous_mode = continuous_mode config.temperature = 0 config.plain_output = True command_registry = get_command_registry(config) @@ -29,7 +32,7 @@ def bootstrap_agent(task): ai_config = AIConfig( ai_name="Auto-GPT", ai_role="a multi-purpose AI assistant.", - ai_goals=[task.user_input], + ai_goals=[task], ) ai_config.command_registry = command_registry return Agent( @@ -50,3 +53,12 @@ def get_command_registry(config: Config): for command_category in enabled_command_categories: command_registry.import_commands(command_category) return command_registry + + +if __name__ == "__main__": + # The first argument is the script name itself, second is the task + if len(sys.argv) != 2: + print("Usage: python script.py <task>") + sys.exit(1) + task = sys.argv[1] + run_specific_agent(task, continuous_mode=True) diff --git a/agbenchmark/config.json b/agbenchmark/config.json new file mode 100644 index 000000000000..dd957f9ea1b1 --- /dev/null +++ b/agbenchmark/config.json @@ -0,0 +1,4 @@ +{ + "workspace": "autogpt/workspace/auto_gpt_workspace", + "entry_path": "agbenchmark.benchmarks" +} diff --git a/agbenchmark/regression_tests.json b/agbenchmark/regression_tests.json new file mode 100644 
index 000000000000..8d59b1a4f24b --- /dev/null +++ b/agbenchmark/regression_tests.json @@ -0,0 +1,24 @@ +{ + "TestBasicCodeGeneration": { + "difficulty": "basic", + "dependencies": [ + "TestWriteFile" + ], + "data_path": "agbenchmark/challenges/code/d3" + }, + "TestBasicMemory": { + "difficulty": "basic", + "data_path": "agbenchmark/challenges/memory/m1" + }, + "TestReadFile": { + "difficulty": "basic", + "dependencies": [ + "TestWriteFile" + ], + "data_path": "agbenchmark/challenges/interface/read_file" + }, + "TestWriteFile": { + "dependencies": [], + "data_path": "agbenchmark/challenges/interface/write_file" + } +} diff --git a/requirements.txt b/requirements.txt index 4af8bccd913d..c150c2625d95 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,9 @@ beautifulsoup4>=4.12.2 colorama==0.4.6 distro==1.8.0 -openai==0.27.2 +openai==0.27.8 playsound==1.2.2 -python-dotenv==1.0.0 +python-dotenv==0.21 pyyaml==6.0 PyPDF2 python-docx @@ -31,6 +31,7 @@ en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_ prompt_toolkit>=3.0.38 pydantic inflection +agbenchmark # web server fastapi diff --git a/tests/challenges/utils.py b/tests/challenges/utils.py index 9d1b76e7f09c..dd661b6e3fc6 100644 --- a/tests/challenges/utils.py +++ b/tests/challenges/utils.py @@ -6,9 +6,9 @@ import pytest +from agbenchmark.benchmarks import run_specific_agent from autogpt.logs import LogCycleHandler from autogpt.workspace import Workspace -from benchmarks import run_task from tests.challenges.schema import Task @@ -75,4 +75,4 @@ def run_challenge( setup_mock_log_cycle_agent_name(monkeypatch, challenge_name, level_to_run) task = Task(user_input=user_input) with contextlib.suppress(SystemExit): - run_task(task) + run_specific_agent(task.user_input) From 7b4cd8c86a6783156b87ccaedf84939fe9595483 Mon Sep 17 00:00:00 2001 From: Tomas Valenta Date: Mon, 24 Jul 2023 05:57:47 +0200 Subject: [PATCH 02/24] Add API via agent-protocol SDK (#5044) * Add API via agent-protocol * Fix linter formatting errors --- autogpt/core/runner/cli_web_app/cli.py | 53 +------ .../runner/cli_web_app/client/__init__.py | 0 .../core/runner/cli_web_app/client/client.py | 16 --- autogpt/core/runner/cli_web_app/server/api.py | 130 +++++++++++++----- .../core/runner/cli_web_app/server/schema.py | 36 ----- .../cli_web_app/server/services/__init__.py | 0 .../cli_web_app/server/services/users.py | 20 --- requirements.txt | 1 + 8 files changed, 104 insertions(+), 152 deletions(-) delete mode 100644 autogpt/core/runner/cli_web_app/client/__init__.py delete mode 100644 autogpt/core/runner/cli_web_app/client/client.py delete mode 100644 autogpt/core/runner/cli_web_app/server/schema.py delete mode 100644 autogpt/core/runner/cli_web_app/server/services/__init__.py delete mode 100644 autogpt/core/runner/cli_web_app/server/services/users.py diff --git a/autogpt/core/runner/cli_web_app/cli.py b/autogpt/core/runner/cli_web_app/cli.py index 6600b8e1d514..ecb423247312 100644 --- a/autogpt/core/runner/cli_web_app/cli.py +++ b/autogpt/core/runner/cli_web_app/cli.py @@ -1,14 +1,8 @@ -import contextlib import pathlib -import shlex -import subprocess -import sys -import time import click -import requests -import uvicorn import yaml +from agent_protocol import Agent from autogpt.core.runner.client_lib.shared_click_commands import ( DEFAULT_SETTINGS_FILE, @@ -29,13 +23,6 @@ def autogpt(): @autogpt.command() -@click.option( - "host", - "--host", - default="localhost", - help="The host for the webserver.", - type=click.STRING, -) @click.option( 
"port", "--port", @@ -43,16 +30,11 @@ def autogpt(): help="The port of the webserver.", type=click.INT, ) -def server(host: str, port: int) -> None: +def server(port: int) -> None: """Run the Auto-GPT runner httpserver.""" click.echo("Running Auto-GPT runner httpserver...") - uvicorn.run( - "autogpt.core.runner.cli_web_app.server.api:app", - workers=1, - host=host, - port=port, - reload=True, - ) + port = 8080 + Agent.start(port) @autogpt.command() @@ -69,32 +51,7 @@ async def client(settings_file) -> None: if settings_file.exists(): settings = yaml.safe_load(settings_file.read_text()) - from autogpt.core.runner.cli_web_app.client.client import run - - with autogpt_server(): - run() - - -@contextlib.contextmanager -def autogpt_server(): - host = "localhost" - port = 8080 - cmd = shlex.split( - f"{sys.executable} autogpt/core/runner/cli_web_app/cli.py server --host {host} --port {port}" - ) - server_process = subprocess.Popen( - args=cmd, - ) - started = False - - while not started: - try: - requests.get(f"http://{host}:{port}") - started = True - except requests.exceptions.ConnectionError: - time.sleep(0.2) - yield server_process - server_process.terminate() + # TODO: Call the API server with the settings and task, using the Python API client for agent protocol. if __name__ == "__main__": diff --git a/autogpt/core/runner/cli_web_app/client/__init__.py b/autogpt/core/runner/cli_web_app/client/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/autogpt/core/runner/cli_web_app/client/client.py b/autogpt/core/runner/cli_web_app/client/client.py deleted file mode 100644 index 346203f7ca42..000000000000 --- a/autogpt/core/runner/cli_web_app/client/client.py +++ /dev/null @@ -1,16 +0,0 @@ -import json - -import requests - - -def run(): - body = json.dumps( - {"ai_name": "HelloBot", "ai_role": "test", "ai_goals": ["goal1", "goal2"]} - ) - - header = {"Content-Type": "application/json", "openai_api_key": "asdf"} - print("Sending: ", header, body) - response = requests.post( - "http://localhost:8080/api/v1/agents", data=body, headers=header - ) - print(response.content.decode("utf-8")) diff --git a/autogpt/core/runner/cli_web_app/server/api.py b/autogpt/core/runner/cli_web_app/server/api.py index 01c50b06d187..90f63e5ad30b 100644 --- a/autogpt/core/runner/cli_web_app/server/api.py +++ b/autogpt/core/runner/cli_web_app/server/api.py @@ -1,48 +1,114 @@ -import uuid +from pathlib import Path -from fastapi import APIRouter, FastAPI, Request +from agent_protocol import Agent as AgentProtocol +from agent_protocol import StepHandler, StepResult +from colorama import Fore -from autogpt.core.runner.cli_web_app.server.schema import InteractRequestBody +from autogpt.agents import Agent +from autogpt.app.main import UserFeedback +from autogpt.commands import COMMAND_CATEGORIES +from autogpt.config import AIConfig, Config, ConfigBuilder +from autogpt.logs import logger +from autogpt.memory.vector import get_memory +from autogpt.models.command_registry import CommandRegistry +from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT +from autogpt.workspace import Workspace -router = APIRouter() +PROJECT_DIR = Path().resolve() -@router.post("/agents") -async def create_agent(request: Request): - """Create a new agent.""" - agent_id = uuid.uuid4().hex - return {"agent_id": agent_id} +async def task_handler(task_input) -> StepHandler: + agent = bootstrap_agent(task_input) + next_command_name: str | None + next_command_args: dict[str, str] | None -@router.post("/agents/{agent_id}") 
-async def interact(request: Request, agent_id: str, body: InteractRequestBody): - """Interact with an agent.""" + async def step_handler(step_input) -> StepResult: + result = await interaction_step( + agent, + step_input["user_input"], + step_input["user_feedback"], + next_command_name, + next_command_args, + ) - # check headers + nonlocal next_command_name, next_command_args + next_command_name = result["next_step_command_name"] if result else None + next_command_args = result["next_step_command_args"] if result else None - # check if agent_id exists + if not result: + return StepResult(output=None, is_last=True) + return StepResult(output=result) - # get agent object from somewhere, e.g. a database/disk/global dict + return step_handler - # continue agent interaction with user input + +async def interaction_step( + agent: Agent, + user_input, + user_feedback: UserFeedback | None, + command_name: str | None, + command_args: dict[str, str] | None, +): + """Run one step of the interaction loop.""" + if user_feedback == UserFeedback.EXIT: + return + if user_feedback == UserFeedback.TEXT: + command_name = "human_feedback" + + result: str | None = None + + if command_name is not None: + result = agent.execute(command_name, command_args, user_input) + if result is None: + logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + return + + next_command_name, next_command_args, assistant_reply_dict = agent.think() return { - "thoughts": { - "thoughts": { - "text": "text", - "reasoning": "reasoning", - "plan": "plan", - "criticism": "criticism", - "speak": "speak", - }, - "commands": { - "name": "name", - "args": {"arg_1": "value_1", "arg_2": "value_2"}, - }, - }, - "messages": ["message1", agent_id], + "config": agent.config, + "ai_config": agent.ai_config, + "result": result, + "assistant_reply_dict": assistant_reply_dict, + "next_step_command_name": next_command_name, + "next_step_command_args": next_command_args, } -app = FastAPI() -app.include_router(router, prefix="/api/v1") +def bootstrap_agent(task): + config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR) + config.continuous_mode = False + config.temperature = 0 + config.plain_output = True + command_registry = get_command_registry(config) + config.memory_backend = "no_memory" + Workspace.set_workspace_directory(config) + Workspace.build_file_logger_path(config, config.workspace_path) + ai_config = AIConfig( + ai_name="Auto-GPT", + ai_role="a multi-purpose AI assistant.", + ai_goals=[task.user_input], + ) + ai_config.command_registry = command_registry + return Agent( + memory=get_memory(config), + command_registry=command_registry, + ai_config=ai_config, + config=config, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + workspace_directory=str(config.workspace_path), + ) + + +def get_command_registry(config: Config): + command_registry = CommandRegistry() + enabled_command_categories = [ + x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories + ] + for command_category in enabled_command_categories: + command_registry.import_commands(command_category) + return command_registry + + +AgentProtocol.handle_task(task_handler) diff --git a/autogpt/core/runner/cli_web_app/server/schema.py b/autogpt/core/runner/cli_web_app/server/schema.py deleted file mode 100644 index 272fbc78e37a..000000000000 --- a/autogpt/core/runner/cli_web_app/server/schema.py +++ /dev/null @@ -1,36 +0,0 @@ -from uuid import UUID - -from pydantic import BaseModel, validator - - -class AgentInfo(BaseModel): - id: UUID 
= None - objective: str = "" - name: str = "" - role: str = "" - goals: list[str] = [] - - -class AgentConfiguration(BaseModel): - """Configuration for creation of a new agent.""" - - # We'll want to get this schema from the configuration, so it needs to be dynamic. - user_configuration: dict - agent_goals: AgentInfo - - @validator("agent_goals") - def only_objective_or_name_role_goals(cls, agent_goals): - goals_specification = [agent_goals.name, agent_goals.role, agent_goals.goals] - if agent_goals.objective and any(goals_specification): - raise ValueError("Cannot specify both objective and name, role, or goals") - if not agent_goals.objective and not all(goals_specification): - raise ValueError("Must specify either objective or name, role, and goals") - - -class InteractRequestBody(BaseModel): - user_input: str = "" - - -class InteractResponseBody(BaseModel): - thoughts: dict[str, str] # TBD - messages: list[str] # for example diff --git a/autogpt/core/runner/cli_web_app/server/services/__init__.py b/autogpt/core/runner/cli_web_app/server/services/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/autogpt/core/runner/cli_web_app/server/services/users.py b/autogpt/core/runner/cli_web_app/server/services/users.py deleted file mode 100644 index 5192dcdb2ed5..000000000000 --- a/autogpt/core/runner/cli_web_app/server/services/users.py +++ /dev/null @@ -1,20 +0,0 @@ -import uuid - -from fastapi import Request - - -class UserService: - def __init__(self): - self.users = {} - - def get_user_id(self, request: Request) -> uuid.UUID: - # TODO: something real. I don't know how this works. - hostname = request.client.host - port = request.client.port - user = f"{hostname}:{port}" - if user not in self.users: - self.users[user] = uuid.uuid4() - return self.users[user] - - -USER_SERVICE = UserService() diff --git a/requirements.txt b/requirements.txt index c150c2625d95..e401e26685aa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,6 +32,7 @@ prompt_toolkit>=3.0.38 pydantic inflection agbenchmark +agent-protocol>=0.1.1 # web server fastapi From 60d0f5edace0e4594d7e3b5b9ff59c1aca4636ef Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sun, 23 Jul 2023 21:28:12 -0700 Subject: [PATCH 03/24] Fix workspace crashing (#5041) Signed-off-by: Merwane Hamadi --- agbenchmark/benchmarks.py | 5 ++--- autogpt/agents/agent.py | 4 +--- autogpt/app/main.py | 7 ++++--- autogpt/workspace/workspace.py | 8 ++++---- docs/challenges/building_challenges.md | 1 - tests/conftest.py | 1 - tests/integration/agent_factory.py | 1 - tests/unit/test_message_history.py | 2 -- 8 files changed, 11 insertions(+), 18 deletions(-) diff --git a/agbenchmark/benchmarks.py b/agbenchmark/benchmarks.py index e8675f92a244..ea884b3aa501 100644 --- a/agbenchmark/benchmarks.py +++ b/agbenchmark/benchmarks.py @@ -27,8 +27,8 @@ def bootstrap_agent(task, continuous_mode) -> Agent: config.plain_output = True command_registry = get_command_registry(config) config.memory_backend = "no_memory" - Workspace.set_workspace_directory(config) - Workspace.build_file_logger_path(config, config.workspace_path) + config.workspace_path = Workspace.set_workspace_directory(config) + config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) ai_config = AIConfig( ai_name="Auto-GPT", ai_role="a multi-purpose AI assistant.", @@ -41,7 +41,6 @@ def bootstrap_agent(task, continuous_mode) -> Agent: ai_config=ai_config, config=config, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, - 
workspace_directory=str(config.workspace_path), ) diff --git a/autogpt/agents/agent.py b/autogpt/agents/agent.py index f3fee609ca00..93d3de86570c 100644 --- a/autogpt/agents/agent.py +++ b/autogpt/agents/agent.py @@ -3,7 +3,6 @@ import json import time from datetime import datetime -from pathlib import Path from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: @@ -37,7 +36,6 @@ def __init__( command_registry: CommandRegistry, memory: VectorMemory, triggering_prompt: str, - workspace_directory: str | Path, config: Config, cycle_budget: Optional[int] = None, ): @@ -52,7 +50,7 @@ def __init__( self.memory = memory """VectorMemoryProvider used to manage the agent's context (TODO)""" - self.workspace = Workspace(workspace_directory, config.restrict_to_workspace) + self.workspace = Workspace(config.workspace_path, config.restrict_to_workspace) """Workspace that the agent has access to, e.g. for reading/writing files.""" self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S") diff --git a/autogpt/app/main.py b/autogpt/app/main.py index a3c7d1d8f691..fa61eeaf7104 100644 --- a/autogpt/app/main.py +++ b/autogpt/app/main.py @@ -126,10 +126,12 @@ def run_auto_gpt( # TODO: have this directory live outside the repository (e.g. in a user's # home directory) and have it come in as a command line argument or part of # the env file. - Workspace.set_workspace_directory(config, workspace_directory) + config.workspace_path = Workspace.set_workspace_directory( + config, workspace_directory + ) # HACK: doing this here to collect some globals that depend on the workspace. - Workspace.build_file_logger_path(config, config.workspace_path) + config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) config.plugins = scan_plugins(config, config.debug_mode) # Create a CommandRegistry instance and scan default folder @@ -192,7 +194,6 @@ def run_auto_gpt( memory=memory, command_registry=command_registry, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, - workspace_directory=workspace_directory, ai_config=ai_config, config=config, ) diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py index e580d4c4c8a2..510d18a4f7e0 100644 --- a/autogpt/workspace/workspace.py +++ b/autogpt/workspace/workspace.py @@ -144,21 +144,21 @@ def _sanitize_path( return full_path @staticmethod - def build_file_logger_path(config: Config, workspace_directory: Path): + def build_file_logger_path(workspace_directory: Path) -> str: file_logger_path = workspace_directory / "file_logger.txt" if not file_logger_path.exists(): with file_logger_path.open(mode="w", encoding="utf-8") as f: f.write("File Operation Logger ") - config.file_logger_path = str(file_logger_path) + return str(file_logger_path) @staticmethod def set_workspace_directory( config: Config, workspace_directory: Optional[str | Path] = None - ) -> None: + ) -> Path: if workspace_directory is None: workspace_directory = config.workdir / "auto_gpt_workspace" elif type(workspace_directory) == str: workspace_directory = Path(workspace_directory) # TODO: pass in the ai_settings file and the env file and have them cloned into # the workspace directory so we can bind them to the agent. 
- config.workspace_path = Workspace.make_workspace(workspace_directory) + return Workspace.make_workspace(workspace_directory) diff --git a/docs/challenges/building_challenges.md b/docs/challenges/building_challenges.md index a4d0fa0827f6..9caf5cdd2758 100644 --- a/docs/challenges/building_challenges.md +++ b/docs/challenges/building_challenges.md @@ -59,7 +59,6 @@ def kubernetes_agent( config=ai_config, next_action_count=0, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, - workspace_directory=workspace.root, ) return agent diff --git a/tests/conftest.py b/tests/conftest.py index 2becc8bf4443..1efd620475da 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -103,5 +103,4 @@ def agent(config: Config, workspace: Workspace) -> Agent: ai_config=ai_config, config=config, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, - workspace_directory=workspace.root, ) diff --git a/tests/integration/agent_factory.py b/tests/integration/agent_factory.py index 89e3b763db24..45d195e3188a 100644 --- a/tests/integration/agent_factory.py +++ b/tests/integration/agent_factory.py @@ -38,7 +38,6 @@ def dummy_agent(config: Config, memory_json_file, workspace: Workspace): ai_config=ai_config, config=config, triggering_prompt="dummy triggering prompt", - workspace_directory=workspace.root, ) return agent diff --git a/tests/unit/test_message_history.py b/tests/unit/test_message_history.py index e434f9d5d08b..08a3a24bd334 100644 --- a/tests/unit/test_message_history.py +++ b/tests/unit/test_message_history.py @@ -19,7 +19,6 @@ def agent(config: Config): command_registry = MagicMock() ai_config = AIConfig(ai_name="Test AI") triggering_prompt = "Triggering prompt" - workspace_directory = "workspace_directory" agent = Agent( memory=memory, @@ -27,7 +26,6 @@ ai_config=ai_config, config=config, triggering_prompt=triggering_prompt, - workspace_directory=workspace_directory, ) return agent From abed282db34a6260d92f6e0e0a9c63792734c6b1 Mon Sep 17 00:00:00 2001 From: Tomas Valenta Date: Mon, 24 Jul 2023 17:34:24 +0200 Subject: [PATCH 04/24] Fix runtime error in the API (#5047) * Add API via agent-protocol * Fix linter formatting errors * Fix API runtime errors; Improve API initialization * Fix formatting linter errors --- autogpt/core/runner/cli_web_app/cli.py | 8 ++--- autogpt/core/runner/cli_web_app/server/api.py | 32 +++++++++---------- 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/autogpt/core/runner/cli_web_app/cli.py b/autogpt/core/runner/cli_web_app/cli.py index ecb423247312..e933739b242f 100644 --- a/autogpt/core/runner/cli_web_app/cli.py +++ b/autogpt/core/runner/cli_web_app/cli.py @@ -2,12 +2,12 @@ import click import yaml -from agent_protocol import Agent +from agent_protocol import Agent as AgentProtocol +from autogpt.core.runner.cli_web_app.server.api import task_handler from autogpt.core.runner.client_lib.shared_click_commands import ( DEFAULT_SETTINGS_FILE, make_settings, - status, ) from autogpt.core.runner.client_lib.utils import coroutine @@ -19,7 +19,6 @@ def autogpt(): autogpt.add_command(make_settings) -autogpt.add_command(status) @autogpt.command() @@ -33,8 +32,7 @@ def autogpt(): def server(port: int) -> None: """Run the Auto-GPT runner httpserver.""" click.echo("Running Auto-GPT runner httpserver...") - port = 8080 - Agent.start(port) + AgentProtocol.handle_task(task_handler).start(port) @autogpt.command() diff --git a/autogpt/core/runner/cli_web_app/server/api.py b/autogpt/core/runner/cli_web_app/server/api.py index 90f63e5ad30b..2dc3f0101c07 100644 --- 
a/autogpt/core/runner/cli_web_app/server/api.py +++ b/autogpt/core/runner/cli_web_app/server/api.py @@ -1,6 +1,5 @@ from pathlib import Path -from agent_protocol import Agent as AgentProtocol from agent_protocol import StepHandler, StepResult from colorama import Fore @@ -18,21 +17,25 @@ async def task_handler(task_input) -> StepHandler: - agent = bootstrap_agent(task_input) + task = task_input.__root__ if task_input else {} + agent = bootstrap_agent(task.get("user_input"), False) - next_command_name: str | None - next_command_args: dict[str, str] | None + next_command_name: str | None = None + next_command_args: dict[str, str] | None = None async def step_handler(step_input) -> StepResult: + step = step_input.__root__ if step_input else {} + + nonlocal next_command_name, next_command_args + result = await interaction_step( agent, - step_input["user_input"], - step_input["user_feedback"], + step.get("user_input"), + step.get("user_feedback"), next_command_name, next_command_args, ) - nonlocal next_command_name, next_command_args next_command_name = result["next_step_command_name"] if result else None next_command_args = result["next_step_command_args"] if result else None @@ -76,19 +79,20 @@ async def interaction_step( } -def bootstrap_agent(task): +def bootstrap_agent(task, continuous_mode) -> Agent: config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR) - config.continuous_mode = False + config.debug_mode = True + config.continuous_mode = continuous_mode config.temperature = 0 config.plain_output = True command_registry = get_command_registry(config) config.memory_backend = "no_memory" - Workspace.set_workspace_directory(config) - Workspace.build_file_logger_path(config, config.workspace_path) + config.workspace_path = Workspace.set_workspace_directory(config) + config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) ai_config = AIConfig( ai_name="Auto-GPT", ai_role="a multi-purpose AI assistant.", - ai_goals=[task.user_input], + ai_goals=[task], ) ai_config.command_registry = command_registry return Agent( @@ -97,7 +101,6 @@ def bootstrap_agent(task): ai_config=ai_config, config=config, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, - workspace_directory=str(config.workspace_path), ) @@ -109,6 +112,3 @@ def get_command_registry(config: Config): for command_category in enabled_command_categories: command_registry.import_commands(command_category) return command_registry - - -AgentProtocol.handle_task(task_handler) From d1e5ab04a8ccd3a4db009b048a82e9dd9f83aa3d Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 24 Jul 2023 09:06:11 -0700 Subject: [PATCH 05/24] Change workspace location --- agbenchmark/config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agbenchmark/config.json b/agbenchmark/config.json index dd957f9ea1b1..47785864c863 100644 --- a/agbenchmark/config.json +++ b/agbenchmark/config.json @@ -1,4 +1,4 @@ { - "workspace": "autogpt/workspace/auto_gpt_workspace", + "workspace": "auto_gpt_workspace", "entry_path": "agbenchmark.benchmarks" } From b6d9c3ab03356de155629e6de30f7d4eeaeabc0f Mon Sep 17 00:00:00 2001 From: Merwane Hamadi Date: Mon, 24 Jul 2023 10:01:41 -0700 Subject: [PATCH 06/24] Remove delete file Signed-off-by: Merwane Hamadi --- autogpt/agents/base.py | 10 ++++++++-- autogpt/commands/file_operations.py | 31 ----------------------------- autogpt/core/planning/templates.py | 1 - tests/unit/test_file_operations.py | 18 ----------------- 4 files changed, 8 insertions(+), 52 deletions(-) diff --git 
a/autogpt/agents/base.py b/autogpt/agents/base.py index c0133ea7c35c..029b07a05a76 100644 --- a/autogpt/agents/base.py +++ b/autogpt/agents/base.py @@ -11,7 +11,7 @@ from autogpt.llm.base import ChatModelResponse, ChatSequence, Message from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs from autogpt.llm.utils import count_message_tokens, create_chat_completion -from autogpt.logs import logger +from autogpt.logs import CURRENT_CONTEXT_FILE_NAME, logger from autogpt.memory.message_history import MessageHistory from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT @@ -105,7 +105,13 @@ def think( prompt: ChatSequence = self.construct_prompt(instruction) prompt = self.on_before_think(prompt, instruction) - + self.log_cycle_handler.log_cycle( + self.ai_config.ai_name, + self.created_at, + self.cycle_count, + prompt.raw(), + CURRENT_CONTEXT_FILE_NAME, + ) raw_response = create_chat_completion( prompt, self.config, diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 939b7dc181a1..715a90aebffd 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -274,37 +274,6 @@ def append_to_file( return f"Error: {err}" -@command( - "delete_file", - "Deletes a file", - { - "filename": { - "type": "string", - "description": "The name of the file to delete", - "required": True, - } - }, -) -@sanitize_path_arg("filename") -def delete_file(filename: str, agent: Agent) -> str: - """Delete a file - - Args: - filename (str): The name of the file to delete - - Returns: - str: A message indicating success or failure - """ - if is_duplicate_operation("delete", filename, agent): - return "Error: File has already been deleted." - try: - os.remove(filename) - log_operation("delete", filename, agent) - return "File deleted successfully." - except Exception as err: - return f"Error: {err}" - - @command( "list_files", "Lists Files in a Directory", diff --git a/autogpt/core/planning/templates.py b/autogpt/core/planning/templates.py index e28f2ed75d58..59792f656472 100644 --- a/autogpt/core/planning/templates.py +++ b/autogpt/core/planning/templates.py @@ -17,7 +17,6 @@ 'analyze_code: Analyze Code, args: "code": ""', 'execute_python_file: Execute Python File, args: "filename": ""', 'append_to_file: Append to file, args: "filename": "", "text": ""', - 'delete_file: Delete file, args: "filename": ""', 'list_files: List Files in Directory, args: "directory": ""', 'read_file: Read a file, args: "filename": ""', 'write_to_file: Write to file, args: "filename": "", "text": ""', diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py index d7d870a592ff..b3f1fb8f8245 100644 --- a/tests/unit/test_file_operations.py +++ b/tests/unit/test_file_operations.py @@ -282,24 +282,6 @@ def test_append_to_file_uses_checksum_from_appended_file( ) -def test_delete_file(test_file_with_content_path: Path, agent: Agent): - result = file_ops.delete_file(str(test_file_with_content_path), agent=agent) - assert result == "File deleted successfully." 
- assert os.path.exists(test_file_with_content_path) is False - - -def test_delete_missing_file(agent: Agent): - filename = "path/to/file/which/does/not/exist" - # confuse the log - file_ops.log_operation("write", filename, agent=agent, checksum="fake") - try: - os.remove(agent.workspace.get_path(filename)) - except FileNotFoundError as err: - assert str(err) in file_ops.delete_file(filename, agent=agent) - return - assert False, f"Failed to test delete_file; {filename} not expected to exist" - - def test_list_files(workspace: Workspace, test_directory: Path, agent: Agent): # Case 1: Create files A and B, search for A, and ensure we don't return A and B file_a = workspace.get_path("file_a.txt") From a56dc479e5c593e9e4548d094f3d7c4dfbdff573 Mon Sep 17 00:00:00 2001 From: Auto-GPT-Bot Date: Mon, 24 Jul 2023 17:56:56 +0000 Subject: [PATCH 07/24] Update cassette submodule --- tests/Auto-GPT-test-cassettes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Auto-GPT-test-cassettes b/tests/Auto-GPT-test-cassettes index 47e262905edc..6b4f855269df 160000 --- a/tests/Auto-GPT-test-cassettes +++ b/tests/Auto-GPT-test-cassettes @@ -1 +1 @@ -Subproject commit 47e262905edc1380bc0539fd298fd94d99667e89 +Subproject commit 6b4f855269dfc7ec220cc7774d675940dcaa78ef From ddb7efa593dbdbea9b895b4448c49a6124358302 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Tue, 25 Jul 2023 18:08:50 +0200 Subject: [PATCH 08/24] Move misplaced log_cycle statements --- autogpt/agents/agent.py | 8 ++++++++ autogpt/agents/base.py | 9 +-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/autogpt/agents/agent.py b/autogpt/agents/agent.py index 93d3de86570c..563c682385c6 100644 --- a/autogpt/agents/agent.py +++ b/autogpt/agents/agent.py @@ -17,6 +17,7 @@ from autogpt.llm.utils import count_string_tokens from autogpt.logs import logger from autogpt.logs.log_cycle import ( + CURRENT_CONTEXT_FILE_NAME, FULL_MESSAGE_HISTORY_FILE_NAME, NEXT_ACTION_FILE_NAME, USER_INPUT_FILE_NAME, @@ -109,6 +110,13 @@ def on_before_think(self, *args, **kwargs) -> ChatSequence: self.history.raw(), FULL_MESSAGE_HISTORY_FILE_NAME, ) + self.log_cycle_handler.log_cycle( + self.ai_config.ai_name, + self.created_at, + self.cycle_count, + prompt.raw(), + CURRENT_CONTEXT_FILE_NAME, + ) return prompt def execute( diff --git a/autogpt/agents/base.py b/autogpt/agents/base.py index 029b07a05a76..e6b24be12ba8 100644 --- a/autogpt/agents/base.py +++ b/autogpt/agents/base.py @@ -11,7 +11,7 @@ from autogpt.llm.base import ChatModelResponse, ChatSequence, Message from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs from autogpt.llm.utils import count_message_tokens, create_chat_completion -from autogpt.logs import CURRENT_CONTEXT_FILE_NAME, logger +from autogpt.logs import logger from autogpt.memory.message_history import MessageHistory from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT @@ -105,13 +105,6 @@ def think( prompt: ChatSequence = self.construct_prompt(instruction) prompt = self.on_before_think(prompt, instruction) - self.log_cycle_handler.log_cycle( - self.ai_config.ai_name, - self.created_at, - self.cycle_count, - prompt.raw(), - CURRENT_CONTEXT_FILE_NAME, - ) raw_response = create_chat_completion( prompt, self.config, From 2eb346e06e3d63f106063ef37a2ad1fb2cf7b18e Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Sat, 29 Jul 2023 19:06:47 +0200 Subject: [PATCH 09/24] Improve prompting and prompt generation infrastructure (#5076) * Clean up prompt generation * 
Rename Performance Evaluations to Best Practices * Move specification of response format from system prompt to Agent.construct_base_prompt * Clean up PromptGenerator class * Add debug logging to AIConfig autogeneration * Clarify prompting and add support for multiple thought processes to Agent --- autogpt/agents/agent.py | 6 +- autogpt/agents/base.py | 111 +++++++++++++++++++-- autogpt/app/setup.py | 1 + autogpt/config/ai_config.py | 66 ++++++++----- autogpt/config/prompt_config.py | 2 +- autogpt/llm/utils/__init__.py | 3 + autogpt/prompts/generator.py | 144 ++++++++++++---------------- autogpt/prompts/prompt.py | 14 ++- prompt_settings.yaml | 5 +- tests/unit/test_prompt_config.py | 22 ++--- tests/unit/test_prompt_generator.py | 21 ++-- 11 files changed, 240 insertions(+), 155 deletions(-) diff --git a/autogpt/agents/agent.py b/autogpt/agents/agent.py index 563c682385c6..fa20ea587f1c 100644 --- a/autogpt/agents/agent.py +++ b/autogpt/agents/agent.py @@ -293,10 +293,10 @@ def execute_command( # Handle non-native commands (e.g. from plugins) for command in agent.ai_config.prompt_generator.commands: if ( - command_name == command["label"].lower() - or command_name == command["name"].lower() + command_name == command.label.lower() + or command_name == command.name.lower() ): - return command["function"](**arguments) + return command.function(**arguments) raise RuntimeError( f"Cannot execute '{command_name}': unknown command." diff --git a/autogpt/agents/base.py b/autogpt/agents/base.py index e6b24be12ba8..bf43b3769823 100644 --- a/autogpt/agents/base.py +++ b/autogpt/agents/base.py @@ -1,7 +1,8 @@ from __future__ import annotations +import re from abc import ABCMeta, abstractmethod -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Literal, Optional if TYPE_CHECKING: from autogpt.config import AIConfig, Config @@ -23,6 +24,8 @@ class BaseAgent(metaclass=ABCMeta): """Base class for all Auto-GPT agents.""" + ThoughtProcessID = Literal["one-shot"] + def __init__( self, ai_config: AIConfig, @@ -91,6 +94,7 @@ def __init__( def think( self, instruction: Optional[str] = None, + thought_process_id: ThoughtProcessID = "one-shot", ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]: """Runs the agent for one cycle. @@ -103,8 +107,8 @@ def think( instruction = instruction or self.default_cycle_instruction - prompt: ChatSequence = self.construct_prompt(instruction) - prompt = self.on_before_think(prompt, instruction) + prompt: ChatSequence = self.construct_prompt(instruction, thought_process_id) + prompt = self.on_before_think(prompt, thought_process_id, instruction) raw_response = create_chat_completion( prompt, self.config, @@ -114,7 +118,7 @@ def think( ) self.cycle_count += 1 - return self.on_response(raw_response, prompt, instruction) + return self.on_response(raw_response, thought_process_id, prompt, instruction) @abstractmethod def execute( @@ -137,6 +141,7 @@ def execute( def construct_base_prompt( self, + thought_process_id: ThoughtProcessID, prepend_messages: list[Message] = [], append_messages: list[Message] = [], reserve_tokens: int = 0, @@ -178,7 +183,11 @@ def construct_base_prompt( return prompt - def construct_prompt(self, cycle_instruction: str) -> ChatSequence: + def construct_prompt( + self, + cycle_instruction: str, + thought_process_id: ThoughtProcessID, + ) -> ChatSequence: """Constructs and returns a prompt with the following structure: 1. System prompt 2. 
Message history of the agent, truncated & prepended with running summary as needed @@ -195,14 +204,86 @@ def construct_prompt(self, cycle_instruction: str) -> ChatSequence: cycle_instruction_tlength = count_message_tokens( cycle_instruction_msg, self.llm.name ) - prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength) + + append_messages: list[Message] = [] + + response_format_instr = self.response_format_instruction(thought_process_id) + if response_format_instr: + append_messages.append(Message("system", response_format_instr)) + + prompt = self.construct_base_prompt( + thought_process_id, + append_messages=append_messages, + reserve_tokens=cycle_instruction_tlength, + ) # ADD user input message ("triggering prompt") prompt.append(cycle_instruction_msg) return prompt - def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence: + # This can be expanded to support multiple types of (inter)actions within an agent + def response_format_instruction(self, thought_process_id: ThoughtProcessID) -> str: + if thought_process_id != "one-shot": + raise NotImplementedError(f"Unknown thought process '{thought_process_id}'") + + RESPONSE_FORMAT_WITH_COMMAND = """```ts + interface Response { + thoughts: { + // Thoughts + text: string; + reasoning: string; + // Short markdown-style bullet list that conveys the long-term plan + plan: string; + // Constructive self-criticism + criticism: string; + // Summary of thoughts to say to the user + speak: string; + }; + command: { + name: string; + args: Record<string, any>; + }; + } + ```""" + + RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts + interface Response { + thoughts: { + // Thoughts + text: string; + reasoning: string; + // Short markdown-style bullet list that conveys the long-term plan + plan: string; + // Constructive self-criticism + criticism: string; + // Summary of thoughts to say to the user + speak: string; + }; + } + ```""" + + response_format = re.sub( + r"\n\s+", + "\n", + RESPONSE_FORMAT_WITHOUT_COMMAND + if self.config.openai_functions + else RESPONSE_FORMAT_WITH_COMMAND, + ) + + use_functions = self.config.openai_functions and self.command_registry.commands + return ( + f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. " + "The JSON should be compatible with the TypeScript type `Response` from the following:\n" + f"{response_format}\n" + ) + + def on_before_think( + self, + prompt: ChatSequence, + thought_process_id: ThoughtProcessID, + instruction: str, + ) -> ChatSequence: """Called after constructing the prompt but before executing it. Calls the `on_planning` hook of any enabled and capable plugins, adding their @@ -237,7 +318,11 @@ def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequenc return prompt def on_response( - self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str + self, + llm_response: ChatModelResponse, + thought_process_id: ThoughtProcessID, + prompt: ChatSequence, + instruction: str, ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]: """Called upon receiving a response from the chat model. 
@@ -260,7 +345,9 @@ def on_response( ) # FIXME: support function calls try: - return self.parse_and_process_response(llm_response, prompt, instruction) + return self.parse_and_process_response( + llm_response, thought_process_id, prompt, instruction + ) except SyntaxError as e: logger.error(f"Response could not be parsed: {e}") # TODO: tune this message @@ -275,7 +362,11 @@ def on_response( @abstractmethod def parse_and_process_response( - self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str + self, + llm_response: ChatModelResponse, + thought_process_id: ThoughtProcessID, + prompt: ChatSequence, + instruction: str, ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]: """Validate, parse & process the LLM's response. diff --git a/autogpt/app/setup.py b/autogpt/app/setup.py index f2b52916cfe7..cb6073adc0dd 100644 --- a/autogpt/app/setup.py +++ b/autogpt/app/setup.py @@ -83,6 +83,7 @@ def prompt_user( "Falling back to manual mode.", speak_text=True, ) + logger.debug(f"Error during AIConfig generation: {e}") return generate_aiconfig_manual(config) diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py index b47740f6a8d8..ce26e23dd48b 100644 --- a/autogpt/config/ai_config.py +++ b/autogpt/config/ai_config.py @@ -1,7 +1,4 @@ -# sourcery skip: do-not-use-staticmethod -""" -A module that contains the AIConfig class object that contains the configuration -""" +"""A module that contains the AIConfig class object that contains the configuration""" from __future__ import annotations import platform @@ -15,6 +12,8 @@ from autogpt.models.command_registry import CommandRegistry from autogpt.prompts.generator import PromptGenerator + from .config import Config + class AIConfig: """ @@ -104,7 +103,7 @@ def save(self, ai_settings_file: str | Path) -> None: yaml.dump(config, file, allow_unicode=True) def construct_full_prompt( - self, config, prompt_generator: Optional[PromptGenerator] = None + self, config: Config, prompt_generator: Optional[PromptGenerator] = None ) -> str: """ Returns a prompt to the user with the class information in an organized fashion. @@ -117,26 +116,27 @@ def construct_full_prompt( including the ai_name, ai_role, ai_goals, and api_budget. """ - prompt_start = ( - "Your decisions must always be made independently without" - " seeking user assistance. Play to your strengths as an LLM and pursue" - " simple strategies with no legal complications." - "" - ) - from autogpt.prompts.prompt import build_default_prompt_generator + prompt_generator = prompt_generator or self.prompt_generator if prompt_generator is None: prompt_generator = build_default_prompt_generator(config) - prompt_generator.goals = self.ai_goals - prompt_generator.name = self.ai_name - prompt_generator.role = self.ai_role - prompt_generator.command_registry = self.command_registry + prompt_generator.command_registry = self.command_registry + self.prompt_generator = prompt_generator + for plugin in config.plugins: if not plugin.can_handle_post_prompt(): continue prompt_generator = plugin.post_prompt(prompt_generator) + # Construct full prompt + full_prompt_parts = [ + f"You are {self.ai_name}, {self.ai_role.rstrip('.')}.", + "Your decisions must always be made independently without seeking " + "user assistance. 
Play to your strengths as an LLM and pursue " + "simple strategies with no legal complications.", + ] + if config.execute_local_commands: # add OS info to prompt os_name = platform.system() @@ -146,14 +146,30 @@ def construct_full_prompt( else distro.name(pretty=True) ) - prompt_start += f"\nThe OS you are running on is: {os_info}" + full_prompt_parts.append(f"The OS you are running on is: {os_info}") - # Construct full prompt - full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n" - for i, goal in enumerate(self.ai_goals): - full_prompt += f"{i+1}. {goal}\n" + additional_constraints: list[str] = [] if self.api_budget > 0.0: - full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}" - self.prompt_generator = prompt_generator - full_prompt += f"\n\n{prompt_generator.generate_prompt_string(config)}" - return full_prompt + additional_constraints.append( + f"It takes money to let you run. " + f"Your API budget is ${self.api_budget:.3f}" + ) + + full_prompt_parts.append( + prompt_generator.generate_prompt_string( + additional_constraints=additional_constraints + ) + ) + + if self.ai_goals: + full_prompt_parts.append( + "\n".join( + [ + "## Goals", + "For your task, you must fulfill the following goals:", + *[f"{i+1}. {goal}" for i, goal in enumerate(self.ai_goals)], + ] + ) + ) + + return "\n\n".join(full_prompt_parts).strip("\n") diff --git a/autogpt/config/prompt_config.py b/autogpt/config/prompt_config.py index 793bb4440437..055e7897b16f 100644 --- a/autogpt/config/prompt_config.py +++ b/autogpt/config/prompt_config.py @@ -44,4 +44,4 @@ def __init__(self, prompt_settings_file: str) -> None: self.constraints = config_params.get("constraints", []) self.resources = config_params.get("resources", []) - self.performance_evaluations = config_params.get("performance_evaluations", []) + self.best_practices = config_params.get("best_practices", []) diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index e433476ec0be..9eceae0db9e5 100644 --- a/autogpt/llm/utils/__init__.py +++ b/autogpt/llm/utils/__init__.py @@ -154,6 +154,9 @@ def create_chat_completion( function.schema for function in functions ] + # Print full prompt to debug log + logger.debug(prompt.dump()) + response = iopenai.create_chat_completion( messages=prompt.raw(), **chat_completion_kwargs, diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py index bc836f30c593..a8217953dbbf 100644 --- a/autogpt/prompts/generator.py +++ b/autogpt/prompts/generator.py @@ -1,11 +1,8 @@ """ A module for generating custom prompt strings.""" from __future__ import annotations -import json -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypedDict - -from autogpt.config import Config -from autogpt.json_utils.utilities import llm_response_schema +from dataclasses import dataclass +from typing import TYPE_CHECKING, Callable, Optional if TYPE_CHECKING: from autogpt.models.command_registry import CommandRegistry @@ -17,34 +14,33 @@ class PromptGenerator: resources, and performance evaluations. 
""" - class Command(TypedDict): + @dataclass + class Command: label: str name: str params: dict[str, str] function: Optional[Callable] + def __str__(self) -> str: + """Returns a string representation of the command.""" + params_string = ", ".join( + f'"{key}": "{value}"' for key, value in self.params.items() + ) + return f'{self.label}: "{self.name}", params: ({params_string})' + constraints: list[str] commands: list[Command] resources: list[str] - performance_evaluation: list[str] + best_practices: list[str] command_registry: CommandRegistry | None - # TODO: replace with AIConfig - name: str - role: str - goals: list[str] - def __init__(self): self.constraints = [] self.commands = [] self.resources = [] - self.performance_evaluation = [] + self.best_practices = [] self.command_registry = None - self.name = "Bob" - self.role = "AI" - self.goals = [] - def add_constraint(self, constraint: str) -> None: """ Add a constraint to the constraints list. @@ -75,31 +71,15 @@ def add_command( function (callable, optional): A callable function to be called when the command is executed. Defaults to None. """ - command_params = {name: type for name, type in params.items()} - - command: PromptGenerator.Command = { - "label": command_label, - "name": command_name, - "params": command_params, - "function": function, - } - self.commands.append(command) - - def _generate_command_string(self, command: Dict[str, Any]) -> str: - """ - Generate a formatted string representation of a command. - - Args: - command (dict): A dictionary containing command information. - - Returns: - str: The formatted command string. - """ - params_string = ", ".join( - f'"{key}": "{value}"' for key, value in command["params"].items() + self.commands.append( + PromptGenerator.Command( + label=command_label, + name=command_name, + params={name: type for name, type in params.items()}, + function=function, + ) ) - return f'{command["label"]}: "{command["name"]}", params: {params_string}' def add_resource(self, resource: str) -> None: """ @@ -110,71 +90,67 @@ def add_resource(self, resource: str) -> None: """ self.resources.append(resource) - def add_performance_evaluation(self, evaluation: str) -> None: + def add_best_practice(self, best_practice: str) -> None: """ - Add a performance evaluation item to the performance_evaluation list. + Add an item to the list of best practices. Args: - evaluation (str): The evaluation item to be added. + best_practice (str): The best practice item to be added. """ - self.performance_evaluation.append(evaluation) + self.best_practices.append(best_practice) - def _generate_numbered_list(self, items: List[Any], item_type="list") -> str: + def _generate_numbered_list(self, items: list[str], start_at: int = 1) -> str: """ - Generate a numbered list from given items based on the item_type. + Generate a numbered list containing the given items. Args: items (list): A list of items to be numbered. - item_type (str, optional): The type of items in the list. - Defaults to 'list'. + start_at (int, optional): The number to start the sequence with; defaults to 1. Returns: str: The formatted numbered list. """ - if item_type == "command": - command_strings = [] - if self.command_registry: - command_strings += [ - str(item) - for item in self.command_registry.commands.values() - if item.enabled - ] - # terminate command is added manually - command_strings += [self._generate_command_string(item) for item in items] - return "\n".join(f"{i+1}. 
{item}" for i, item in enumerate(command_strings)) - else: - return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) - - def generate_prompt_string(self, config: Config) -> str: + return "\n".join(f"{i}. {item}" for i, item in enumerate(items, start_at)) + + def generate_prompt_string( + self, + *, + additional_constraints: list[str] = [], + additional_resources: list[str] = [], + additional_best_practices: list[str] = [], + ) -> str: """ Generate a prompt string based on the constraints, commands, resources, - and performance evaluations. + and best practices. Returns: str: The generated prompt string. """ + return ( - f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" - f"{generate_commands(self, config)}" - f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" - "Performance Evaluation:\n" - f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" - "Respond with only valid JSON conforming to the following schema: \n" - f"{json.dumps(llm_response_schema(config))}\n" + "## Constraints\n" + "You operate within the following constraints:\n" + f"{self._generate_numbered_list(self.constraints + additional_constraints)}\n\n" + "## Commands\n" + "You have access to the following commands:\n" + f"{self._generate_commands()}\n\n" + "## Resources\n" + "You can leverage access to the following resources:\n" + f"{self._generate_numbered_list(self.resources + additional_resources)}\n\n" + "## Best practices\n" + f"{self._generate_numbered_list(self.best_practices + additional_best_practices)}" ) + def _generate_commands(self) -> str: + command_strings = [] + if self.command_registry: + command_strings += [ + str(cmd) + for cmd in self.command_registry.commands.values() + if cmd.enabled + ] -def generate_commands(self, config: Config) -> str: - """ - Generate a prompt string based on the constraints, commands, resources, - and performance evaluations. + # Add commands from plugins etc. + command_strings += [str(cmd) for cmd in self.commands] - Returns: - str: The generated prompt string. - """ - if config.openai_functions: - return "" - return ( - "Commands:\n" - f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" - ) + return self._generate_numbered_list(command_strings) diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index b64f11f599a2..627b6c50f189 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -2,13 +2,17 @@ from autogpt.config.prompt_config import PromptConfig from autogpt.prompts.generator import PromptGenerator -DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:" +DEFAULT_TRIGGERING_PROMPT = ( + "Determine exactly one command to use based on the given goals " + "and the progress you have made so far, " + "and respond using the JSON schema specified previously:" +) def build_default_prompt_generator(config: Config) -> PromptGenerator: """ This function generates a prompt string that includes various constraints, - commands, resources, and performance evaluations. + commands, resources, and best practices. Returns: str: The generated prompt string. 
@@ -28,8 +32,8 @@ def build_default_prompt_generator(config: Config) -> PromptGenerator: for resource in prompt_config.resources: prompt_generator.add_resource(resource) - # Add performance evaluations to the PromptGenerator object - for performance_evaluation in prompt_config.performance_evaluations: - prompt_generator.add_performance_evaluation(performance_evaluation) + # Add best practices to the PromptGenerator object + for best_practice in prompt_config.best_practices: + prompt_generator.add_best_practice(best_practice) return prompt_generator diff --git a/prompt_settings.yaml b/prompt_settings.yaml index 342d67b9ebb0..a83ca6225bba 100644 --- a/prompt_settings.yaml +++ b/prompt_settings.yaml @@ -7,9 +7,10 @@ constraints: [ resources: [ 'Internet access for searches and information gathering.', 'Long Term memory management.', - 'File output.' + 'File output.', + 'Command execution' ] -performance_evaluations: [ +best_practices: [ 'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.', 'Constructively self-criticize your big-picture behavior constantly.', 'Reflect on past decisions and strategies to refine your approach.', diff --git a/tests/unit/test_prompt_config.py b/tests/unit/test_prompt_config.py index 4616db971b35..b83efd0d5209 100644 --- a/tests/unit/test_prompt_config.py +++ b/tests/unit/test_prompt_config.py @@ -18,10 +18,10 @@ def test_prompt_config_loading(tmp_path): - A test resource - Another test resource - A third test resource -performance_evaluations: -- A test performance evaluation -- Another test performance evaluation -- A third test performance evaluation +best_practices: +- A test best-practice +- Another test best-practice +- A third test best-practice """ prompt_settings_file = tmp_path / "test_prompt_settings.yaml" prompt_settings_file.write_text(yaml_content) @@ -36,13 +36,7 @@ def test_prompt_config_loading(tmp_path): assert prompt_config.resources[0] == "A test resource" assert prompt_config.resources[1] == "Another test resource" assert prompt_config.resources[2] == "A third test resource" - assert len(prompt_config.performance_evaluations) == 3 - assert prompt_config.performance_evaluations[0] == "A test performance evaluation" - assert ( - prompt_config.performance_evaluations[1] - == "Another test performance evaluation" - ) - assert ( - prompt_config.performance_evaluations[2] - == "A third test performance evaluation" - ) + assert len(prompt_config.best_practices) == 3 + assert prompt_config.best_practices[0] == "A test best-practice" + assert prompt_config.best_practices[1] == "Another test best-practice" + assert prompt_config.best_practices[2] == "A third test best-practice" diff --git a/tests/unit/test_prompt_generator.py b/tests/unit/test_prompt_generator.py index 44147e6dbce8..d1b08f1a041a 100644 --- a/tests/unit/test_prompt_generator.py +++ b/tests/unit/test_prompt_generator.py @@ -20,13 +20,12 @@ def test_add_command(): params = {"arg1": "value1", "arg2": "value2"} generator = PromptGenerator() generator.add_command(command_label, command_name, params) - command = { + assert generator.commands[0].__dict__ == { "label": command_label, "name": command_name, "params": params, "function": None, } - assert command in generator.commands def test_add_resource(): @@ -39,18 +38,18 @@ def test_add_resource(): assert resource in generator.resources -def test_add_performance_evaluation(): +def test_add_best_practice(): """ - Test if the add_performance_evaluation() method adds an evaluation to the 
generator's - performance_evaluation list. + Test if the add_best_practice() method adds a best practice to the generator's + best_practices list. """ - evaluation = "Evaluation1" + practice = "Practice1" generator = PromptGenerator() - generator.add_performance_evaluation(evaluation) - assert evaluation in generator.performance_evaluation + generator.add_best_practice(practice) + assert practice in generator.best_practices -def test_generate_prompt_string(config): +def test_generate_prompt_string(): """ Test if the generate_prompt_string() method generates a prompt string with all the added constraints, commands, resources, and evaluations. @@ -82,10 +81,10 @@ def test_generate_prompt_string(config): for resource in resources: generator.add_resource(resource) for evaluation in evaluations: - generator.add_performance_evaluation(evaluation) + generator.add_best_practice(evaluation) # Generate the prompt string and verify its correctness - prompt_string = generator.generate_prompt_string(config) + prompt_string = generator.generate_prompt_string() assert prompt_string is not None # Check if all constraints, commands, resources, and evaluations are present in the prompt string From cdafee02fc35b4203af8764652dfdaf611a48622 Mon Sep 17 00:00:00 2001 From: Auto-GPT-Bot Date: Sat, 29 Jul 2023 17:10:41 +0000 Subject: [PATCH 10/24] Update cassette submodule --- tests/Auto-GPT-test-cassettes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Auto-GPT-test-cassettes b/tests/Auto-GPT-test-cassettes index 6b4f855269df..9c243c62dbf0 160000 --- a/tests/Auto-GPT-test-cassettes +++ b/tests/Auto-GPT-test-cassettes @@ -1 +1 @@ -Subproject commit 6b4f855269dfc7ec220cc7774d675940dcaa78ef +Subproject commit 9c243c62dbf00f368a47ad646b17825a2765e060 From 45c9566298d26531cacb3be42584efb5254fe62b Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Sat, 29 Jul 2023 10:55:19 -0700 Subject: [PATCH 11/24] Remove append to file (#5051) Signed-off-by: Merwane Hamadi --- agbenchmark/benchmarks.py | 1 + autogpt/commands/file_operations.py | 16 ---------------- autogpt/config/config.py | 2 +- autogpt/llm/utils/__init__.py | 4 +++- tests/unit/test_config.py | 2 +- 5 files changed, 6 insertions(+), 19 deletions(-) diff --git a/agbenchmark/benchmarks.py b/agbenchmark/benchmarks.py index b3df802092c7..5bcbb7ef5c74 100644 --- a/agbenchmark/benchmarks.py +++ b/agbenchmark/benchmarks.py @@ -1,3 +1,4 @@ +import os import sys from pathlib import Path from typing import Tuple diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index adafe14edee0..519a20cde05f 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -228,22 +228,6 @@ def write_to_file(filename: str, text: str, agent: Agent) -> str: return f"Error: {err}" -@command( - "append_to_file", - "Appends to a file", - { - "filename": { - "type": "string", - "description": "The name of the file to write to", - "required": True, - }, - "text": { - "type": "string", - "description": "The text to write to the file", - "required": True, - }, - }, -) @sanitize_path_arg("filename") def append_to_file( filename: str, text: str, agent: Agent, should_log: bool = True diff --git a/autogpt/config/config.py b/autogpt/config/config.py index 66f2e8713946..c0d30910239a 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -54,7 +54,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True): file_logger_path: Optional[Path] = None # Model configuration fast_llm: str = 
"gpt-3.5-turbo" - smart_llm: str = "gpt-4" + smart_llm: str = "gpt-4-0314" temperature: float = 0 openai_functions: bool = False embedding_model: str = "text-embedding-ada-002" diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py index 9eceae0db9e5..5438bdd853cd 100644 --- a/autogpt/llm/utils/__init__.py +++ b/autogpt/llm/utils/__init__.py @@ -119,7 +119,9 @@ def create_chat_completion( temperature = config.temperature if max_tokens is None: prompt_tlength = prompt.token_length - max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt_tlength + max_tokens = ( + OPEN_AI_CHAT_MODELS[model].max_tokens - prompt_tlength - 1 + ) # the -1 is just here because we have a bug and we don't know how to fix it. When using gpt-4-0314 we get a token error. logger.debug(f"Prompt length: {prompt_tlength} tokens") if functions: functions_tlength = count_openai_functions_tokens(functions, model) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 6445ae786e39..80de7073a791 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -21,7 +21,7 @@ def test_initial_values(config: Config): assert config.continuous_mode == False assert config.speak_mode == False assert config.fast_llm == "gpt-3.5-turbo" - assert config.smart_llm == "gpt-4" + assert config.smart_llm == "gpt-4-0314" def test_set_continuous_mode(config: Config): From 2dcaa07470bea1ab15d314c979a2c526f8c3fa5b Mon Sep 17 00:00:00 2001 From: Auto-GPT-Bot Date: Sat, 29 Jul 2023 17:59:06 +0000 Subject: [PATCH 12/24] Update cassette submodule --- tests/Auto-GPT-test-cassettes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Auto-GPT-test-cassettes b/tests/Auto-GPT-test-cassettes index 9c243c62dbf0..f50b6bfc8853 160000 --- a/tests/Auto-GPT-test-cassettes +++ b/tests/Auto-GPT-test-cassettes @@ -1 +1 @@ -Subproject commit 9c243c62dbf00f368a47ad646b17825a2765e060 +Subproject commit f50b6bfc88530aeadcd16001b70cc21e5258a454 From c9bf2ee48d639bad1a7975d19edf5078a1786f87 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Sat, 29 Jul 2023 22:38:25 +0200 Subject: [PATCH 13/24] Add categories to command registry (#5063) * Add categories to command registry * Fix tests --- agbenchmark/benchmarks.py | 4 +- autogpt/app/main.py | 31 +----- autogpt/commands/__init__.py | 2 +- autogpt/commands/execute_code.py | 6 +- autogpt/commands/file_operations.py | 6 +- autogpt/commands/git_operations.py | 5 +- autogpt/commands/image_gen.py | 6 +- .../commands/{task_statuses.py => system.py} | 6 +- autogpt/commands/web_search.py | 6 +- autogpt/commands/web_selenium.py | 6 +- autogpt/core/runner/cli_web_app/server/api.py | 4 +- autogpt/models/command_registry.py | 95 +++++++++++++++++-- tests/mocks/mock_commands.py | 2 + tests/unit/test_commands.py | 4 +- 14 files changed, 132 insertions(+), 51 deletions(-) rename autogpt/commands/{task_statuses.py => system.py} (87%) diff --git a/agbenchmark/benchmarks.py b/agbenchmark/benchmarks.py index 5bcbb7ef5c74..b7fc8d3f62d1 100644 --- a/agbenchmark/benchmarks.py +++ b/agbenchmark/benchmarks.py @@ -50,8 +50,8 @@ def get_command_registry(config: Config): enabled_command_categories = [ x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories ] - for command_category in enabled_command_categories: - command_registry.import_commands(command_category) + for command_module in enabled_command_categories: + command_registry.import_command_module(command_module) return command_registry diff --git a/autogpt/app/main.py b/autogpt/app/main.py 
index d73a511d21b3..f8ac3ca4b0a6 100644 --- a/autogpt/app/main.py +++ b/autogpt/app/main.py @@ -134,36 +134,9 @@ def run_auto_gpt( config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) config.plugins = scan_plugins(config, config.debug_mode) - # Create a CommandRegistry instance and scan default folder - command_registry = CommandRegistry() - - logger.debug( - f"The following command categories are disabled: {config.disabled_command_categories}" - ) - enabled_command_categories = [ - x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories - ] - logger.debug( - f"The following command categories are enabled: {enabled_command_categories}" - ) - - for command_category in enabled_command_categories: - command_registry.import_commands(command_category) - - # Unregister commands that are incompatible with the current config - incompatible_commands = [] - for command in command_registry.commands.values(): - if callable(command.enabled) and not command.enabled(config): - command.enabled = False - incompatible_commands.append(command) - - for command in incompatible_commands: - command_registry.unregister(command) - logger.debug( - f"Unregistering incompatible command: {command.name}, " - f"reason - {command.disabled_reason or 'Disabled by current config.'}" - ) + # Create a CommandRegistry instance and scan default folder + command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) ai_config = construct_main_ai_config( config, diff --git a/autogpt/commands/__init__.py b/autogpt/commands/__init__.py index 9a932b175f03..018f5b8fcfb6 100644 --- a/autogpt/commands/__init__.py +++ b/autogpt/commands/__init__.py @@ -3,5 +3,5 @@ "autogpt.commands.file_operations", "autogpt.commands.web_search", "autogpt.commands.web_selenium", - "autogpt.commands.task_statuses", + "autogpt.commands.system", ] diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index dd35f8593259..30e1e27ea8ca 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -1,4 +1,8 @@ -"""Execute code in a Docker container""" +"""Commands to execute code""" + +COMMAND_CATEGORY = "execute_code" +COMMAND_CATEGORY_TITLE = "Execute Code" + import os import subprocess from pathlib import Path diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py index 519a20cde05f..41da057e3788 100644 --- a/autogpt/commands/file_operations.py +++ b/autogpt/commands/file_operations.py @@ -1,6 +1,10 @@ -"""File operations for AutoGPT""" +"""Commands to perform operations on files""" + from __future__ import annotations +COMMAND_CATEGORY = "file_operations" +COMMAND_CATEGORY_TITLE = "File Operations" + import contextlib import hashlib import os diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py index 021157fbbd56..f7f8186be161 100644 --- a/autogpt/commands/git_operations.py +++ b/autogpt/commands/git_operations.py @@ -1,4 +1,7 @@ -"""Git operations for autogpt""" +"""Commands to perform Git operations""" + +COMMAND_CATEGORY = "git_operations" +COMMAND_CATEGORY_TITLE = "Git Operations" from git.repo import Repo diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py index e02400a8189b..3f6c1d98de43 100644 --- a/autogpt/commands/image_gen.py +++ b/autogpt/commands/image_gen.py @@ -1,4 +1,8 @@ -""" Image Generation Module for AutoGPT.""" +"""Commands to generate images based on text input""" + +COMMAND_CATEGORY = "text_to_image" +COMMAND_CATEGORY_TITLE = 
"Text to Image" + import io import json import time diff --git a/autogpt/commands/task_statuses.py b/autogpt/commands/system.py similarity index 87% rename from autogpt/commands/task_statuses.py rename to autogpt/commands/system.py index 34908928feea..08bfd5e57ea7 100644 --- a/autogpt/commands/task_statuses.py +++ b/autogpt/commands/system.py @@ -1,6 +1,10 @@ -"""Task Statuses module.""" +"""Commands to control the internal state of the program""" + from __future__ import annotations +COMMAND_CATEGORY = "system" +COMMAND_CATEGORY_TITLE = "System" + from typing import NoReturn from autogpt.agents.agent import Agent diff --git a/autogpt/commands/web_search.py b/autogpt/commands/web_search.py index 9ea0d2061164..49712049d472 100644 --- a/autogpt/commands/web_search.py +++ b/autogpt/commands/web_search.py @@ -1,6 +1,10 @@ -"""Google search command for Autogpt.""" +"""Commands to search the web with""" + from __future__ import annotations +COMMAND_CATEGORY = "web_search" +COMMAND_CATEGORY_TITLE = "Web Search" + import json import time from itertools import islice diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 948d799e9c95..2d978494a9d3 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -1,6 +1,10 @@ -"""Selenium web scraping module.""" +"""Commands for browsing a website""" + from __future__ import annotations +COMMAND_CATEGORY = "web_browse" +COMMAND_CATEGORY_TITLE = "Web Browsing" + import logging from pathlib import Path from sys import platform diff --git a/autogpt/core/runner/cli_web_app/server/api.py b/autogpt/core/runner/cli_web_app/server/api.py index 1ba0974b4953..2f19d62446a3 100644 --- a/autogpt/core/runner/cli_web_app/server/api.py +++ b/autogpt/core/runner/cli_web_app/server/api.py @@ -109,6 +109,6 @@ def get_command_registry(config: Config): enabled_command_categories = [ x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories ] - for command_category in enabled_command_categories: - command_registry.import_commands(command_category) + for command_module in enabled_command_categories: + command_registry.import_command_module(command_module) return command_registry diff --git a/autogpt/models/command_registry.py b/autogpt/models/command_registry.py index f54f4adb5030..9dfb35bd3aea 100644 --- a/autogpt/models/command_registry.py +++ b/autogpt/models/command_registry.py @@ -1,6 +1,13 @@ +from __future__ import annotations + import importlib import inspect -from typing import Any +from dataclasses import dataclass, field +from types import ModuleType +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from autogpt.config import Config from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER from autogpt.logs import logger @@ -18,9 +25,21 @@ class CommandRegistry: commands: dict[str, Command] commands_aliases: dict[str, Command] + # Alternative way to structure the registry; currently redundant with self.commands + categories: dict[str, CommandCategory] + + @dataclass + class CommandCategory: + name: str + title: str + description: str + commands: list[Command] = field(default_factory=list[Command]) + modules: list[ModuleType] = field(default_factory=list[ModuleType]) + def __init__(self): self.commands = {} self.commands_aliases = {} + self.categories = {} def __contains__(self, command_name: str): return command_name in self.commands or command_name in self.commands_aliases @@ -84,7 +103,41 @@ def command_prompt(self) -> str: ] return "\n".join(commands_list) - def 
import_commands(self, module_name: str) -> None: + @staticmethod + def with_command_modules(modules: list[str], config: Config) -> CommandRegistry: + new_registry = CommandRegistry() + + logger.debug( + f"The following command categories are disabled: {config.disabled_command_categories}" + ) + enabled_command_modules = [ + x for x in modules if x not in config.disabled_command_categories + ] + + logger.debug( + f"The following command categories are enabled: {enabled_command_modules}" + ) + + for command_module in enabled_command_modules: + new_registry.import_command_module(command_module) + + # Unregister commands that are incompatible with the current config + incompatible_commands: list[Command] = [] + for command in new_registry.commands.values(): + if callable(command.enabled) and not command.enabled(config): + command.enabled = False + incompatible_commands.append(command) + + for command in incompatible_commands: + new_registry.unregister(command) + logger.debug( + f"Unregistering incompatible command: {command.name}, " + f"reason - {command.disabled_reason or 'Disabled by current config.'}" + ) + + return new_registry + + def import_command_module(self, module_name: str) -> None: """ Imports the specified Python module containing command plugins. @@ -99,16 +152,42 @@ def import_commands(self, module_name: str) -> None: module = importlib.import_module(module_name) + category = self.register_module_category(module) + for attr_name in dir(module): attr = getattr(module, attr_name) + + command = None + # Register decorated functions - if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr( - attr, AUTO_GPT_COMMAND_IDENTIFIER - ): - self.register(attr.command) + if getattr(attr, AUTO_GPT_COMMAND_IDENTIFIER, False): + command = attr.command + # Register command classes elif ( inspect.isclass(attr) and issubclass(attr, Command) and attr != Command ): - cmd_instance = attr() - self.register(cmd_instance) + command = attr() + + if command: + self.register(command) + category.commands.append(command) + + def register_module_category(self, module: ModuleType) -> CommandCategory: + if not (category_name := getattr(module, "COMMAND_CATEGORY", None)): + raise ValueError(f"Cannot import invalid command module {module.__name__}") + + if category_name not in self.categories: + self.categories[category_name] = CommandRegistry.CommandCategory( + name=category_name, + title=getattr( + module, "COMMAND_CATEGORY_TITLE", category_name.capitalize() + ), + description=getattr(module, "__doc__", ""), + ) + + category = self.categories[category_name] + if module not in category.modules: + category.modules.append(module) + + return category diff --git a/tests/mocks/mock_commands.py b/tests/mocks/mock_commands.py index 278894c4d096..3758c1da2bbb 100644 --- a/tests/mocks/mock_commands.py +++ b/tests/mocks/mock_commands.py @@ -1,5 +1,7 @@ from autogpt.command_decorator import command +COMMAND_CATEGORY = "mock" + @command( "function_based", diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index 2cdf8701a69b..57de732a626b 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -193,7 +193,7 @@ def test_import_mock_commands_module(): registry = CommandRegistry() mock_commands_module = "tests.mocks.mock_commands" - registry.import_commands(mock_commands_module) + registry.import_command_module(mock_commands_module) assert "function_based" in registry assert registry.commands["function_based"].name == "function_based" @@ -219,7 +219,7 @@ def 
test_import_temp_command_file_module(tmp_path: Path): sys.path.append(str(tmp_path)) temp_commands_module = "mock_commands" - registry.import_commands(temp_commands_module) + registry.import_command_module(temp_commands_module) # Remove the temp directory from sys.path sys.path.remove(str(tmp_path)) From c1567c22f5cf7f715b025293adc92acab1adabdd Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Sun, 30 Jul 2023 14:51:50 -0400 Subject: [PATCH 14/24] Do not load disabled commands (faster exec & benchmark runs) (#5078) * Use modern material theme for docs * Do not load disabled commands * black . --------- Co-authored-by: lc0rp <2609411+lc0rp@users.noreply.github.com> --- agbenchmark/benchmarks.py | 12 +----------- autogpt/core/runner/cli_web_app/server/api.py | 14 ++------------ benchmarks.py | 12 +----------- 3 files changed, 4 insertions(+), 34 deletions(-) diff --git a/agbenchmark/benchmarks.py b/agbenchmark/benchmarks.py index b7fc8d3f62d1..6a646f370491 100644 --- a/agbenchmark/benchmarks.py +++ b/agbenchmark/benchmarks.py @@ -26,7 +26,7 @@ def bootstrap_agent(task, continuous_mode) -> Agent: config.continuous_mode = continuous_mode config.temperature = 0 config.plain_output = True - command_registry = get_command_registry(config) + command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) config.memory_backend = "no_memory" config.workspace_path = Workspace.init_workspace_directory(config) config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) @@ -45,16 +45,6 @@ def bootstrap_agent(task, continuous_mode) -> Agent: ) -def get_command_registry(config: Config): - command_registry = CommandRegistry() - enabled_command_categories = [ - x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories - ] - for command_module in enabled_command_categories: - command_registry.import_command_module(command_module) - return command_registry - - if __name__ == "__main__": # The first argument is the script name itself, second is the task if len(sys.argv) != 2: diff --git a/autogpt/core/runner/cli_web_app/server/api.py b/autogpt/core/runner/cli_web_app/server/api.py index 2f19d62446a3..7a5ae9a74f58 100644 --- a/autogpt/core/runner/cli_web_app/server/api.py +++ b/autogpt/core/runner/cli_web_app/server/api.py @@ -6,7 +6,7 @@ from autogpt.agents import Agent from autogpt.app.main import UserFeedback from autogpt.commands import COMMAND_CATEGORIES -from autogpt.config import AIConfig, Config, ConfigBuilder +from autogpt.config import AIConfig, ConfigBuilder from autogpt.logs import logger from autogpt.memory.vector import get_memory from autogpt.models.command_registry import CommandRegistry @@ -85,7 +85,7 @@ def bootstrap_agent(task, continuous_mode) -> Agent: config.continuous_mode = continuous_mode config.temperature = 0 config.plain_output = True - command_registry = get_command_registry(config) + command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) config.memory_backend = "no_memory" config.workspace_path = Workspace.init_workspace_directory(config) config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) @@ -102,13 +102,3 @@ def bootstrap_agent(task, continuous_mode) -> Agent: config=config, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, ) - - -def get_command_registry(config: Config): - command_registry = CommandRegistry() - enabled_command_categories = [ - x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories - ] - for 
command_module in enabled_command_categories: - command_registry.import_command_module(command_module) - return command_registry diff --git a/benchmarks.py b/benchmarks.py index 9cf93acaef4f..62f89662e51c 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -22,7 +22,7 @@ def bootstrap_agent(task): config.continuous_mode = False config.temperature = 0 config.plain_output = True - command_registry = get_command_registry(config) + command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) config.memory_backend = "no_memory" config.workspace_path = Workspace.init_workspace_directory(config) config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) @@ -39,13 +39,3 @@ def bootstrap_agent(task): config=config, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, ) - - -def get_command_registry(config: Config): - command_registry = CommandRegistry() - enabled_command_categories = [ - x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories - ] - for command_category in enabled_command_categories: - command_registry.import_commands(command_category) - return command_registry From f528b9a9956c1e5317d0eacc19da8292895127bc Mon Sep 17 00:00:00 2001 From: Auto-GPT-Bot Date: Sun, 30 Jul 2023 18:55:25 +0000 Subject: [PATCH 15/24] Update cassette submodule --- tests/Auto-GPT-test-cassettes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Auto-GPT-test-cassettes b/tests/Auto-GPT-test-cassettes index f50b6bfc8853..0e4b46dc5155 160000 --- a/tests/Auto-GPT-test-cassettes +++ b/tests/Auto-GPT-test-cassettes @@ -1 +1 @@ -Subproject commit f50b6bfc88530aeadcd16001b70cc21e5258a454 +Subproject commit 0e4b46dc515585902eaae068dcbc3f182dd263ba From b7f1df3e1d397edb4f3a7168a929dc762280f597 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 31 Jul 2023 01:21:09 +0200 Subject: [PATCH 16/24] Fix execute_shell_popen --- autogpt/commands/execute_code.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index 30e1e27ea8ca..3d52eb0a58b8 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -255,9 +255,9 @@ def execute_shell(command_line: str, agent: Agent) -> str: "execute_shell_popen", "Executes a Shell Command, non-interactive commands only", { - "query": { + "command_line": { "type": "string", - "description": "The search query", + "description": "The command line to execute", "required": True, } }, From 3651d22147825c8ebff6b98112fa13f33414e4d5 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Mon, 31 Jul 2023 17:43:41 +0200 Subject: [PATCH 17/24] Verify model compatibility if OPENAI_FUNCTIONS is set (#5075) Co-authored-by: Luke <2609441+lc0rp@users.noreply.github.com> --- autogpt/command_decorator.py | 8 ++++++-- autogpt/config/config.py | 10 ++++++++++ autogpt/llm/base.py | 2 ++ autogpt/llm/providers/openai.py | 4 ++++ autogpt/models/command.py | 7 +++++-- 5 files changed, 27 insertions(+), 4 deletions(-) diff --git a/autogpt/command_decorator.py b/autogpt/command_decorator.py index d082d9bf5d7f..9a6f58ae194e 100644 --- a/autogpt/command_decorator.py +++ b/autogpt/command_decorator.py @@ -1,7 +1,11 @@ +from __future__ import annotations + import functools -from typing import Any, Callable, Optional, TypedDict +from typing import TYPE_CHECKING, Any, Callable, Optional, TypedDict + +if TYPE_CHECKING: + from autogpt.config import Config -from autogpt.config import Config from autogpt.models.command 
import Command, CommandParameter # Unique identifier for auto-gpt commands diff --git a/autogpt/config/config.py b/autogpt/config/config.py index c0d30910239a..93fc42e914ba 100644 --- a/autogpt/config/config.py +++ b/autogpt/config/config.py @@ -13,6 +13,7 @@ from pydantic import Field, validator from autogpt.core.configuration.schema import Configurable, SystemSettings +from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS from autogpt.plugins.plugins_config import PluginsConfig AI_SETTINGS_FILE = "ai_settings.yaml" @@ -147,6 +148,15 @@ def validate_plugins(cls, p: AutoGPTPluginTemplate | Any): ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance" return p + @validator("openai_functions") + def validate_openai_functions(cls, v: bool, values: dict[str, Any]): + if v: + smart_llm = values["smart_llm"] + assert OPEN_AI_CHAT_MODELS[smart_llm].supports_functions, ( + f"Model {smart_llm} does not support OpenAI Functions. " + "Please disable OPENAI_FUNCTIONS or choose a suitable model." + ) + def get_openai_credentials(self, model: str) -> dict[str, str]: credentials = { "api_key": self.openai_api_key, diff --git a/autogpt/llm/base.py b/autogpt/llm/base.py index 14a146b3c5c7..1ac00112d931 100644 --- a/autogpt/llm/base.py +++ b/autogpt/llm/base.py @@ -67,6 +67,8 @@ class CompletionModelInfo(ModelInfo): class ChatModelInfo(CompletionModelInfo): """Struct for chat model information.""" + supports_functions: bool = False + @dataclass class TextModelInfo(CompletionModelInfo): diff --git a/autogpt/llm/providers/openai.py b/autogpt/llm/providers/openai.py index 6e7461428327..35c652f0587e 100644 --- a/autogpt/llm/providers/openai.py +++ b/autogpt/llm/providers/openai.py @@ -36,12 +36,14 @@ prompt_token_cost=0.0015, completion_token_cost=0.002, max_tokens=4096, + supports_functions=True, ), ChatModelInfo( name="gpt-3.5-turbo-16k-0613", prompt_token_cost=0.003, completion_token_cost=0.004, max_tokens=16384, + supports_functions=True, ), ChatModelInfo( name="gpt-4-0314", @@ -54,6 +56,7 @@ prompt_token_cost=0.03, completion_token_cost=0.06, max_tokens=8191, + supports_functions=True, ), ChatModelInfo( name="gpt-4-32k-0314", @@ -66,6 +69,7 @@ prompt_token_cost=0.06, completion_token_cost=0.12, max_tokens=32768, + supports_functions=True, ), ] } diff --git a/autogpt/models/command.py b/autogpt/models/command.py index 614697861efb..a7cec509f1b7 100644 --- a/autogpt/models/command.py +++ b/autogpt/models/command.py @@ -1,6 +1,9 @@ -from typing import Any, Callable, Optional +from __future__ import annotations -from autogpt.config import Config +from typing import TYPE_CHECKING, Any, Callable, Optional + +if TYPE_CHECKING: + from autogpt.config import Config from .command_parameter import CommandParameter From ad18f77e25425c483332098d3c51f098bc032cb7 Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Tue, 1 Aug 2023 01:47:32 -0400 Subject: [PATCH 18/24] fix: Nonetype error from command_name.startswith() (#5079) Co-authored-by: lc0rp <2609411+lc0rp@users.noreply.github.com> Co-authored-by: James Collins --- autogpt/app/main.py | 34 ++++++++++++++------------- tests/integration/test_update_user.py | 33 ++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 16 deletions(-) create mode 100644 tests/integration/test_update_user.py diff --git a/autogpt/app/main.py b/autogpt/app/main.py index f8ac3ca4b0a6..e7d51ce6361c 100644 --- a/autogpt/app/main.py +++ b/autogpt/app/main.py @@ -343,23 +343,25 @@ def update_user( 
print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config) if command_name is not None: - if config.speak_mode: - say_text(f"I want to execute {command_name}", config) + if command_name.lower().startswith("error"): + logger.typewriter_log( + "ERROR: ", + Fore.RED, + f"The Agent failed to select an action. " + f"Error message: {command_name}", + ) + else: + if config.speak_mode: + say_text(f"I want to execute {command_name}", config) - # First log new-line so user can differentiate sections better in console - logger.typewriter_log("\n") - logger.typewriter_log( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} " - f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}", - ) - elif command_name.lower().startswith("error"): - logger.typewriter_log( - "ERROR: ", - Fore.RED, - f"The Agent failed to select an action. " f"Error message: {command_name}", - ) + # First log new-line so user can differentiate sections better in console + logger.typewriter_log("\n") + logger.typewriter_log( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} " + f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}", + ) else: logger.typewriter_log( "NO ACTION SELECTED: ", diff --git a/tests/integration/test_update_user.py b/tests/integration/test_update_user.py new file mode 100644 index 000000000000..bc9206317513 --- /dev/null +++ b/tests/integration/test_update_user.py @@ -0,0 +1,33 @@ +from unittest.mock import MagicMock, patch + +from colorama import Fore + +from autogpt.app.main import update_user + + +def test_update_user_command_name_is_none() -> None: + # Mock necessary objects + config = MagicMock() + ai_config = MagicMock() + assistant_reply_dict = MagicMock() + + # Mock print_assistant_thoughts and logger.typewriter_log + with patch( + "autogpt.app.main.print_assistant_thoughts" + ) as mock_print_assistant_thoughts, patch( + "autogpt.app.main.logger.typewriter_log" + ) as mock_logger_typewriter_log: + # Test the update_user function with None command_name + update_user(config, ai_config, None, None, assistant_reply_dict) + + # Check that print_assistant_thoughts was called once + mock_print_assistant_thoughts.assert_called_once_with( + ai_config.ai_name, assistant_reply_dict, config + ) + + # Check that logger.typewriter_log was called once with expected arguments + mock_logger_typewriter_log.assert_called_once_with( + "NO ACTION SELECTED: ", + Fore.RED, + f"The Agent failed to select an action.", + ) From c8914ebb66e5147b42f210898abd480c64563c52 Mon Sep 17 00:00:00 2001 From: Cyrus <39694513+cyrus-hawk@users.noreply.github.com> Date: Tue, 1 Aug 2023 18:39:19 +0300 Subject: [PATCH 19/24] slips of the pen (bloopers) in autogpt/core part of the repo (#5045) * fix omitted "()" in __str__(self) in core/ability/base.py * put back async keyword in the base class * Remove extra () in OpenAISettings class in autogpt/core/resourece/model_providers/openai.py --------- Co-authored-by: James Collins Co-authored-by: Nicholas Tindle --- autogpt/core/ability/base.py | 8 ++++---- autogpt/core/resource/model_providers/openai.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/autogpt/core/ability/base.py b/autogpt/core/ability/base.py index ac26f0267151..1faaaf3327d4 100644 --- a/autogpt/core/ability/base.py +++ b/autogpt/core/ability/base.py @@ -1,6 +1,6 @@ import abc from pprint import pformat -from typing import ClassVar +from typing import Any, ClassVar import 
inflection from pydantic import Field @@ -50,11 +50,11 @@ def required_arguments(cls) -> list[str]: return [] @abc.abstractmethod - async def __call__(self, *args, **kwargs) -> AbilityResult: + async def __call__(self, *args: Any, **kwargs: Any) -> AbilityResult: ... def __str__(self) -> str: - return pformat(self.dump) + return pformat(self.dump()) def dump(self) -> dict: return { @@ -88,5 +88,5 @@ def get_ability(self, ability_name: str) -> Ability: ... @abc.abstractmethod - def perform(self, ability_name: str, **kwargs) -> AbilityResult: + async def perform(self, ability_name: str, **kwargs: Any) -> AbilityResult: ... diff --git a/autogpt/core/resource/model_providers/openai.py b/autogpt/core/resource/model_providers/openai.py index 3707796a134a..df7bdb83bafb 100644 --- a/autogpt/core/resource/model_providers/openai.py +++ b/autogpt/core/resource/model_providers/openai.py @@ -109,7 +109,7 @@ class OpenAIModelProviderBudget(ModelProviderBudget): class OpenAISettings(ModelProviderSettings): configuration: OpenAIConfiguration - credentials: ModelProviderCredentials() + credentials: ModelProviderCredentials budget: OpenAIModelProviderBudget From fc6255296a2ff340a94e53f4f7fb9e861102eba0 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Tue, 1 Aug 2023 09:13:37 -0700 Subject: [PATCH 20/24] Add information on how to improve Auto-GPT with agbenchmark (#5056) * Add information on how to improve Auto-GPT with agbenchmark * Update introduction.md * Update docs/challenges/introduction.md Co-authored-by: Reinier van der Leer * Update introduction.md --------- Co-authored-by: James Collins Co-authored-by: Reinier van der Leer --- docs/challenges/introduction.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/challenges/introduction.md b/docs/challenges/introduction.md index 63f570f268be..256a82385071 100644 --- a/docs/challenges/introduction.md +++ b/docs/challenges/introduction.md @@ -21,3 +21,15 @@ There are two main ways to get involved with challenges: To learn more about submitting and beating challenges, please visit the [List of Challenges](list.md), [Submit a Challenge](submit.md), and [Beat a Challenge](beat.md) pages. We look forward to your contributions and the exciting solutions that the community will develop together to make Auto-GPT even better! + +!!! warning + + We're slowly transitioning to agbenchmark. agbenchmark is a simpler way to improve Auto-GPT. Simply run: + + ``` + agbenchmark start + ``` + + and beat as many challenges as possible. + +For more agbenchmark options, look at the [readme](https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/tree/master/agbenchmark). From 7cd407b7b4a9f4395761e772335e859e40e8c3d3 Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Tue, 1 Aug 2023 13:17:33 -0400 Subject: [PATCH 21/24] Use modern material theme for docs (#5035) * Use modern material theme for docs * Update mkdocs.yml Added search plugin Co-authored-by: James Collins * Updating mkdocs material theme config per recommendations to enable all markdown options * Updated highlight extension settings and codeblocks throughout the docs to align with mkdocs-material recommendations. 
codehilite is deprecated in favor of the highlight extension: https://squidfunk.github.io/mkdocs-material/setup/extensions/python-markdown-extensions/#highlight --------- Co-authored-by: lc0rp <2609411+lc0rp@users.noreply.github.com> Co-authored-by: James Collins Co-authored-by: Nicholas Tindle --- docs/_javascript/mathjax.js | 16 ++++ docs/_javascript/tablesort.js | 6 ++ docs/challenges/memory/challenge_b.md | 5 +- docs/challenges/memory/challenge_c.md | 28 ++++-- docs/challenges/memory/challenge_d.md | 19 ++-- docs/configuration/imagegen.md | 12 ++- docs/configuration/memory.md | 24 ++--- docs/configuration/voice.md | 2 +- docs/imgs/Auto_GPT_Logo.png | Bin 0 -> 26841 bytes docs/setup.md | 113 +++++++++++++----------- docs/share-your-logs.md | 2 +- docs/testing.md | 25 +++--- docs/usage.md | 42 +++++---- mkdocs.yml | 121 ++++++++++++++++++++------ requirements.txt | 2 + tests/unit/test_config.py | 17 ++-- 16 files changed, 291 insertions(+), 143 deletions(-) create mode 100644 docs/_javascript/mathjax.js create mode 100644 docs/_javascript/tablesort.js create mode 100644 docs/imgs/Auto_GPT_Logo.png diff --git a/docs/_javascript/mathjax.js b/docs/_javascript/mathjax.js new file mode 100644 index 000000000000..a80ddbff75af --- /dev/null +++ b/docs/_javascript/mathjax.js @@ -0,0 +1,16 @@ +window.MathJax = { + tex: { + inlineMath: [["\\(", "\\)"]], + displayMath: [["\\[", "\\]"]], + processEscapes: true, + processEnvironments: true + }, + options: { + ignoreHtmlClass: ".*|", + processHtmlClass: "arithmatex" + } +}; + +document$.subscribe(() => { + MathJax.typesetPromise() +}) \ No newline at end of file diff --git a/docs/_javascript/tablesort.js b/docs/_javascript/tablesort.js new file mode 100644 index 000000000000..ee04e9008218 --- /dev/null +++ b/docs/_javascript/tablesort.js @@ -0,0 +1,6 @@ +document$.subscribe(function () { + var tables = document.querySelectorAll("article table:not([class])") + tables.forEach(function (table) { + new Tablesort(table) + }) +}) \ No newline at end of file diff --git a/docs/challenges/memory/challenge_b.md b/docs/challenges/memory/challenge_b.md index abc6da6bef89..7df7044ff39a 100644 --- a/docs/challenges/memory/challenge_b.md +++ b/docs/challenges/memory/challenge_b.md @@ -3,9 +3,10 @@ **Status**: Current level to beat: level 3 **Command to try**: -``` + +```shell pytest -s tests/challenges/memory/test_memory_challenge_b.py --level=3 -`` +``` ## Description diff --git a/docs/challenges/memory/challenge_c.md b/docs/challenges/memory/challenge_c.md index e197ddbd2c68..955c823f31cd 100644 --- a/docs/challenges/memory/challenge_c.md +++ b/docs/challenges/memory/challenge_c.md @@ -3,9 +3,10 @@ **Status**: Current level to beat: level 1 **Command to try**: -``` + +```shell pytest -s tests/challenges/memory/test_memory_challenge_c.py --level=2 -`` +``` ## Description @@ -20,13 +21,23 @@ The challenge involves a series of text files containing instructions and silly - `instructions_1.txt` -The current phrase is "The purple elephant danced on a rainbow while eating a taco.".\n[NOISE intended to confuse the agent] -Read the file instructions_2.txt using the read_file command. +The current phrase is + +``` +The purple elephant danced on a rainbow while eating a taco.\n[NOISE intended to confuse the agent] +``` + +Read the file `instructions_2.txt` using the read_file command. 
- `instructions_2.txt` -The current phrase is "The sneaky toaster stole my socks and ran away to Hawaii.".\n[NOISE intended to confuse the agent] +The current phrase is + +``` +The sneaky toaster stole my socks and ran away to Hawaii.\n[NOISE intended to confuse the agent] +``` + Read the file instructions_3.txt using the read_file command. @@ -34,7 +45,12 @@ Read the file instructions_3.txt using the read_file command. - `instructions_n.txt` -The current phrase is "My pet rock sings better than Beyoncé on Tuesdays." +The current phrase is + +``` +My pet rock sings better than Beyoncé on Tuesdays. +``` + Write all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command. diff --git a/docs/challenges/memory/challenge_d.md b/docs/challenges/memory/challenge_d.md index 7563cce5939d..5ecec045e01a 100644 --- a/docs/challenges/memory/challenge_d.md +++ b/docs/challenges/memory/challenge_d.md @@ -1,11 +1,12 @@ -# Memory Challenge C +# Memory Challenge D **Status**: Current level to beat: level 1 **Command to try**: -``` + +```shell pytest -s tests/challenges/memory/test_memory_challenge_d.py --level=1 -`` +``` ## Description @@ -30,13 +31,16 @@ The test runs for levels up to the maximum level that the AI has successfully be - `instructions_1.txt` -"Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A).", +``` +Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A). +``` - `instructions_2.txt` -"Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speak to Sally about the marble A as instructed by Anne.", - +``` +Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speak to Sally about the marble A as instructed by Anne. +``` ...and so on. @@ -44,6 +48,7 @@ The test runs for levels up to the maximum level that the AI has successfully be The expected believes of every characters are given in a list: +```json expected_beliefs = { 1: { 'Sally': { @@ -68,7 +73,7 @@ expected_beliefs = { 'A': 'sofa', # Because Anne told him to tell Sally so } },... 
- +``` ## Objective diff --git a/docs/configuration/imagegen.md b/docs/configuration/imagegen.md index 38fdcebb28bf..1a10d61d2827 100644 --- a/docs/configuration/imagegen.md +++ b/docs/configuration/imagegen.md @@ -7,7 +7,8 @@ ## DALL-e In `.env`, make sure `IMAGE_PROVIDER` is commented (or set to `dalle`): -``` ini + +```ini # IMAGE_PROVIDER=dalle # this is the default ``` @@ -23,7 +24,8 @@ To use text-to-image models from Hugging Face, you need a Hugging Face API token Link to the appropriate settings page: [Hugging Face > Settings > Tokens](https://huggingface.co/settings/tokens) Once you have an API token, uncomment and adjust these variables in your `.env`: -``` ini + +```ini IMAGE_PROVIDER=huggingface HUGGINGFACE_API_TOKEN=your-huggingface-api-token ``` @@ -39,7 +41,8 @@ Further optional configuration: ## Stable Diffusion WebUI It is possible to use your own self-hosted Stable Diffusion WebUI with Auto-GPT: -``` ini + +```ini IMAGE_PROVIDER=sdwebui ``` @@ -54,6 +57,7 @@ Further optional configuration: | `SD_WEBUI_AUTH` | `{username}:{password}` | *Note: do not copy the braces!* | ## Selenium -``` shell + +```shell sudo Xvfb :10 -ac -screen 0 1024x768x24 & DISPLAY=:10 ``` diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md index 9d18f5ba2aca..3fa908b26169 100644 --- a/docs/configuration/memory.md +++ b/docs/configuration/memory.md @@ -51,17 +51,19 @@ Links to memory backends 1. Launch Redis container - :::shell - docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest + ```shell + docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest + ``` 3. Set the following settings in `.env` - :::ini - MEMORY_BACKEND=redis - REDIS_HOST=localhost - REDIS_PORT=6379 - REDIS_PASSWORD= - + ```shell + MEMORY_BACKEND=redis + REDIS_HOST=localhost + REDIS_PORT=6379 + REDIS_PASSWORD= + ``` + Replace `` by your password, omitting the angled brackets (<>). Optional configuration: @@ -157,7 +159,7 @@ To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip insta Install the Weaviate client before usage. -``` shell +```shell $ pip install weaviate-client ``` @@ -165,7 +167,7 @@ $ pip install weaviate-client In your `.env` file set the following: -``` ini +```ini MEMORY_BACKEND=weaviate WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance WEAVIATE_PORT="8080" @@ -195,7 +197,7 @@ View memory usage by using the `--debug` flag :) Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT. 
-``` shell
+```shell
 $ python data_ingestion.py -h
 usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]

diff --git a/docs/configuration/voice.md b/docs/configuration/voice.md
index 728fbaf5fd43..654d2ee45b43 100644
--- a/docs/configuration/voice.md
+++ b/docs/configuration/voice.md
@@ -2,7 +2,7 @@

 Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT

-``` shell
+```shell
 python -m autogpt --speak
 ```

diff --git a/docs/imgs/Auto_GPT_Logo.png b/docs/imgs/Auto_GPT_Logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..9c60eea982b46802df3094fc11ca76824631f089
GIT binary patch
literal 26841
[base85-encoded binary patch data for the new 26841-byte docs/imgs/Auto_GPT_Logo.png omitted]
zAzdmL9fGXvv5^jbu5jr@937#VnWY^i3~s$vocF6lvj+|3mO8e&LGeBROZM_}k0a9$ zv^L^!>OXm<&I8`zhp&QfpPd(#2uYmh}+Qvu|$0ZH*0+Eph+Ra~w+p9py+a_BP=gs8~G7QWyZ6|j3(_KehF zJ5>=(vlQ-^*Df-P@C}JtYF(iS|8f2Y5NvCTR+!sR_rNR*E}N_Wx8Pt{pP@STC?lx9 z*2w8q&x5yjD2J*da&f)xUS5QNUEdMi#G;q@lV_N*hCAdf&(wZ3ARH)ii!4!Q?-`Ik z#t?!3d7mLdFYB%~JkV$_LK-Z`Y?=QR_qe2Kl0D&~3^n}?c4W=EFTGQJeN)A@5~BP6 z4f!f!?4Ju*pYsTnHx439?jyxv3}rhMWI&J^Jk~F~HO5fo=Y4_Vm%%l%TLdc_h;H!8m~}j{X)$o3f1{v-qTsE#8b4N^{*RW JE7G=m{9n8>3@ZQt literal 0 HcmV?d00001 diff --git a/docs/setup.md b/docs/setup.md index d0079e0f0c7a..bd2f142e0412 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -36,40 +36,43 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt 1. Make sure you have Docker installed, see [requirements](#requirements) 2. Create a project directory for Auto-GPT - :::shell - mkdir Auto-GPT - cd Auto-GPT + ```shell + mkdir Auto-GPT + cd Auto-GPT + ``` 3. In the project directory, create a file called `docker-compose.yml` with the following contents: - :::yaml - version: "3.9" - services: - auto-gpt: - image: significantgravitas/auto-gpt - env_file: - - .env - profiles: ["exclude-from-up"] - volumes: - - ./auto_gpt_workspace:/app/auto_gpt_workspace - - ./data:/app/data - ## allow auto-gpt to write logs to disk - - ./logs:/app/logs - ## uncomment following lines if you want to make use of these files - ## you must have them existing in the same folder as this docker-compose.yml - #- type: bind - # source: ./azure.yaml - # target: /app/azure.yaml - #- type: bind - # source: ./ai_settings.yaml - # target: /app/ai_settings.yaml + ```yaml + version: "3.9" + services: + auto-gpt: + image: significantgravitas/auto-gpt + env_file: + - .env + profiles: ["exclude-from-up"] + volumes: + - ./auto_gpt_workspace:/app/auto_gpt_workspace + - ./data:/app/data + ## allow auto-gpt to write logs to disk + - ./logs:/app/logs + ## uncomment following lines if you want to make use of these files + ## you must have them existing in the same folder as this docker-compose.yml + #- type: bind + # source: ./azure.yaml + # target: /app/azure.yaml + #- type: bind + # source: ./ai_settings.yaml + # target: /app/ai_settings.yaml + ``` 4. Create the necessary [configuration](#configuration) files. If needed, you can find templates in the [repository]. 5. Pull the latest image from [Docker Hub] - :::shell - docker pull significantgravitas/auto-gpt + ```shell + docker pull significantgravitas/auto-gpt + ``` 6. Continue to [Run with Docker](#run-with-docker) @@ -92,14 +95,15 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt 1. Clone the repository - :::shell - git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git + ```shell + git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git + ``` 2. Navigate to the directory where you downloaded the repository - :::shell - cd Auto-GPT - + ```shell + cd Auto-GPT + ``` ### Set up without Git/Docker @@ -139,12 +143,13 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt Example: - :::yaml - # Please specify all of these values as double-quoted strings - # Replace string in angled brackets (<>) to your own deployment Name - azure_model_map: - fast_llm_deployment_id: "" - ... + ```yaml + # Please specify all of these values as double-quoted strings + # Replace string in angled brackets (<>) to your own deployment Name + azure_model_map: + fast_llm_deployment_id: "" + ... 
+ ``` Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model. If you're on Windows you may need to install an [MSVC library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170). @@ -164,7 +169,9 @@ Easiest is to use `docker compose`. Important: Docker Compose version 1.29.0 or later is required to use version 3.9 of the Compose file format. You can check the version of Docker Compose installed on your system by running the following command: - docker compose version +```shell +docker compose version +``` This will display the version of Docker Compose that is currently installed on your system. @@ -174,13 +181,15 @@ Once you have a recent version of Docker Compose, run the commands below in your 1. Build the image. If you have pulled the image from Docker Hub, skip this step (NOTE: You *will* need to do this if you are modifying requirements.txt to add/remove dependencies like Python libs/frameworks) - :::shell - docker compose build auto-gpt - + ```shell + docker compose build auto-gpt + ``` + 2. Run Auto-GPT - :::shell - docker compose run --rm auto-gpt + ```shell + docker compose run --rm auto-gpt + ``` By default, this will also start and attach a Redis memory backend. If you do not want this, comment or remove the `depends: - redis` and `redis:` sections from @@ -189,12 +198,14 @@ Once you have a recent version of Docker Compose, run the commands below in your For related settings, see [Memory > Redis setup](./configuration/memory.md#redis-setup). You can pass extra arguments, e.g. running with `--gpt3only` and `--continuous`: -``` shell + +```shell docker compose run --rm auto-gpt --gpt3only --continuous ``` If you dare, you can also build and run it with "vanilla" docker commands: -``` shell + +```shell docker build -t auto-gpt . docker run -it --env-file=.env -v $PWD:/app auto-gpt docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous @@ -218,7 +229,7 @@ docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuou Create a virtual environment to run in. -``` shell +```shell python -m venv venvAutoGPT source venvAutoGPT/bin/activate pip3 install --upgrade pip @@ -232,13 +243,15 @@ packages and launch Auto-GPT. - On Linux/MacOS: - :::shell - ./run.sh + ```shell + ./run.sh + ``` - On Windows: - :::shell - .\run.bat + ```shell + .\run.bat + ``` If this gives errors, make sure you have a compatible Python version installed. See also the [requirements](./installation.md#requirements). 
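
The `docker-compose.yml` created in the setup steps above can be sanity-checked before the first run. This is a suggested verification step rather than part of the patched docs; `docker compose config` is the standard Compose v2 subcommand for validating and rendering a compose file:

```shell
# Validate docker-compose.yml and print the fully resolved configuration;
# YAML or schema errors are reported instead of the rendered output.
docker compose config

# If it renders cleanly, pull the image and do a one-off run as documented above.
docker pull significantgravitas/auto-gpt
docker compose run --rm auto-gpt
```
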
diff --git a/docs/share-your-logs.md b/docs/share-your-logs.md index f673e375cf45..ebcce83933b4 100644 --- a/docs/share-your-logs.md +++ b/docs/share-your-logs.md @@ -8,7 +8,7 @@ Activity, Error, and Debug logs are located in `./logs` To print out debug logs: -``` shell +```shell ./run.sh --debug # on Linux / macOS .\run.bat --debug # on Windows diff --git a/docs/testing.md b/docs/testing.md index 9a1735966e95..ef8176abfe5b 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -2,12 +2,13 @@ To run all tests, use the following command: -``` shell +```shell pytest ``` If `pytest` is not found: -``` shell + +```shell python -m pytest ``` @@ -15,18 +16,21 @@ python -m pytest - To run without integration tests: - :::shell - pytest --without-integration +```shell +pytest --without-integration +``` - To run without *slow* integration tests: - :::shell - pytest --without-slow-integration +```shell +pytest --without-slow-integration +``` - To run tests and see coverage: - :::shell - pytest --cov=autogpt --without-integration --without-slow-integration +```shell +pytest --cov=autogpt --without-integration --without-slow-integration +``` ## Running the linter @@ -36,11 +40,12 @@ See the [flake8 rules](https://www.flake8rules.com/) for more information. To run the linter: -``` shell +```shell flake8 . ``` Or: -``` shell + +```shell python -m flake8 . ``` diff --git a/docs/usage.md b/docs/usage.md index cb74ef7f6feb..f280bc8f5ae7 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -3,7 +3,7 @@ ## Command Line Arguments Running with `--help` lists all the possible command line arguments you can pass: -``` shell +```shell ./run.sh --help # on Linux / macOS .\run.bat --help # on Windows @@ -13,9 +13,10 @@ Running with `--help` lists all the possible command line arguments you can pass For use with Docker, replace the script in the examples with `docker compose run --rm auto-gpt`: - :::shell - docker compose run --rm auto-gpt --help - docker compose run --rm auto-gpt --ai-settings + ```shell + docker compose run --rm auto-gpt --help + docker compose run --rm auto-gpt --ai-settings + ``` !!! note Replace anything in angled brackets (<>) to a value you want to specify @@ -23,18 +24,22 @@ Running with `--help` lists all the possible command line arguments you can pass Here are some common arguments you can use when running Auto-GPT: * Run Auto-GPT with a different AI Settings file - ``` shell - ./run.sh --ai-settings - ``` + +```shell +./run.sh --ai-settings +``` + * Run Auto-GPT with a different Prompt Settings file - ``` shell - ./run.sh --prompt-settings - ``` -* Specify a memory backend - :::shell - ./run.sh --use-memory +```shell +./run.sh --prompt-settings +``` + +* Specify a memory backend +```shell +./run.sh --use-memory +``` !!! note There are shorthands for some of these flags, for example `-m` for `--use-memory`. @@ -44,7 +49,7 @@ Here are some common arguments you can use when running Auto-GPT: Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT -``` shell +```shell ./run.sh --speak ``` @@ -55,9 +60,10 @@ Continuous mode is NOT recommended. It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorize. Use at your own risk. -``` shell +```shell ./run.sh --continuous ``` + To exit the program, press ++ctrl+c++ ### ♻️ Self-Feedback Mode ⚠️ @@ -68,7 +74,7 @@ Running Self-Feedback will **INCREASE** token use and thus cost more. This featu If you don't have access to GPT-4, this mode allows you to use Auto-GPT! 
-``` shell +```shell ./run.sh --gpt3only ``` @@ -79,7 +85,7 @@ You can achieve the same by setting `SMART_LLM` in `.env` to `gpt-3.5-turbo`. If you have access to GPT-4, this mode allows you to use Auto-GPT solely with GPT-4. This may give your bot increased intelligence. -``` shell +```shell ./run.sh --gpt4only ``` @@ -97,7 +103,7 @@ Activity, Error, and Debug logs are located in `./logs` To print out debug logs: -``` shell +```shell ./run.sh --debug # on Linux / macOS .\run.bat --debug # on Windows diff --git a/mkdocs.yml b/mkdocs.yml index 2265a63fa690..0a9bb9e12d74 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -7,39 +7,110 @@ nav: - Usage: usage.md - Plugins: plugins.md - Configuration: - - Options: configuration/options.md - - Search: configuration/search.md - - Memory: configuration/memory.md - - Voice: configuration/voice.md - - Image Generation: configuration/imagegen.md + - Options: configuration/options.md + - Search: configuration/search.md + - Memory: configuration/memory.md + - Voice: configuration/voice.md + - Image Generation: configuration/imagegen.md - Help us improve Auto-GPT: - - Share your debug logs with us: share-your-logs.md - - Contribution guide: contributing.md - - Running tests: testing.md - - Code of Conduct: code-of-conduct.md + - Share your debug logs with us: share-your-logs.md + - Contribution guide: contributing.md + - Running tests: testing.md + - Code of Conduct: code-of-conduct.md - Challenges: - - Introduction: challenges/introduction.md - - List of Challenges: - - Memory: - - Introduction: challenges/memory/introduction.md - - Memory Challenge A: challenges/memory/challenge_a.md - - Memory Challenge B: challenges/memory/challenge_b.md - - Memory Challenge C: challenges/memory/challenge_c.md - - Memory Challenge D: challenges/memory/challenge_d.md - - Information retrieval: - - Introduction: challenges/information_retrieval/introduction.md - - Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md - - Information Retrieval Challenge B: challenges/information_retrieval/challenge_b.md + - Introduction: challenges/introduction.md + - List of Challenges: + - Memory: + - Introduction: challenges/memory/introduction.md + - Memory Challenge A: challenges/memory/challenge_a.md + - Memory Challenge B: challenges/memory/challenge_b.md + - Memory Challenge C: challenges/memory/challenge_c.md + - Memory Challenge D: challenges/memory/challenge_d.md + - Information retrieval: + - Introduction: challenges/information_retrieval/introduction.md + - Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md + - Information Retrieval Challenge B: challenges/information_retrieval/challenge_b.md - Submit a Challenge: challenges/submit.md - Beat a Challenge: challenges/beat.md - License: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/LICENSE -theme: readthedocs +theme: + name: material + icon: + logo: material/book-open-variant + favicon: imgs/Auto_GPT_Logo.png + features: + - navigation.sections + - toc.follow + - navigation.top + - content.code.copy + palette: + # Palette toggle for light mode + - media: "(prefers-color-scheme: light)" + scheme: default + toggle: + icon: material/weather-night + name: Switch to dark mode + + # Palette toggle for dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + toggle: + icon: material/weather-sunny + name: Switch to light mode markdown_extensions: - admonition: - codehilite: - pymdownx.keys: + # Python Markdown + - abbr + - admonition + - attr_list + - 
def_list + - footnotes + - md_in_html + - toc: + permalink: true + - tables + + # Python Markdown Extensions + - pymdownx.arithmatex: + generic: true + - pymdownx.betterem: + smart_enable: all + - pymdownx.critic + - pymdownx.caret + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg + - pymdownx.highlight + - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.snippets: + auto_append: + - includes/abbreviations.md + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + +plugins: + - table-reader + - search + +extra_javascript: + - https://unpkg.com/tablesort@5.3.0/dist/tablesort.min.js + - _javascript/tablesort.js + - _javascript/mathjax.js + - https://polyfill.io/v3/polyfill.min.js?features=es6 + - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js diff --git a/requirements.txt b/requirements.txt index e401e26685aa..5dc87ff62da1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -48,6 +48,8 @@ isort gitpython==3.1.31 auto-gpt-plugin-template @ git+https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template@0.1.0 mkdocs +mkdocs-material +mkdocs-table-reader-plugin pymdown-extensions mypy types-Markdown diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 80de7073a791..9d63b26a362b 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -3,6 +3,7 @@ for the AI and ensures it behaves as a singleton. """ import os +from typing import Any from unittest import mock from unittest.mock import patch @@ -13,7 +14,7 @@ from autogpt.workspace.workspace import Workspace -def test_initial_values(config: Config): +def test_initial_values(config: Config) -> None: """ Test if the initial values of the config class attributes are set correctly. """ @@ -24,7 +25,7 @@ def test_initial_values(config: Config): assert config.smart_llm == "gpt-4-0314" -def test_set_continuous_mode(config: Config): +def test_set_continuous_mode(config: Config) -> None: """ Test if the set_continuous_mode() method updates the continuous_mode attribute. """ @@ -38,7 +39,7 @@ def test_set_continuous_mode(config: Config): config.continuous_mode = continuous_mode -def test_set_speak_mode(config: Config): +def test_set_speak_mode(config: Config) -> None: """ Test if the set_speak_mode() method updates the speak_mode attribute. """ @@ -52,7 +53,7 @@ def test_set_speak_mode(config: Config): config.speak_mode = speak_mode -def test_set_fast_llm(config: Config): +def test_set_fast_llm(config: Config) -> None: """ Test if the set_fast_llm() method updates the fast_llm attribute. """ @@ -66,7 +67,7 @@ def test_set_fast_llm(config: Config): config.fast_llm = fast_llm -def test_set_smart_llm(config: Config): +def test_set_smart_llm(config: Config) -> None: """ Test if the set_smart_llm() method updates the smart_llm attribute. """ @@ -80,7 +81,7 @@ def test_set_smart_llm(config: Config): config.smart_llm = smart_llm -def test_set_debug_mode(config: Config): +def test_set_debug_mode(config: Config) -> None: """ Test if the set_debug_mode() method updates the debug_mode attribute. 
""" @@ -95,7 +96,7 @@ def test_set_debug_mode(config: Config): @patch("openai.Model.list") -def test_smart_and_fast_llms_set_to_gpt4(mock_list_models, config: Config): +def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config) -> None: """ Test if models update to gpt-3.5-turbo if both are set to gpt-4. """ @@ -132,7 +133,7 @@ def test_smart_and_fast_llms_set_to_gpt4(mock_list_models, config: Config): config.smart_llm = smart_llm -def test_missing_azure_config(workspace: Workspace): +def test_missing_azure_config(workspace: Workspace) -> None: config_file = workspace.get_path("azure_config.yaml") with pytest.raises(FileNotFoundError): ConfigBuilder.load_azure_config(str(config_file)) From a593c3272735bf4ffbd839a6c5b895a84ca1cfe5 Mon Sep 17 00:00:00 2001 From: James Collins Date: Tue, 1 Aug 2023 10:48:20 -0700 Subject: [PATCH 22/24] Move more app files to app package (#5036) Co-authored-by: Nicholas Tindle --- autogpt/app/main.py | 16 +-- autogpt/app/setup.py | 2 +- autogpt/{ => app}/spinner.py | 0 autogpt/app/utils.py | 147 +++++++++++++++++++++++++++ autogpt/processing/text.py | 10 +- autogpt/utils.py | 170 +------------------------------- tests/challenges/utils.py | 4 +- tests/integration/test_setup.py | 6 +- tests/unit/test_spinner.py | 2 +- tests/unit/test_utils.py | 30 +++--- 10 files changed, 186 insertions(+), 201 deletions(-) rename autogpt/{ => app}/spinner.py (100%) create mode 100644 autogpt/app/utils.py diff --git a/autogpt/app/main.py b/autogpt/app/main.py index e7d51ce6361c..ed77cd438132 100644 --- a/autogpt/app/main.py +++ b/autogpt/app/main.py @@ -13,6 +13,14 @@ from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName from autogpt.app.configurator import create_config from autogpt.app.setup import prompt_user +from autogpt.app.spinner import Spinner +from autogpt.app.utils import ( + clean_input, + get_current_git_branch, + get_latest_bulletin, + get_legal_warning, + markdown_to_ansi_style, +) from autogpt.commands import COMMAND_CATEGORIES from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key from autogpt.llm.api_manager import ApiManager @@ -22,14 +30,6 @@ from autogpt.plugins import scan_plugins from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT from autogpt.speech import say_text -from autogpt.spinner import Spinner -from autogpt.utils import ( - clean_input, - get_current_git_branch, - get_latest_bulletin, - get_legal_warning, - markdown_to_ansi_style, -) from autogpt.workspace import Workspace from scripts.install_plugin_deps import install_plugin_dependencies diff --git a/autogpt/app/setup.py b/autogpt/app/setup.py index cb6073adc0dd..f2879af6b361 100644 --- a/autogpt/app/setup.py +++ b/autogpt/app/setup.py @@ -5,7 +5,7 @@ from colorama import Fore, Style from jinja2 import Template -from autogpt import utils +from autogpt.app import utils from autogpt.config import Config from autogpt.config.ai_config import AIConfig from autogpt.llm.base import ChatSequence, Message diff --git a/autogpt/spinner.py b/autogpt/app/spinner.py similarity index 100% rename from autogpt/spinner.py rename to autogpt/app/spinner.py diff --git a/autogpt/app/utils.py b/autogpt/app/utils.py new file mode 100644 index 000000000000..5bf0d6c7cc79 --- /dev/null +++ b/autogpt/app/utils.py @@ -0,0 +1,147 @@ +import os +import re + +import requests +from colorama import Fore, Style +from git.repo import Repo +from prompt_toolkit import ANSI, PromptSession +from prompt_toolkit.history import InMemoryHistory + +from 
autogpt.config import Config +from autogpt.logs import logger + +session = PromptSession(history=InMemoryHistory()) + + +def clean_input(config: Config, prompt: str = "", talk=False): + try: + if config.chat_messages_enabled: + for plugin in config.plugins: + if not hasattr(plugin, "can_handle_user_input"): + continue + if not plugin.can_handle_user_input(user_input=prompt): + continue + plugin_response = plugin.user_input(user_input=prompt) + if not plugin_response: + continue + if plugin_response.lower() in [ + "yes", + "yeah", + "y", + "ok", + "okay", + "sure", + "alright", + ]: + return config.authorise_key + elif plugin_response.lower() in [ + "no", + "nope", + "n", + "negative", + ]: + return config.exit_key + return plugin_response + + # ask for input, default when just pressing Enter is y + logger.info("Asking user via keyboard...") + + # handle_sigint must be set to False, so the signal handler in the + # autogpt/main.py could be employed properly. This referes to + # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776 + answer = session.prompt(ANSI(prompt), handle_sigint=False) + return answer + except KeyboardInterrupt: + logger.info("You interrupted Auto-GPT") + logger.info("Quitting...") + exit(0) + + +def get_bulletin_from_web(): + try: + response = requests.get( + "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" + ) + if response.status_code == 200: + return response.text + except requests.exceptions.RequestException: + pass + + return "" + + +def get_current_git_branch() -> str: + try: + repo = Repo(search_parent_directories=True) + branch = repo.active_branch + return branch.name + except: + return "" + + +def get_latest_bulletin() -> tuple[str, bool]: + exists = os.path.exists("data/CURRENT_BULLETIN.md") + current_bulletin = "" + if exists: + current_bulletin = open( + "data/CURRENT_BULLETIN.md", "r", encoding="utf-8" + ).read() + new_bulletin = get_bulletin_from_web() + is_new_news = new_bulletin != "" and new_bulletin != current_bulletin + + news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n" + if new_bulletin or current_bulletin: + news_header += ( + "Below you'll find the latest Auto-GPT News and updates regarding features!\n" + "If you don't wish to see this message, you " + "can run Auto-GPT with the *--skip-news* flag.\n" + ) + + if new_bulletin and is_new_news: + open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) + current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}" + + return f"{news_header}\n{current_bulletin}", is_new_news + + +def markdown_to_ansi_style(markdown: str): + ansi_lines: list[str] = [] + for line in markdown.split("\n"): + line_style = "" + + if line.startswith("# "): + line_style += Style.BRIGHT + else: + line = re.sub( + r"(? str: + legal_text = """ +## DISCLAIMER AND INDEMNIFICATION AGREEMENT +### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT. + +## Introduction +AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences. 
+ +## No Liability for Actions of the System +The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions. + +## User Responsibility and Respondeat Superior Liability +As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your +behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability. + +## Indemnification +By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences. + """ + return legal_text diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py index faaa50e000db..dc245bb2aed8 100644 --- a/autogpt/processing/text.py +++ b/autogpt/processing/text.py @@ -10,7 +10,15 @@ from autogpt.llm.providers.openai import OPEN_AI_MODELS from autogpt.llm.utils import count_string_tokens, create_chat_completion from autogpt.logs import logger -from autogpt.utils import batch + + +def batch(iterable, max_batch_length: int, overlap: int = 0): + """Batch data from iterable into slices of length N. The last batch may be shorter.""" + # batched('ABCDEFG', 3) --> ABC DEF G + if max_batch_length < 1: + raise ValueError("n must be at least one") + for i in range(0, len(iterable), max_batch_length - overlap): + yield iterable[i : i + max_batch_length] def _max_chunk_length(model: str, max: Optional[int] = None) -> int: diff --git a/autogpt/utils.py b/autogpt/utils.py index 28c4be517fee..f69fe50f8622 100644 --- a/autogpt/utils.py +++ b/autogpt/utils.py @@ -1,70 +1,5 @@ -import os -import re - -import requests import yaml -from colorama import Fore, Style -from git.repo import Repo -from prompt_toolkit import ANSI, PromptSession -from prompt_toolkit.history import InMemoryHistory - -from autogpt.config import Config -from autogpt.logs import logger - -session = PromptSession(history=InMemoryHistory()) - - -def batch(iterable, max_batch_length: int, overlap: int = 0): - """Batch data from iterable into slices of length N. 
The last batch may be shorter.""" - # batched('ABCDEFG', 3) --> ABC DEF G - if max_batch_length < 1: - raise ValueError("n must be at least one") - for i in range(0, len(iterable), max_batch_length - overlap): - yield iterable[i : i + max_batch_length] - - -def clean_input(config: Config, prompt: str = "", talk=False): - try: - if config.chat_messages_enabled: - for plugin in config.plugins: - if not hasattr(plugin, "can_handle_user_input"): - continue - if not plugin.can_handle_user_input(user_input=prompt): - continue - plugin_response = plugin.user_input(user_input=prompt) - if not plugin_response: - continue - if plugin_response.lower() in [ - "yes", - "yeah", - "y", - "ok", - "okay", - "sure", - "alright", - ]: - return config.authorise_key - elif plugin_response.lower() in [ - "no", - "nope", - "n", - "negative", - ]: - return config.exit_key - return plugin_response - - # ask for input, default when just pressing Enter is y - logger.info("Asking user via keyboard...") - - # handle_sigint must be set to False, so the signal handler in the - # autogpt/main.py could be employed properly. This referes to - # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776 - answer = session.prompt(ANSI(prompt), handle_sigint=False) - return answer - except KeyboardInterrupt: - logger.info("You interrupted Auto-GPT") - logger.info("Quitting...") - exit(0) +from colorama import Fore def validate_yaml_file(file: str): @@ -80,106 +15,3 @@ def validate_yaml_file(file: str): ) return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!") - - -def readable_file_size(size, decimal_places=2): - """Converts the given size in bytes to a readable format. - Args: - size: Size in bytes - decimal_places (int): Number of decimal places to display - """ - for unit in ["B", "KB", "MB", "GB", "TB"]: - if size < 1024.0: - break - size /= 1024.0 - return f"{size:.{decimal_places}f} {unit}" - - -def get_bulletin_from_web(): - try: - response = requests.get( - "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" - ) - if response.status_code == 200: - return response.text - except requests.exceptions.RequestException: - pass - - return "" - - -def get_current_git_branch() -> str: - try: - repo = Repo(search_parent_directories=True) - branch = repo.active_branch - return branch.name - except: - return "" - - -def get_latest_bulletin() -> tuple[str, bool]: - exists = os.path.exists("data/CURRENT_BULLETIN.md") - current_bulletin = "" - if exists: - current_bulletin = open( - "data/CURRENT_BULLETIN.md", "r", encoding="utf-8" - ).read() - new_bulletin = get_bulletin_from_web() - is_new_news = new_bulletin != "" and new_bulletin != current_bulletin - - news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n" - if new_bulletin or current_bulletin: - news_header += ( - "Below you'll find the latest Auto-GPT News and updates regarding features!\n" - "If you don't wish to see this message, you " - "can run Auto-GPT with the *--skip-news* flag.\n" - ) - - if new_bulletin and is_new_news: - open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) - current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}" - - return f"{news_header}\n{current_bulletin}", is_new_news - - -def markdown_to_ansi_style(markdown: str): - ansi_lines: list[str] = [] - for line in markdown.split("\n"): - line_style = "" - - if line.startswith("# "): - line_style += Style.BRIGHT - else: - line = re.sub( - r"(? 
str: - legal_text = """ -## DISCLAIMER AND INDEMNIFICATION AGREEMENT -### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT. - -## Introduction -AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences. - -## No Liability for Actions of the System -The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions. - -## User Responsibility and Respondeat Superior Liability -As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your -behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability. - -## Indemnification -By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences. 
- """ - return legal_text diff --git a/tests/challenges/utils.py b/tests/challenges/utils.py index dd661b6e3fc6..67d7425c8dd3 100644 --- a/tests/challenges/utils.py +++ b/tests/challenges/utils.py @@ -38,7 +38,9 @@ def input_generator() -> Generator[str, None, None]: yield from input_sequence gen = input_generator() - monkeypatch.setattr("autogpt.utils.session.prompt", lambda _, **kwargs: next(gen)) + monkeypatch.setattr( + "autogpt.app.utils.session.prompt", lambda _, **kwargs: next(gen) + ) def setup_mock_log_cycle_agent_name( diff --git a/tests/integration/test_setup.py b/tests/integration/test_setup.py index f4bb9a5c8ba4..ff83eee6b53b 100644 --- a/tests/integration/test_setup.py +++ b/tests/integration/test_setup.py @@ -10,7 +10,7 @@ @pytest.mark.requires_openai_api_key def test_generate_aiconfig_automatic_default(patched_api_requestor, config): user_inputs = [""] - with patch("autogpt.utils.session.prompt", side_effect=user_inputs): + with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs): ai_config = prompt_user(config) assert isinstance(ai_config, AIConfig) @@ -43,7 +43,7 @@ def test_generate_aiconfig_automatic_fallback(patched_api_requestor, config): "", "", ] - with patch("autogpt.utils.session.prompt", side_effect=user_inputs): + with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs): ai_config = prompt_user(config) assert isinstance(ai_config, AIConfig) @@ -64,7 +64,7 @@ def test_prompt_user_manual_mode(patched_api_requestor, config): "", "", ] - with patch("autogpt.utils.session.prompt", side_effect=user_inputs): + with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs): ai_config = prompt_user(config) assert isinstance(ai_config, AIConfig) diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py index 4b22f24cbd78..8f894b79d56f 100644 --- a/tests/unit/test_spinner.py +++ b/tests/unit/test_spinner.py @@ -1,7 +1,7 @@ # Generated by CodiumAI import time -from autogpt.spinner import Spinner +from autogpt.app.spinner import Spinner """ Code Analysis diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index eb49908f3942..43d8dc39ed0c 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -4,15 +4,14 @@ import pytest import requests -from autogpt.config import Config -from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict -from autogpt.utils import ( +from autogpt.app.utils import ( get_bulletin_from_web, get_current_git_branch, get_latest_bulletin, - readable_file_size, - validate_yaml_file, ) +from autogpt.config import Config +from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict +from autogpt.utils import validate_yaml_file from tests.utils import skip_in_ci @@ -77,13 +76,6 @@ def test_validate_yaml_file_invalid(): assert "There was an issue while trying to read" in message -def test_readable_file_size(): - size_in_bytes = 1024 * 1024 * 3.5 # 3.5 MB - readable_size = readable_file_size(size_in_bytes) - - assert readable_size == "3.50 MB" - - @patch("requests.get") def test_get_bulletin_from_web_success(mock_get): expected_content = "Test bulletin from web" @@ -127,7 +119,7 @@ def test_get_latest_bulletin_with_file(): with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: f.write(expected_content) - with patch("autogpt.utils.get_bulletin_from_web", return_value=""): + with patch("autogpt.app.utils.get_bulletin_from_web", return_value=""): bulletin, is_new = get_latest_bulletin() assert expected_content in bulletin assert 
is_new == False @@ -140,7 +132,9 @@ def test_get_latest_bulletin_with_new_bulletin(): f.write("Old bulletin") expected_content = "New bulletin from web" - with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content): + with patch( + "autogpt.app.utils.get_bulletin_from_web", return_value=expected_content + ): bulletin, is_new = get_latest_bulletin() assert "::NEW BULLETIN::" in bulletin assert expected_content in bulletin @@ -154,7 +148,9 @@ def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin(): with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: f.write(expected_content) - with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content): + with patch( + "autogpt.app.utils.get_bulletin_from_web", return_value=expected_content + ): bulletin, is_new = get_latest_bulletin() assert expected_content in bulletin assert is_new == False @@ -170,7 +166,7 @@ def test_get_current_git_branch(): assert branch_name != "" -@patch("autogpt.utils.Repo") +@patch("autogpt.app.utils.Repo") def test_get_current_git_branch_success(mock_repo): mock_repo.return_value.active_branch.name = "test-branch" branch_name = get_current_git_branch() @@ -178,7 +174,7 @@ def test_get_current_git_branch_success(mock_repo): assert branch_name == "test-branch" -@patch("autogpt.utils.Repo") +@patch("autogpt.app.utils.Repo") def test_get_current_git_branch_failure(mock_repo): mock_repo.side_effect = Exception() branch_name = get_current_git_branch() From 3a2d08fb415071cc94dd6fcee24cfbdd1fb487dd Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Tue, 1 Aug 2023 14:48:13 -0400 Subject: [PATCH 23/24] Pass TestSearch benchmark consistently (Add browse_website TOKENS_TO_TRIGGER_SUMMARY) (#5092) * Added SUMMARIZATION_TRIGGER_LENGTH browse_website won't summarize content that's shorter than SUMMARIZATION_TRIGGER_LENGTH. It defaults to 250 characters, which is approximately 50 tokens. 
* Refactor BrowserOptions * Use tokens instead of length to trigger summarization * Bugfix * fix: Always return links even if not summarizing feat: Increase the number of links returned from 5 to 20 --------- Co-authored-by: lc0rp <2609411+lc0rp@users.noreply.github.com> Co-authored-by: James Collins --- autogpt/commands/web_selenium.py | 38 +++++++++++++++++++------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py index 2d978494a9d3..92aa5bece0e7 100644 --- a/autogpt/commands/web_selenium.py +++ b/autogpt/commands/web_selenium.py @@ -2,13 +2,15 @@ from __future__ import annotations +from autogpt.llm.utils.token_counter import count_string_tokens + COMMAND_CATEGORY = "web_browse" COMMAND_CATEGORY_TITLE = "Web Browsing" import logging from pathlib import Path from sys import platform -from typing import Optional, Type +from typing import Optional from bs4 import BeautifulSoup from selenium.common.exceptions import WebDriverException @@ -16,6 +18,7 @@ from selenium.webdriver.chrome.service import Service as ChromeDriverService from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver from selenium.webdriver.common.by import By +from selenium.webdriver.common.options import ArgOptions as BrowserOptions from selenium.webdriver.edge.options import Options as EdgeOptions from selenium.webdriver.edge.service import Service as EdgeDriverService from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver @@ -38,9 +41,9 @@ from autogpt.processing.html import extract_hyperlinks, format_hyperlinks from autogpt.url_utils.validators import validate_url -BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions - FILE_DIR = Path(__file__).parent.parent +TOKENS_TO_TRIGGER_SUMMARY = 50 +LINKS_TO_RETURN = 20 @command( @@ -64,25 +67,30 @@ def browse_website(url: str, question: str, agent: Agent) -> str: question (str): The question asked by the user Returns: - Tuple[str, WebDriver]: The answer and links to the user and the webdriver + str: The answer and links to the user and the webdriver """ + driver = None try: driver, text = scrape_text_with_selenium(url, agent) + add_header(driver) + if TOKENS_TO_TRIGGER_SUMMARY < count_string_tokens(text, agent.llm.name): + text = summarize_memorize_webpage(url, text, question, agent, driver) + + links = scrape_links_with_selenium(driver, url) + + # Limit links to LINKS_TO_RETURN + if len(links) > LINKS_TO_RETURN: + links = links[:LINKS_TO_RETURN] + + return f"Answer gathered from website: {text}\n\nLinks: {links}" except WebDriverException as e: # These errors are often quite long and include lots of context. # Just grab the first line. 
msg = e.msg.split("\n")[0] return f"Error: {msg}" - - add_header(driver) - summary = summarize_memorize_webpage(url, text, question, agent, driver) - links = scrape_links_with_selenium(driver, url) - - # Limit links to 5 - if len(links) > 5: - links = links[:5] - close_browser(driver) - return f"Answer gathered from website: {summary}\n\nLinks: {links}" + finally: + if driver: + close_browser(driver) def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]: @@ -96,7 +104,7 @@ def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]: """ logging.getLogger("selenium").setLevel(logging.CRITICAL) - options_available: dict[str, Type[BrowserOptions]] = { + options_available: dict[str, BrowserOptions] = { "chrome": ChromeOptions, "edge": EdgeOptions, "firefox": FirefoxOptions, From f26ccda09759248a53af95e19af57f1791c3167d Mon Sep 17 00:00:00 2001 From: Luke <2609441+lc0rp@users.noreply.github.com> Date: Fri, 11 Aug 2023 08:44:27 -0400 Subject: [PATCH 24/24] Bulleting update & version bump (#5112) * Bulleting update & version bump * Bulletin.md updates * Format bulletin * Added info about new documentation theme. --------- Co-authored-by: lc0rp <2609411+lc0rp@users.noreply.github.com> --- .gitignore | 7 +++++++ BULLETIN.md | 24 +++++++++--------------- docs/configuration/memory.md | 2 +- pyproject.toml | 2 +- 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/.gitignore b/.gitignore index 195ecb71787c..3b4363131884 100644 --- a/.gitignore +++ b/.gitignore @@ -160,3 +160,10 @@ openai/ # news CURRENT_BULLETIN.md + +# AgBenchmark +agbenchmark/reports/ + +# Nodejs +package-lock.json +package.json \ No newline at end of file diff --git a/BULLETIN.md b/BULLETIN.md index 11cc62777625..9a24b4986df7 100644 --- a/BULLETIN.md +++ b/BULLETIN.md @@ -4,24 +4,18 @@ 📖 *User Guide*: https://docs.agpt.co. 👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing. -# v0.4.6 RELEASE HIGHLIGHTS! 🚀 +# v0.4.7 RELEASE HIGHLIGHTS! 🚀 # ----------------------------- -This release includes under-the-hood improvements and bug fixes, including better UTF-8 -special character support, workspace write access for sandboxed Python execution, -more robust path resolution for config files and the workspace, and a full restructure -of the Agent class, the "brain" of Auto-GPT, to make it more extensible. +This release introduces initial REST API support, powered by e2b's agent +protocol SDK (https://github.com/e2b-dev/agent-protocol#sdk). -We have also released some documentation updates, including: +It also includes improvements to prompt generation and support +for our new benchmarking tool, Auto-GPT-Benchmarks +(https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks). -- *How to share your system logs* - Visit [docs/share-your-logs.md] to learn how to how to share logs with us - via a log analyzer graciously contributed by https://www.e2b.dev/ +We've also moved our documentation to Material Theme, at https://docs.agpt.co. -- *Auto-GPT re-architecture documentation* - You can learn more about the inner-workings of the Auto-GPT re-architecture - released last cycle, via these links: - * [autogpt/core/README.md] - * [autogpt/core/ARCHITECTURE_NOTES.md] +As usual, we've squashed a few bugs and made some under-the-hood improvements. -Take a look at the Release Notes on Github for the full changelog! +Take a look at the Release Notes on Github for the full changelog: https://github.com/Significant-Gravitas/Auto-GPT/releases. 
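
The `TOKENS_TO_TRIGGER_SUMMARY` gate that patch 23 adds to `web_selenium.py` is worth seeing in isolation. Below is a minimal sketch of the same pattern, not the shipped implementation: `count_string_tokens` is stubbed here with the rough four-characters-per-token heuristic, which is also how the commit message maps 250 characters to approximately 50 tokens.

```python
TOKENS_TO_TRIGGER_SUMMARY = 50  # mirrors the constant added in web_selenium.py
LINKS_TO_RETURN = 20


def count_string_tokens(text: str, model_name: str) -> int:
    # Stand-in for the real helper, which counts tokens with a
    # model-specific tokenizer; ~4 chars/token is a rough heuristic.
    return len(text) // 4


def browse_result(text: str, links: list[str], model_name: str) -> str:
    # Condensed version of the patched browse_website flow: only pages
    # longer than the trigger warrant an LLM summarization call, and at
    # most LINKS_TO_RETURN links are passed back to the agent.
    if count_string_tokens(text, model_name) > TOKENS_TO_TRIGGER_SUMMARY:
        text = "<summary of page content>"  # summarize_memorize_webpage() in the patch
    return f"Answer gathered from website: {text}\n\nLinks: {links[:LINKS_TO_RETURN]}"
```
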
diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md index 3fa908b26169..1a5e716abf8a 100644 --- a/docs/configuration/memory.md +++ b/docs/configuration/memory.md @@ -188,7 +188,7 @@ View memory usage by using the `--debug` flag :) ## 🧠 Memory pre-seeding !!! warning - Data ingestion is broken in v0.4.6 and possibly earlier versions. This is a known issue that will be addressed in future releases. Follow these issues for updates. + Data ingestion is broken in v0.4.7 and possibly earlier versions. This is a known issue that will be addressed in future releases. Follow these issues for updates. [Issue 4435](https://github.com/Significant-Gravitas/Auto-GPT/issues/4435) [Issue 4024](https://github.com/Significant-Gravitas/Auto-GPT/issues/4024) [Issue 2076](https://github.com/Significant-Gravitas/Auto-GPT/issues/2076) diff --git a/pyproject.toml b/pyproject.toml index da0fcdd68819..ede3e62da4ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "agpt" -version = "0.4.6" +version = "0.4.7" authors = [ { name="Torantulino", email="support@agpt.co" }, ]
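
With the documentation moved to the Material theme, contributors will likely want to preview the site locally before pushing changes. A suggested workflow, not part of these patches, using only packages that the updated requirements.txt already lists:

```shell
# Install the docs toolchain added to requirements.txt by the theme patch.
pip install mkdocs mkdocs-material mkdocs-table-reader-plugin pymdown-extensions

# Build and serve the docs; mkdocs serves on http://127.0.0.1:8000 by default.
mkdocs serve
```
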