Merge pull request #802 from sweetlilmre/more_azure

More fixes for Azure hosting
Richard Beales 2023-04-13 07:58:41 +01:00 committed by GitHub
commit c8b8673286
7 changed files with 62 additions and 16 deletions


@@ -11,11 +11,6 @@ GOOGLE_API_KEY=
CUSTOM_SEARCH_ENGINE_ID=
USE_AZURE=False
EXECUTE_LOCAL_COMMANDS=False
OPENAI_AZURE_API_BASE=your-base-url-for-azure
OPENAI_AZURE_API_VERSION=api-version-for-azure
OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure
OPENAI_AZURE_CHAT_DEPLOYMENT_ID=deployment-id-for-azure-chat
OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID=deployment-id-for-azure-embeddigs
IMAGE_PROVIDER=dalle
HUGGINGFACE_API_TOKEN=
USE_MAC_OS_TTS=False

.gitignore vendored

@@ -7,9 +7,11 @@ package-lock.json
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
*venv/*
outputs/*
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json


@@ -96,10 +96,15 @@ pip install -r requirements.txt
```
4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and provide the `OPENAI_AZURE_API_BASE`, `OPENAI_AZURE_API_VERSION` and `OPENAI_AZURE_DEPLOYMENT_ID` values as explained here: https://pypi.org/project/openai/ in the `Microsoft Azure Endpoints` section. Additionally you need separate deployments for both embeddings and chat. Add their ID values to `OPENAI_AZURE_CHAT_DEPLOYMENT_ID` and `OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID` respectively
- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
- If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and then:
  - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and the deployment IDs for the models you plan to use in the `azure_model_map` section:
    - `fast_llm_model_deployment_id` - your gpt-3.5-turbo or gpt-4 deployment ID
    - `smart_llm_model_deployment_id` - your gpt-4 deployment ID
    - `embedding_model_deployment_id` - your text-embedding-ada-002 v2 deployment ID
    - Please specify all of these values as double-quoted strings
  - Details can be found in the `Microsoft Azure Endpoints` section of https://pypi.org/project/openai/ and, for the embedding model, at https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line. A short runtime sketch follows below.
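For orientation, here is a minimal sketch of what this Azure wiring amounts to at runtime with the pre-1.0 `openai` Python SDK used by the project. The endpoint, API version, key and deployment name below are placeholders for illustration, not values from this PR:

```python
import openai

# Values that the config reads from azure.yaml (placeholders for illustration)
openai.api_type = "azure"
openai.api_base = "https://my-resource.openai.azure.com/"  # azure_api_base
openai.api_version = "2023-03-15-preview"                  # azure_api_version
openai.api_key = "<your Azure OpenAI key>"

# On Azure each request targets a *deployment* rather than a bare model name,
# which is why azure_model_map maps model names to deployment IDs.
response = openai.ChatCompletion.create(
    deployment_id="my-gpt35-deployment",  # e.g. fast_llm_model_deployment_id
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello from Azure"}],
)
print(response["choices"][0]["message"]["content"])
```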
## 🔧 Usage

azure.yaml.template Normal file

@@ -0,0 +1,6 @@
azure_api_base: your-base-url-for-azure
azure_api_version: api-version-for-azure
azure_model_map:
    fast_llm_model_deployment_id: gpt35-deployment-id-for-azure
    smart_llm_model_deployment_id: gpt4-deployment-id-for-azure
    embedding_model_deployment_id: embedding-deployment-id-for-azure
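A quick, optional way to confirm that a filled-in copy of this template parses into the structure `load_azure_config()` expects; the snippet below is illustrative only and not part of the commit:

```python
import yaml

# Parse azure.yaml and print the model-to-deployment mapping
with open("azure.yaml") as f:
    azure_cfg = yaml.load(f, Loader=yaml.FullLoader)

print("endpoint:", azure_cfg.get("azure_api_base"))
print("api version:", azure_cfg.get("azure_api_version"))
for key, deployment_id in azure_cfg.get("azure_model_map", {}).items():
    print(f"{key} -> {deployment_id}")
```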


@ -1,6 +1,7 @@
import abc
import os
import openai
import yaml
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
@@ -49,11 +50,7 @@ class Config(metaclass=Singleton):
        self.execute_local_commands = os.getenv('EXECUTE_LOCAL_COMMANDS', 'False') == 'True'
        if self.use_azure:
            self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
            self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
            self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
            self.azure_chat_deployment_id = os.getenv("OPENAI_AZURE_CHAT_DEPLOYMENT_ID")
            self.azure_embeddigs_deployment_id = os.getenv("OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID")
            self.load_azure_config()
            openai.api_type = "azure"
            openai.api_base = self.openai_api_base
            openai.api_version = self.openai_api_version
@@ -88,6 +85,46 @@ class Config(metaclass=Singleton):
        # Initialize the OpenAI API client
        openai.api_key = self.openai_api_key

    def get_azure_deployment_id_for_model(self, model: str) -> str:
        """
        Returns the relevant deployment id for the model specified.

        Parameters:
            model(str): The model to map to the deployment id.

        Returns:
            The matching deployment id if found, otherwise an empty string.
        """
        if model == self.fast_llm_model:
            return self.azure_model_to_deployment_id_map["fast_llm_model_deployment_id"]
        elif model == self.smart_llm_model:
            return self.azure_model_to_deployment_id_map["smart_llm_model_deployment_id"]
        elif model == "text-embedding-ada-002":
            return self.azure_model_to_deployment_id_map["embedding_model_deployment_id"]
        else:
            return ""

    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), '..', 'azure.yaml')

    def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
        """
        Loads the configuration parameters for Azure hosting from the specified yaml config file.

        Parameters:
            config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"

        Returns:
            None
        """
        try:
            with open(config_file) as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}
        self.openai_api_base = config_params.get("azure_api_base", "")
        self.openai_api_version = config_params.get("azure_api_version", "")
        # Default to an empty dict so the deployment-id lookups above index a mapping
        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})

    def set_continuous_mode(self, value: bool):
        """Set the continuous mode value."""
        self.continuous_mode = value
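Taken together, the two new methods let the rest of the codebase keep passing plain OpenAI model names while the config resolves the matching Azure deployment. A minimal usage sketch, assuming `USE_AZURE=True`, `fast_llm_model` left at its `gpt-3.5-turbo` default, and an `azure.yaml` filled in like the template above:

```python
from config import Config

cfg = Config()  # load_azure_config() already ran in __init__ because USE_AZURE=True

# Model names map onto the deployment IDs declared in azure_model_map
print(cfg.get_azure_deployment_id_for_model("gpt-3.5-turbo"))
# -> "gpt35-deployment-id-for-azure"
print(cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"))
# -> "embedding-deployment-id-for-azure"
print(cfg.get_azure_deployment_id_for_model("unknown-model"))
# -> "" (empty string for anything not in the map)
```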


@@ -9,7 +9,7 @@ def create_chat_completion(messages, model=None, temperature=cfg.temperature, ma
    """Create a chat completion using the OpenAI API"""
    if cfg.use_azure:
        response = openai.ChatCompletion.create(
            deployment_id=cfg.azure_chat_deployment_id,
            deployment_id=cfg.get_azure_deployment_id_for_model(model),
            model=model,
            messages=messages,
            temperature=temperature,
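With this change, callers of `create_chat_completion` look identical for OpenAI and Azure hosting; the deployment ID is derived from whichever model they ask for. A small illustrative call, assuming the module path `llm_utils` and that the function returns the completion text as a string:

```python
from llm_utils import create_chat_completion  # module path assumed from this diff

# Works the same whether cfg.use_azure is True or False; on Azure the model name
# is mapped to its deployment ID by get_azure_deployment_id_for_model().
reply = create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello"},
    ],
    model="gpt-3.5-turbo",
)
print(reply)
```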


@@ -4,11 +4,12 @@ from config import AbstractSingleton, Config
import openai
cfg = Config()
cfg = Config()

def get_ada_embedding(text):
    text = text.replace("\n", " ")
    if cfg.use_azure:
        return openai.Embedding.create(input=[text], engine=cfg.azure_embeddigs_deployment_id, model="text-embedding-ada-002")["data"][0]["embedding"]
        return openai.Embedding.create(input=[text], engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"))["data"][0]["embedding"]
    else:
        return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
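Finally, the embedding path no longer needs the dedicated `azure_embeddigs_deployment_id` setting; under Azure the pre-1.0 SDK carries the deployment via `engine`. A short usage sketch, with the module path assumed from this diff's context:

```python
from memory import get_ada_embedding  # module path assumed, not confirmed by this diff

vector = get_ada_embedding("Auto-GPT stores this text as an embedding")
print(len(vector))  # text-embedding-ada-002 returns 1536-dimensional vectors
```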