Mirror of https://github.com/Significant-Gravitas/Auto-GPT.git, synced 2025-04-03 23:41:45 +08:00
feat(platform)!: Lock Supabase docker-compose code (#9620)
We have been submoduling Supabase to provision local Supabase instances with docker-compose. Aside from the huge amount of unrelated code being pulled in, there is also the risk of pulling unintentional breaking changes from upstream into the platform. The latest Supabase changes hide port 5432 on the supabase-db container and shift it to Supavisor, a component we are not currently using. This breaks the existing setup.

## BREAKING CHANGES

This change introduces a different volume location for the database content, so pulling it will start you off with a fresh database. To keep your old data, execute this command:

```
cp -r supabase/docker/volumes/db/data db/docker/volumes/db/data
```

### Changes 🏗️

The scope of this PR is snapshotting the current docker-compose code from the Supabase repository and embedding it in our repository. This eliminates the need for submodule / recursive cloning and for bringing the entire Supabase repository into the platform.

### Checklist 📋

#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Existing CI
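For context, a sketch of the full migration flow: only the `cp` line comes from the note above; the `down`/`up` invocations and the `mkdir -p` are assumptions about a typical setup, not part of this PR.

```
# Sketch: migrate existing database content to the new volume location.
# Assumes you run this from autogpt_platform/ with the old submodule still present.
docker compose -f supabase/docker/docker-compose.yml down         # stop the old stack (assumed path)
mkdir -p db/docker/volumes/db                                     # ensure the new parent directory exists (assumption)
cp -r supabase/docker/volumes/db/data db/docker/volumes/db/data   # the command from the PR note
docker compose up -d                                              # restart with the vendored compose setup
```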
This commit is contained in: parent 52b4351961, commit 780fddc2a0

24  .github/dependabot.yml  vendored
@@ -129,30 +129,6 @@ updates:
           - "minor"
           - "patch"
 
-
-  # Submodules
-  - package-ecosystem: "gitsubmodule"
-    directory: "autogpt_platform/supabase"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 1
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(platform/deps)"
-      prefix-development: "chore(platform/deps-dev)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-
 
   # Docs
   - package-ecosystem: 'pip'
     directory: "docs/"
2  .github/workflows/platform-frontend-ci.yml  vendored

@@ -82,7 +82,7 @@ jobs:
 
       - name: Copy default supabase .env
         run: |
-          cp ../supabase/docker/.env.example ../.env
+          cp ../.env.example ../.env
 
       - name: Copy backend .env
         run: |
3  .gitmodules  vendored

@@ -1,6 +1,3 @@
 [submodule "classic/forge/tests/vcr_cassettes"]
 	path = classic/forge/tests/vcr_cassettes
 	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
-[submodule "autogpt_platform/supabase"]
-	path = autogpt_platform/supabase
-	url = https://github.com/supabase/supabase.git
123  autogpt_platform/.env.example  Normal file

@@ -0,0 +1,123 @@
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############

POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min


############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############

POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres


############
# Supavisor -- Database pooler
############
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id


############
# API Proxy - Configuration for the Kong Reverse proxy.
############

KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443


############
# API - Configuration for PostgREST.
############

PGRST_DB_SCHEMAS=public,storage,graphql_public


############
# Auth - Configuration for the GoTrue authentication server.
############

## General
SITE_URL=http://localhost:3000
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000

## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"

## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
ENABLE_ANONYMOUS_USERS=false

## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true


############
# Studio - Configuration for the Dashboard
############

STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project

STUDIO_PORT=3000
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000

# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true

# Add your OpenAI API key to enable SQL Editor Assistant
OPENAI_API_KEY=


############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false


############
# Logs - Configuration for Logflare
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############

LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key

# Change vector.toml sinks to reflect this change
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key

# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock

# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
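The header above insists these defaults are changed before production. A minimal sketch of generating replacements, assuming `openssl` is available; note that `ANON_KEY` and `SERVICE_ROLE_KEY` are JWTs signed with `JWT_SECRET`, so they must be re-issued whenever that secret changes:

```
# Sketch: generate strong replacements for the placeholder secrets above.
POSTGRES_PASSWORD="$(openssl rand -hex 24)"
JWT_SECRET="$(openssl rand -base64 32)"      # satisfies the 32-character minimum
SECRET_KEY_BASE="$(openssl rand -base64 48)"
VAULT_ENC_KEY="$(openssl rand -hex 16)"      # 32 hex chars, meeting the 32-char minimum
printf 'POSTGRES_PASSWORD=%s\nJWT_SECRET=%s\n' "$POSTGRES_PASSWORD" "$JWT_SECRET"
```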
@@ -22,35 +22,29 @@ To run the AutoGPT Platform, follow these steps:
 
 2. Run the following command:
    ```
-   git submodule update --init --recursive --progress
+   cp .env.example .env
    ```
-   This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
+   This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
 
 3. Run the following command:
    ```
-   cp supabase/docker/.env.example .env
-   ```
-   This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables.
-
-4. Run the following command:
-   ```
    docker compose up -d
    ```
    This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
 
-5. Navigate to `frontend` within the `autogpt_platform` directory:
+4. Navigate to `frontend` within the `autogpt_platform` directory:
    ```
    cd frontend
    ```
    You will need to run your frontend application separately on your local machine.
 
-6. Run the following command:
+5. Run the following command:
    ```
    cp .env.example .env.local
    ```
    This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
 
-7. Run the following command:
+6. Run the following command:
    ```
    npm install
    npm run dev
@@ -61,7 +55,7 @@ To run the AutoGPT Platform, follow these steps:
    yarn install && yarn dev
    ```
 
-8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
+7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
 
 ### Docker Compose Commands
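Taken together, the updated steps reduce to the following sequence (a condensed restatement of the README above, assuming you start in `autogpt_platform/`):

```
cp .env.example .env           # step 2: platform environment
docker compose up -d           # step 3: backend services, detached
cd frontend                    # step 4
cp .env.example .env.local     # step 5: frontend environment
npm install && npm run dev     # step 6 (or: yarn install && yarn dev)
# step 7: open http://localhost:3000
```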
123  autogpt_platform/db/docker/.env.example  Normal file

@@ -0,0 +1,123 @@
(Identical, line for line, to autogpt_platform/.env.example above: the same Secrets, Database, Supavisor, Kong, PostgREST, Auth, Studio, Functions, and Logflare sections with the same placeholder values.)
5  autogpt_platform/db/docker/.gitignore  vendored  Normal file

@@ -0,0 +1,5 @@
volumes/db/data
volumes/storage
.env
test.http
docker-compose.override.yml
3  autogpt_platform/db/docker/README.md  Normal file

@@ -0,0 +1,3 @@
# Supabase Docker

This is a minimal Docker Compose setup for self-hosting Supabase. Follow the steps [here](https://supabase.com/docs/guides/hosting/docker) to get started.
48  autogpt_platform/db/docker/dev/data.sql  Normal file

@@ -0,0 +1,48 @@
create table profiles (
  id uuid references auth.users not null,
  updated_at timestamp with time zone,
  username text unique,
  avatar_url text,
  website text,

  primary key (id),
  unique(username),
  constraint username_length check (char_length(username) >= 3)
);

alter table profiles enable row level security;

create policy "Public profiles are viewable by the owner."
  on profiles for select
  using ( auth.uid() = id );

create policy "Users can insert their own profile."
  on profiles for insert
  with check ( auth.uid() = id );

create policy "Users can update own profile."
  on profiles for update
  using ( auth.uid() = id );

-- Set up Realtime
begin;
  drop publication if exists supabase_realtime;
  create publication supabase_realtime;
commit;
alter publication supabase_realtime add table profiles;

-- Set up Storage
insert into storage.buckets (id, name)
values ('avatars', 'avatars');

create policy "Avatar images are publicly accessible."
  on storage.objects for select
  using ( bucket_id = 'avatars' );

create policy "Anyone can upload an avatar."
  on storage.objects for insert
  with check ( bucket_id = 'avatars' );

create policy "Anyone can update an avatar."
  on storage.objects for update
  with check ( bucket_id = 'avatars' );
34  autogpt_platform/db/docker/dev/docker-compose.dev.yml  Normal file

@@ -0,0 +1,34 @@
version: "3.8"

services:
  studio:
    build:
      context: ..
      dockerfile: studio/Dockerfile
      target: dev
    ports:
      - 8082:8082
  mail:
    container_name: supabase-mail
    image: inbucket/inbucket:3.0.3
    ports:
      - '2500:2500' # SMTP
      - '9000:9000' # web interface
      - '1100:1100' # POP3
  auth:
    environment:
      - GOTRUE_SMTP_USER=
      - GOTRUE_SMTP_PASS=
  meta:
    ports:
      - 5555:8080
  db:
    restart: 'no'
    volumes:
      # Always use a fresh database when developing
      - /var/lib/postgresql/data
      # Seed data should be inserted last (alphabetical order)
      - ./dev/data.sql:/docker-entrypoint-initdb.d/seed.sql
  storage:
    volumes:
      - /var/lib/storage
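This overlay is applied as a second compose file, per the usage notes at the top of `docker-compose.yml` below:

```
# From autogpt_platform/db/docker/:
docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up                        # start with dev helpers
docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans  # destroy
```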
94  autogpt_platform/db/docker/docker-compose.s3.yml  Normal file

@@ -0,0 +1,94 @@
services:

  minio:
    image: minio/minio
    ports:
      - '9000:9000'
      - '9001:9001'
    environment:
      MINIO_ROOT_USER: supa-storage
      MINIO_ROOT_PASSWORD: secret1234
    command: server --console-address ":9001" /data
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://minio:9000/minio/health/live" ]
      interval: 2s
      timeout: 10s
      retries: 5
    volumes:
      - ./volumes/storage:/data:z

  minio-createbucket:
    image: minio/mc
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: >
      /bin/sh -c "
      /usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234;
      /usr/bin/mc mb supa-minio/stub;
      exit 0;
      "

  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v1.11.13
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
      minio:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: s3
      GLOBAL_S3_BUCKET: stub
      GLOBAL_S3_ENDPOINT: http://minio:9000
      GLOBAL_S3_PROTOCOL: http
      GLOBAL_S3_FORCE_PATH_STYLE: true
      AWS_ACCESS_KEY_ID: supa-storage
      AWS_SECRET_ACCESS_KEY: secret1234
      AWS_DEFAULT_REGION: stub
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0
    healthcheck:
      test: [ "CMD", "imgproxy", "health" ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
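As the comment in `docker-compose.yml` notes, this file swaps the storage backend to MinIO-backed S3 when layered on top of the base file:

```
# Storage on MinIO-backed S3 instead of the local filesystem:
docker compose -f docker-compose.yml -f docker-compose.s3.yml up
```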
526  autogpt_platform/db/docker/docker-compose.yml  Normal file

@@ -0,0 +1,526 @@
# Usage
# Start: docker compose up
# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
# Stop: docker compose down
# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
# Reset everything: ./reset.sh

name: supabase

services:

  studio:
    container_name: supabase-studio
    image: supabase/studio:20250224-d10db0f
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
        ]
      timeout: 10s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}

      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      AUTH_JWT_SECRET: ${JWT_SECRET}

      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1
    restart: unless-stopped
    ports:
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.170.0
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}

      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}

      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}

      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}

      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}

      # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
      # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true

      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}

      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
      # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook

      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"

      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"

      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"

      # GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
      # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

      # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
      # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
      # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v12.2.8
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command:
      [
        "postgrest"
      ]

  realtime:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.34.40
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "-H",
          "Authorization: Bearer ${ANON_KEY}",
          "http://localhost:4000/api/tenants/realtime-dev/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      ERL_AFLAGS: -proto_dist inet_tcp
      DNS_NODES: "''"
      RLIMIT_NOFILE: "10000"
      APP_NAME: realtime
      SEED_SELF_HOST: true
      RUN_JANITOR: true

  # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v1.19.3
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://storage:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test:
        [
          "CMD",
          "imgproxy",
          "health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.86.1
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.67.2
    restart: unless-stopped
    volumes:
      - ./volumes/functions:/home/deno/functions:Z
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    command:
      [
        "start",
        "--main-service",
        "/home/deno/functions/main"
      ]

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.12.5
    restart: unless-stopped
    ports:
      - 4000:4000
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "http://localhost:4000/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 10
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: _supabase
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: true
      LOGFLARE_SUPABASE_MODE: true
      LOGFLARE_MIN_CLUSTER_SIZE: 1

      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.8.1.049
    restart: unless-stopped
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for internal supabase data such as _analytics
      - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      # Changes required for Pooler support
      - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
      # Use named volume to persist pgsodium decryption key between restarts
      - db-config:/etc/postgresql-custom
    healthcheck:
      test:
        [
          "CMD",
          "pg_isready",
          "-U",
          "postgres",
          "-h",
          "localhost"
        ]
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    command:
      [
        "postgres",
        "-c",
        "config_file=/etc/postgresql/postgresql.conf",
        "-c",
        "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
      ]

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine
    restart: unless-stopped
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
    command:
      [
        "--config",
        "/etc/vector/vector.yml"
      ]

  # Update the DATABASE_URL if you are using an external Postgres database
  supavisor:
    container_name: supabase-pooler
    image: supabase/supavisor:2.4.12
    restart: unless-stopped
    ports:
      - ${POSTGRES_PORT}:5432
      - ${POOLER_PROXY_PORT_TRANSACTION}:6543
    volumes:
      - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "http://127.0.0.1:4000/api/health"
        ]
      interval: 10s
      timeout: 5s
      retries: 5
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PORT: 4000
      POSTGRES_PORT: ${POSTGRES_PORT}
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase
      CLUSTER_POSTGRES: true
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      VAULT_ENC_KEY: ${VAULT_ENC_KEY}
      API_JWT_SECRET: ${JWT_SECRET}
      METRICS_JWT_SECRET: ${JWT_SECRET}
      REGION: local
      ERL_AFLAGS: -proto_dist inet_tcp
      POOLER_TENANT_ID: ${POOLER_TENANT_ID}
      POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
      POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
      POOLER_POOL_MODE: transaction
    command:
      [
        "/bin/sh",
        "-c",
        "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
      ]

volumes:
  db-config:
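A quick way to sanity-check the stack after `docker compose up -d` (a sketch: the port and key come from the `.env` defaults above, and the `/auth/v1/health` path is an assumption based on the GoTrue healthcheck combined with Kong's `/auth/v1/` route):

```
docker compose ps                              # every service should report healthy/running
curl -s "http://localhost:8000/auth/v1/health" \
  -H "apikey: $ANON_KEY"                       # GoTrue health, through Kong's key-auth
```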
44  autogpt_platform/db/docker/reset.sh  Executable file

@@ -0,0 +1,44 @@
#!/bin/bash

echo "WARNING: This will remove all containers and container data, and will reset the .env file. This action cannot be undone!"
read -p "Are you sure you want to proceed? (y/N) " -n 1 -r
echo # Move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
    echo "Operation cancelled."
    exit 1
fi

echo "Stopping and removing all containers..."
docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans

echo "Cleaning up bind-mounted directories..."
BIND_MOUNTS=(
  "./volumes/db/data"
)

for DIR in "${BIND_MOUNTS[@]}"; do
  if [ -d "$DIR" ]; then
    echo "Deleting $DIR..."
    rm -rf "$DIR"
  else
    echo "Directory $DIR does not exist. Skipping bind mount deletion step..."
  fi
done

echo "Resetting .env file..."
if [ -f ".env" ]; then
  echo "Removing existing .env file..."
  rm -f .env
else
  echo "No .env file found. Skipping .env removal step..."
fi

if [ -f ".env.example" ]; then
  echo "Copying .env.example to .env..."
  cp .env.example .env
else
  echo ".env.example file not found. Skipping .env reset step..."
fi

echo "Cleanup complete!"
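Usage is interactive; piping a `y` into the single-character `read` is an assumed shortcut for non-interactive runs, not something the script documents:

```
./reset.sh                 # prompts for confirmation
printf 'y' | ./reset.sh    # assumed non-interactive variant
```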
241  autogpt_platform/db/docker/volumes/api/kong.yml  Normal file

@@ -0,0 +1,241 @@
_format_version: '2.1'
_transform: true

###
### Consumers / Users
###
consumers:
  - username: DASHBOARD
  - username: anon
    keyauth_credentials:
      - key: $SUPABASE_ANON_KEY
  - username: service_role
    keyauth_credentials:
      - key: $SUPABASE_SERVICE_KEY

###
### Access Control List
###
acls:
  - consumer: anon
    group: anon
  - consumer: service_role
    group: admin

###
### Dashboard credentials
###
basicauth_credentials:
  - consumer: DASHBOARD
    username: $DASHBOARD_USERNAME
    password: $DASHBOARD_PASSWORD

###
### API Routes
###
services:
  ## Open Auth routes
  - name: auth-v1-open
    url: http://auth:9999/verify
    routes:
      - name: auth-v1-open
        strip_path: true
        paths:
          - /auth/v1/verify
    plugins:
      - name: cors
  - name: auth-v1-open-callback
    url: http://auth:9999/callback
    routes:
      - name: auth-v1-open-callback
        strip_path: true
        paths:
          - /auth/v1/callback
    plugins:
      - name: cors
  - name: auth-v1-open-authorize
    url: http://auth:9999/authorize
    routes:
      - name: auth-v1-open-authorize
        strip_path: true
        paths:
          - /auth/v1/authorize
    plugins:
      - name: cors

  ## Secure Auth routes
  - name: auth-v1
    _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
    url: http://auth:9999/
    routes:
      - name: auth-v1-all
        strip_path: true
        paths:
          - /auth/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon

  ## Secure REST routes
  - name: rest-v1
    _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
    url: http://rest:3000/
    routes:
      - name: rest-v1-all
        strip_path: true
        paths:
          - /rest/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: true
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon

  ## Secure GraphQL routes
  - name: graphql-v1
    _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
    url: http://rest:3000/rpc/graphql
    routes:
      - name: graphql-v1-all
        strip_path: true
        paths:
          - /graphql/v1
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: true
      - name: request-transformer
        config:
          add:
            headers:
              - Content-Profile:graphql_public
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon

  ## Secure Realtime routes
  - name: realtime-v1-ws
    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
    url: http://realtime-dev.supabase-realtime:4000/socket
    protocol: ws
    routes:
      - name: realtime-v1-ws
        strip_path: true
        paths:
          - /realtime/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  - name: realtime-v1-rest
    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
    url: http://realtime-dev.supabase-realtime:4000/api
    protocol: http
    routes:
      - name: realtime-v1-rest
        strip_path: true
        paths:
          - /realtime/v1/api
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon

  ## Storage routes: the storage server manages its own auth
  - name: storage-v1
    _comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
    url: http://storage:5000/
    routes:
      - name: storage-v1-all
        strip_path: true
        paths:
          - /storage/v1/
    plugins:
      - name: cors

  ## Edge Functions routes
  - name: functions-v1
    _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
    url: http://functions:9000/
    routes:
      - name: functions-v1-all
        strip_path: true
        paths:
          - /functions/v1/
    plugins:
      - name: cors

  ## Analytics routes
  - name: analytics-v1
    _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
    url: http://analytics:4000/
    routes:
      - name: analytics-v1-all
        strip_path: true
        paths:
          - /analytics/v1/

  ## Secure Database routes
  - name: meta
    _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
    url: http://meta:8080/
    routes:
      - name: meta-all
        strip_path: true
        paths:
          - /pg/
    plugins:
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin

  ## Protected Dashboard - catch all remaining routes
  - name: dashboard
    _comment: 'Studio: /* -> http://studio:3000/*'
    url: http://studio:3000/
    routes:
      - name: dashboard-all
        strip_path: true
        paths:
          - /
    plugins:
      - name: cors
      - name: basic-auth
        config:
          hide_credentials: true
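A sketch of exercising these routes from the host (ports and keys per the `.env` defaults; the `profiles` table is the one seeded by `dev/data.sql` above and is otherwise hypothetical):

```
# REST (PostgREST) through Kong; key-auth expects the apikey header:
curl -s "http://localhost:8000/rest/v1/profiles?select=*" \
  -H "apikey: $ANON_KEY" \
  -H "Authorization: Bearer $ANON_KEY"

# Edge Functions route (see the hello function's own invoke comment below):
curl -s "http://localhost:8000/functions/v1/hello" \
  -H "Authorization: Bearer $ANON_KEY"
```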
3  autogpt_platform/db/docker/volumes/db/_supabase.sql  Normal file

@@ -0,0 +1,3 @@
\set pguser `echo "$POSTGRES_USER"`

CREATE DATABASE _supabase WITH OWNER :pguser;
0  autogpt_platform/db/docker/volumes/db/init/data.sql  Executable file
5  autogpt_platform/db/docker/volumes/db/jwt.sql  Normal file

@@ -0,0 +1,5 @@
\set jwt_secret `echo "$JWT_SECRET"`
\set jwt_exp `echo "$JWT_EXP"`

ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
6  autogpt_platform/db/docker/volumes/db/logs.sql  Normal file

@@ -0,0 +1,6 @@
\set pguser `echo "$POSTGRES_USER"`

\c _supabase
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
\c postgres
6  autogpt_platform/db/docker/volumes/db/pooler.sql  Normal file

@@ -0,0 +1,6 @@
\set pguser `echo "$POSTGRES_USER"`

\c _supabase
create schema if not exists _supavisor;
alter schema _supavisor owner to :pguser;
\c postgres
4  autogpt_platform/db/docker/volumes/db/realtime.sql  Normal file

@@ -0,0 +1,4 @@
\set pguser `echo "$POSTGRES_USER"`

create schema if not exists _realtime;
alter schema _realtime owner to :pguser;
8  autogpt_platform/db/docker/volumes/db/roles.sql  Normal file

@@ -0,0 +1,8 @@
-- NOTE: change to your own passwords for production environments
\set pgpass `echo "$POSTGRES_PASSWORD"`

ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
208
autogpt_platform/db/docker/volumes/db/webhooks.sql
Normal file
208
autogpt_platform/db/docker/volumes/db/webhooks.sql
Normal file
@ -0,0 +1,208 @@
|
||||
BEGIN;
|
||||
-- Create pg_net extension
|
||||
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
|
||||
-- Create supabase_functions schema
|
||||
CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
|
||||
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
|
||||
-- supabase_functions.migrations definition
|
||||
CREATE TABLE supabase_functions.migrations (
|
||||
version text PRIMARY KEY,
|
||||
inserted_at timestamptz NOT NULL DEFAULT NOW()
|
||||
);
|
||||
-- Initial supabase_functions migration
|
||||
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
|
||||
-- supabase_functions.hooks definition
|
||||
CREATE TABLE supabase_functions.hooks (
|
||||
id bigserial PRIMARY KEY,
|
||||
hook_table_id integer NOT NULL,
|
||||
hook_name text NOT NULL,
|
||||
created_at timestamptz NOT NULL DEFAULT NOW(),
|
||||
request_id bigint
|
||||
);
|
||||
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
|
||||
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
|
||||
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
|
||||
CREATE FUNCTION supabase_functions.http_request()
|
||||
RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $function$
|
||||
DECLARE
|
||||
request_id bigint;
|
||||
payload jsonb;
|
||||
url text := TG_ARGV[0]::text;
|
||||
method text := TG_ARGV[1]::text;
|
||||
headers jsonb DEFAULT '{}'::jsonb;
|
||||
params jsonb DEFAULT '{}'::jsonb;
|
||||
timeout_ms integer DEFAULT 1000;
|
||||
BEGIN
|
||||
IF url IS NULL OR url = 'null' THEN
|
||||
RAISE EXCEPTION 'url argument is missing';
|
||||
END IF;
|
||||
|
||||
IF method IS NULL OR method = 'null' THEN
|
||||
RAISE EXCEPTION 'method argument is missing';
|
||||
END IF;
|
||||
|
||||
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
|
||||
headers = '{"Content-Type": "application/json"}'::jsonb;
|
||||
ELSE
|
||||
headers = TG_ARGV[2]::jsonb;
|
||||
END IF;
|
||||
|
||||
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
|
||||
params = '{}'::jsonb;
|
||||
ELSE
|
||||
params = TG_ARGV[3]::jsonb;
|
||||
END IF;
|
||||
|
||||
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
|
||||
timeout_ms = 1000;
|
||||
ELSE
|
||||
timeout_ms = TG_ARGV[4]::integer;
|
||||
END IF;
|
||||
|
||||
CASE
|
||||
WHEN method = 'GET' THEN
|
||||
SELECT http_get INTO request_id FROM net.http_get(
|
||||
url,
|
||||
params,
|
||||
headers,
|
||||
timeout_ms
|
||||
);
|
||||
WHEN method = 'POST' THEN
|
||||
payload = jsonb_build_object(
|
||||
'old_record', OLD,
|
||||
'record', NEW,
|
||||
'type', TG_OP,
|
||||
'table', TG_TABLE_NAME,
|
||||
'schema', TG_TABLE_SCHEMA
|
||||
);
|
||||
|
||||
SELECT http_post INTO request_id FROM net.http_post(
|
||||
url,
|
||||
payload,
|
||||
params,
|
||||
headers,
|
||||
timeout_ms
|
||||
);
|
||||
ELSE
|
||||
RAISE EXCEPTION 'method argument % is invalid', method;
|
||||
END CASE;
|
||||
|
||||
INSERT INTO supabase_functions.hooks
|
||||
(hook_table_id, hook_name, request_id)
|
||||
VALUES
|
||||
(TG_RELID, TG_NAME, request_id);
|
||||
|
||||
RETURN NEW;
|
||||
END
|
||||
$function$;
|
||||
-- Supabase super admin
|
||||
DO
|
||||
$$
|
||||
BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_roles
|
||||
WHERE rolname = 'supabase_functions_admin'
|
||||
)
|
||||
THEN
|
||||
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
|
||||
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
|
||||
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
|
||||
ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
|
||||
ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
|
||||
ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
|
||||
ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
|
||||
GRANT supabase_functions_admin TO postgres;
|
||||
-- Remove unused supabase_pg_net_admin role
|
||||
DO
|
||||
$$
|
||||
BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_roles
|
||||
WHERE rolname = 'supabase_pg_net_admin'
|
||||
)
|
||||
THEN
|
||||
REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
|
||||
DROP OWNED BY supabase_pg_net_admin;
DROP ROLE supabase_pg_net_admin;
END IF;
END
$$;

-- pg_net grants when extension is already enabled
DO
$$
BEGIN
  IF EXISTS (
    SELECT 1
    FROM pg_extension
    WHERE extname = 'pg_net'
  )
  THEN
    GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
  END IF;
END
$$;

-- Event trigger for pg_net
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
  IF EXISTS (
    SELECT 1
    FROM pg_event_trigger_ddl_commands() AS ev
    JOIN pg_extension AS ext
    ON ev.objid = ext.oid
    WHERE ext.extname = 'pg_net'
  )
  THEN
    GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
    ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
    REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
    GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
  END IF;
END;
$$;

COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';

DO
$$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM pg_event_trigger
    WHERE evtname = 'issue_pg_net_access'
  ) THEN
    CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
    EXECUTE PROCEDURE extensions.grant_pg_net_access();
  END IF;
END
$$;

INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');

ALTER function supabase_functions.http_request() SECURITY DEFINER;
ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;

COMMIT;
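A quick way to confirm the grants above took effect is to call `net.http_get` as one of the granted roles once the stack is up. A minimal sketch, assuming `pg_net` is installed and the default `postgres` credentials and port from `.env`:

```
psql "postgresql://postgres:${POSTGRES_PASSWORD}@localhost:5432/postgres" \
  -c "SELECT net.http_get('https://example.com');"
```

This should return a request id rather than a permission error, since `EXECUTE` is granted to `postgres`, `anon`, `authenticated`, and `service_role`.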
16
autogpt_platform/db/docker/volumes/functions/hello/index.ts
Normal file
@ -0,0 +1,16 @@
// Follow this setup guide to integrate the Deno language server with your editor:
// https://deno.land/manual/getting_started/setup_your_environment
// This enables autocomplete, go to definition, etc.

import { serve } from "https://deno.land/std@0.177.1/http/server.ts"

serve(async () => {
  return new Response(
    `"Hello from Edge Functions!"`,
    { headers: { "Content-Type": "application/json" } },
  )
})

// To invoke:
// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \
//   --header 'Authorization: Bearer <anon/service_role API key>'
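A concrete version of the invocation template above, assuming Kong listens on the default `KONG_HTTP_PORT=8000` and that `ANON_KEY` is exported from your `.env` (both are assumptions about your local configuration):

```
curl 'http://localhost:8000/functions/v1/hello' \
  --header "Authorization: Bearer $ANON_KEY"
```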
94
autogpt_platform/db/docker/volumes/functions/main/index.ts
Normal file
@ -0,0 +1,94 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'

console.log('main function started')

const JWT_SECRET = Deno.env.get('JWT_SECRET')
const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'

function getAuthToken(req: Request) {
  const authHeader = req.headers.get('authorization')
  if (!authHeader) {
    throw new Error('Missing authorization header')
  }
  const [bearer, token] = authHeader.split(' ')
  if (bearer !== 'Bearer') {
    throw new Error(`Auth header is not 'Bearer {token}'`)
  }
  return token
}

async function verifyJWT(jwt: string): Promise<boolean> {
  const encoder = new TextEncoder()
  const secretKey = encoder.encode(JWT_SECRET)
  try {
    await jose.jwtVerify(jwt, secretKey)
  } catch (err) {
    console.error(err)
    return false
  }
  return true
}

serve(async (req: Request) => {
  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
    try {
      const token = getAuthToken(req)
      const isValidJWT = await verifyJWT(token)

      if (!isValidJWT) {
        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
          status: 401,
          headers: { 'Content-Type': 'application/json' },
        })
      }
    } catch (e) {
      console.error(e)
      return new Response(JSON.stringify({ msg: e.toString() }), {
        status: 401,
        headers: { 'Content-Type': 'application/json' },
      })
    }
  }

  const url = new URL(req.url)
  const { pathname } = url
  const path_parts = pathname.split('/')
  const service_name = path_parts[1]

  if (!service_name || service_name === '') {
    const error = { msg: 'missing function name in request' }
    return new Response(JSON.stringify(error), {
      status: 400,
      headers: { 'Content-Type': 'application/json' },
    })
  }

  const servicePath = `/home/deno/functions/${service_name}`
  console.error(`serving the request with ${servicePath}`)

  const memoryLimitMb = 150
  const workerTimeoutMs = 1 * 60 * 1000
  const noModuleCache = false
  const importMapPath = null
  const envVarsObj = Deno.env.toObject()
  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])

  try {
    const worker = await EdgeRuntime.userWorkers.create({
      servicePath,
      memoryLimitMb,
      workerTimeoutMs,
      noModuleCache,
      importMapPath,
      envVars,
    })
    return await worker.fetch(req)
  } catch (e) {
    const error = { msg: e.toString() }
    return new Response(JSON.stringify(error), {
      status: 500,
      headers: { 'Content-Type': 'application/json' },
    })
  }
})
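Since `main` routes on the first path segment and spawns a worker from the matching directory under `/home/deno/functions`, adding an edge function is just adding a folder. A sketch, assuming the compose file mounts `db/docker/volumes/functions` at that path and that `ping` is a hypothetical function name of your choosing:

```
mkdir -p db/docker/volumes/functions/ping
cat > db/docker/volumes/functions/ping/index.ts <<'EOF'
import { serve } from "https://deno.land/std@0.177.1/http/server.ts"

serve(() => new Response("pong"))
EOF
curl 'http://localhost:8000/functions/v1/ping' \
  --header "Authorization: Bearer $ANON_KEY"
```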
232
autogpt_platform/db/docker/volumes/logs/vector.yml
Normal file
@ -0,0 +1,232 @@
api:
  enabled: true
  address: 0.0.0.0:9001

sources:
  docker_host:
    type: docker_logs
    exclude_containers:
      - supabase-vector

transforms:
  project_logs:
    type: remap
    inputs:
      - docker_host
    source: |-
      .project = "default"
      .event_message = del(.message)
      .appname = del(.container_name)
      del(.container_created_at)
      del(.container_id)
      del(.source_type)
      del(.stream)
      del(.label)
      del(.image)
      del(.host)
      del(.stream)
  router:
    type: route
    inputs:
      - project_logs
    route:
      kong: '.appname == "supabase-kong"'
      auth: '.appname == "supabase-auth"'
      rest: '.appname == "supabase-rest"'
      realtime: '.appname == "supabase-realtime"'
      storage: '.appname == "supabase-storage"'
      functions: '.appname == "supabase-functions"'
      db: '.appname == "supabase-db"'
  # Ignores non nginx errors since they are related with kong booting up
  kong_logs:
    type: remap
    inputs:
      - router.kong
    source: |-
      req, err = parse_nginx_log(.event_message, "combined")
      if err == null {
        .timestamp = req.timestamp
        .metadata.request.headers.referer = req.referer
        .metadata.request.headers.user_agent = req.agent
        .metadata.request.headers.cf_connecting_ip = req.client
        .metadata.request.method = req.method
        .metadata.request.path = req.path
        .metadata.request.protocol = req.protocol
        .metadata.response.status_code = req.status
      }
      if err != null {
        abort
      }
  # Ignores non nginx errors since they are related with kong booting up
  kong_err:
    type: remap
    inputs:
      - router.kong
    source: |-
      .metadata.request.method = "GET"
      .metadata.response.status_code = 200
      parsed, err = parse_nginx_log(.event_message, "error")
      if err == null {
        .timestamp = parsed.timestamp
        .severity = parsed.severity
        .metadata.request.host = parsed.host
        .metadata.request.headers.cf_connecting_ip = parsed.client
        url, err = split(parsed.request, " ")
        if err == null {
          .metadata.request.method = url[0]
          .metadata.request.path = url[1]
          .metadata.request.protocol = url[2]
        }
      }
      if err != null {
        abort
      }
  # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
  auth_logs:
    type: remap
    inputs:
      - router.auth
    source: |-
      parsed, err = parse_json(.event_message)
      if err == null {
        .metadata.timestamp = parsed.time
        .metadata = merge!(.metadata, parsed)
      }
  # PostgREST logs are structured so we separate timestamp from message using regex
  rest_logs:
    type: remap
    inputs:
      - router.rest
    source: |-
      parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
      if err == null {
        .event_message = parsed.msg
        .timestamp = to_timestamp!(parsed.time)
        .metadata.host = .project
      }
  # Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
  realtime_logs:
    type: remap
    inputs:
      - router.realtime
    source: |-
      .metadata.project = del(.project)
      .metadata.external_id = .metadata.project
      parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
      if err == null {
        .event_message = parsed.msg
        .metadata.level = parsed.level
      }
  # Storage logs may contain json objects so we parse them for completeness
  storage_logs:
    type: remap
    inputs:
      - router.storage
    source: |-
      .metadata.project = del(.project)
      .metadata.tenantId = .metadata.project
      parsed, err = parse_json(.event_message)
      if err == null {
        .event_message = parsed.msg
        .metadata.level = parsed.level
        .metadata.timestamp = parsed.time
        .metadata.context[0].host = parsed.hostname
        .metadata.context[0].pid = parsed.pid
      }
  # Postgres logs some messages to stderr which we map to warning severity level
  db_logs:
    type: remap
    inputs:
      - router.db
    source: |-
      .metadata.host = "db-default"
      .metadata.parsed.timestamp = .timestamp

      parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)

      if err != null || parsed == null {
        .metadata.parsed.error_severity = "info"
      }
      if parsed != null {
        .metadata.parsed.error_severity = parsed.level
      }
      if .metadata.parsed.error_severity == "info" {
        .metadata.parsed.error_severity = "log"
      }
      .metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)

sinks:
  logflare_auth:
    type: 'http'
    inputs:
      - auth_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_realtime:
    type: 'http'
    inputs:
      - realtime_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_rest:
    type: 'http'
    inputs:
      - rest_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_db:
    type: 'http'
    inputs:
      - db_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    # We must route the sink through kong because ingesting logs before logflare is fully initialised will
    # lead to broken queries from studio. This works by the assumption that containers are started in the
    # following order: vector > db > logflare > kong
    uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_functions:
    type: 'http'
    inputs:
      - router.functions
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_storage:
    type: 'http'
    inputs:
      - storage_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_kong:
    type: 'http'
    inputs:
      - kong_logs
      - kong_err
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
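Vector can validate this pipeline in place before the rest of the stack depends on it. A sketch, assuming the container is named `supabase-vector` and the config is mounted at `/etc/vector/vector.yml` as in the upstream compose file:

```
docker exec supabase-vector vector validate /etc/vector/vector.yml
```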
30
autogpt_platform/db/docker/volumes/pooler/pooler.exs
Normal file
@ -0,0 +1,30 @@
{:ok, _} = Application.ensure_all_started(:supavisor)

{:ok, version} =
  case Supavisor.Repo.query!("select version()") do
    %{rows: [[ver]]} -> Supavisor.Helpers.parse_pg_version(ver)
    _ -> nil
  end

params = %{
  "external_id" => System.get_env("POOLER_TENANT_ID"),
  "db_host" => "db",
  "db_port" => System.get_env("POSTGRES_PORT"),
  "db_database" => System.get_env("POSTGRES_DB"),
  "require_user" => false,
  "auth_query" => "SELECT * FROM pgbouncer.get_auth($1)",
  "default_max_clients" => System.get_env("POOLER_MAX_CLIENT_CONN"),
  "default_pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
  "default_parameter_status" => %{"server_version" => version},
  "users" => [%{
    "db_user" => "pgbouncer",
    "db_password" => System.get_env("POSTGRES_PASSWORD"),
    "mode_type" => System.get_env("POOLER_POOL_MODE"),
    "pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
    "is_manager" => true
  }]
}

if !Supavisor.Tenants.get_tenant_by_external_id(params["external_id"]) do
  {:ok, _} = Supavisor.Tenants.create_tenant(params)
end
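This seeding only matters if you run the Supavisor pooler; the local stack exposes Postgres directly instead. If you do enable it, clients connect through the tenant created above. A sketch, assuming the transaction-mode port 6543 is published and Supavisor's usual `postgres.<tenant>` username convention:

```
psql "postgresql://postgres.${POOLER_TENANT_ID}:${POSTGRES_PASSWORD}@localhost:6543/postgres"
```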
@ -67,19 +67,19 @@ services:
  studio:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: studio

  kong:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: kong

  auth:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: auth
    environment:
      GOTRUE_MAILER_AUTOCONFIRM: true
@ -87,54 +87,57 @@
  rest:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: rest

  realtime:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: realtime

  storage:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: storage

  imgproxy:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: imgproxy

  meta:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: meta

  functions:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: functions

  analytics:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: analytics

  db:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: db
    ports:
      - ${POSTGRES_PORT}:5432 # We don't use Supavisor locally, so we expose the db directly.

  vector:
    <<: *supabase-services
    extends:
      file: ./supabase/docker/docker-compose.yml
      file: ./db/docker/docker-compose.yml
      service: vector

  deps:
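Because the `db` service now publishes `${POSTGRES_PORT}:5432` directly, host-side clients reach Postgres without Supavisor in between. A quick check, assuming a local Postgres client install provides `pg_isready`:

```
pg_isready -h localhost -p "${POSTGRES_PORT:-5432}" -U postgres
```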
@ -1 +0,0 @@
Subproject commit 4647d50380f2b7c7ba65611bd9ec474b8d6a4003
@ -84,11 +84,11 @@ To run the backend services, follow these steps:
```
This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.

* Copy the `.env.example` file available in the `supabase/docker` directory to `.env` in `autogpt_platform`:
* Copy the `.env.example` file to `.env` in `autogpt_platform`:
```
cp supabase/docker/.env.example .env
cp .env.example .env
```
This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables.
This command will copy the `.env.example` file to `.env` in the `autogpt_platform` directory. You can modify the `.env` file to add your own environment variables.

* Run the backend services:
```