diff --git a/INSTRUCTIONS.md b/INSTRUCTIONS.md new file mode 100644 index 00000000000..30f3049d328 --- /dev/null +++ b/INSTRUCTIONS.md @@ -0,0 +1,64 @@ +# RAGFlow with Supabase and Tailscale + +This document provides instructions on how to configure this forked version of RAGFlow to use Supabase for database and storage, and Tailscale for remote access. + +## Prerequisites + +- A Supabase account (free tier is sufficient). +- A Tailscale account and Tailscale installed on both your local machine and the server where you will run RAGFlow. + +## 1. Supabase Configuration + +1. **Create a new Supabase project:** + - Go to your Supabase dashboard and create a new project. + +2. **Get Database Credentials:** + - In your Supabase project, navigate to **Settings > Database**. + - Under **Connection info**, you will find the following: + - `Host` + - `Port` + - `Database name` (usually `postgres`) + - `User` (usually `postgres`) + - `Password` + +3. **Get S3 Storage Credentials:** + - In your Supabase project, navigate to **Settings > Storage**. + - You will find the following: + - `Endpoint URL` + - `Region` + - To get the `Access Key` and `Secret Key`, you need to generate them. Go to **Storage > Settings > S3 Connection** and generate a new key pair. + - Create a new bucket in the Supabase Storage section. The name of this bucket will be your `S3_BUCKET`. + +4. **Configure RAGFlow:** + - In the `docker` directory, copy the `.env.example` file to a new file named `.env`. + - Open the `.env` file and fill in the values for the PostgreSQL and S3 sections with the credentials you obtained from Supabase. + +## 2. Running RAGFlow + +Once you have configured your `.env` file, you can start RAGFlow using Docker Compose: + +```bash +cd docker +docker-compose up -d +``` + +## 3. Tailscale Access + +To access your RAGFlow instance from your local machine using Tailscale, you need to find the Tailscale IP address of your server. + +1. 
**Find your server's Tailscale IP:** + - On your server, run the following command: + ```bash + tailscale ip -4 + ``` + - This will give you the Tailscale IP address of your server (e.g., `100.x.x.x`). + +2. **Access RAGFlow:** + - On your local machine, open your web browser and go to `http://<your-server-tailscale-ip>:9380` (substitute the Tailscale IP you found in the previous step). + - You should now be able to access the RAGFlow web interface. + +## 4. Troubleshooting + +- If you have any issues connecting to the database or storage, double-check your credentials in the `.env` file. +- Make sure that the bucket you created in Supabase Storage is public if you want to access the files directly. +- If you have trouble accessing RAGFlow through Tailscale, ensure that Tailscale is running on both your local machine and the server, and that there are no firewall rules blocking the connection. diff --git a/docker/.env.example b/docker/.env.example new file mode 100644 index 00000000000..c142f1aebec --- /dev/null +++ b/docker/.env.example @@ -0,0 +1,70 @@ +# ----------------------------------------------------------------------------- +# RAGFlow Configuration +# ----------------------------------------------------------------------------- +# This file contains the environment variables for configuring RAGFlow. +# Copy this file to .env and fill in the values for your environment. 
+ +# ----------------------------------------------------------------------------- +# Supabase PostgreSQL Configuration +# ----------------------------------------------------------------------------- +# To get these values, go to your Supabase project -> Settings -> Database +POSTGRES_HOST=your_supabase_host +POSTGRES_PORT=your_supabase_port +POSTGRES_DBNAME=postgres +POSTGRES_USER=postgres +POSTGRES_PASSWORD=your_supabase_password + +# ----------------------------------------------------------------------------- +# Supabase S3 Storage Configuration +# ----------------------------------------------------------------------------- +# To get these values, go to your Supabase project -> Settings -> Storage +S3_ACCESS_KEY=your_supabase_s3_access_key +S3_SECRET_KEY=your_supabase_s3_secret_key +S3_ENDPOINT_URL=your_supabase_s3_endpoint_url +S3_REGION=your_supabase_s3_region +S3_BUCKET=your_supabase_s3_bucket + +# ----------------------------------------------------------------------------- +# RAGFlow Service Configuration +# ----------------------------------------------------------------------------- +SVR_HTTP_PORT=9380 +TIMEZONE=Asia/Shanghai + +# ----------------------------------------------------------------------------- +# Vector Database (Infinity) +# ----------------------------------------------------------------------------- +INFINITY_THRIFT_PORT=23817 +INFINITY_HTTP_PORT=23820 +INFINITY_PSQL_PORT=5432 + +# ----------------------------------------------------------------------------- +# Redis Configuration +# ----------------------------------------------------------------------------- +REDIS_PASSWORD=infini_rag_flow +REDIS_PORT=6379 + +# ----------------------------------------------------------------------------- +# Elasticsearch/Opensearch Configuration (Optional) +# ----------------------------------------------------------------------------- +# Only needed if you are using Elasticsearch or Opensearch +# STACK_VERSION=8.14.1 +# ES_PORT=9200 +# 
ELASTIC_PASSWORD=infini_rag_flow +# OS_PORT=9201 +# OPENSEARCH_PASSWORD=infini_rag_flow_OS_01 + +# ----------------------------------------------------------------------------- +# Other Settings +# ----------------------------------------------------------------------------- +MEM_LIMIT=4g +RAGFLOW_IMAGE=infiniflow/ragflow:latest +SANDBOX_EXECUTOR_MANAGER_IMAGE=infiniflow/sandbox-executor-manager:latest +SANDBOX_EXECUTOR_MANAGER_PORT=9385 +SANDBOX_EXECUTOR_MANAGER_POOL_SIZE=3 +SANDBOX_BASE_PYTHON_IMAGE=infiniflow/sandbox-base-python:latest +SANDBOX_BASE_NODEJS_IMAGE=infiniflow/sandbox-base-nodejs:latest +SANDBOX_ENABLE_SECCOMP=false +SANDBOX_MAX_MEMORY=256m +SANDBOX_TIMEOUT=10s +HF_ENDPOINT=https://huggingface.co +MACOS= diff --git a/docker/docker-compose-base.yml b/docker/docker-compose-base.yml deleted file mode 100644 index 7ded0b10c05..00000000000 --- a/docker/docker-compose-base.yml +++ /dev/null @@ -1,227 +0,0 @@ -services: - es01: - container_name: ragflow-es-01 - profiles: - - elasticsearch - image: elasticsearch:${STACK_VERSION} - volumes: - - esdata01:/usr/share/elasticsearch/data - ports: - - ${ES_PORT}:9200 - env_file: .env - environment: - - node.name=es01 - - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} - - bootstrap.memory_lock=false - - discovery.type=single-node - - xpack.security.enabled=true - - xpack.security.http.ssl.enabled=false - - xpack.security.transport.ssl.enabled=false - - cluster.routing.allocation.disk.watermark.low=5gb - - cluster.routing.allocation.disk.watermark.high=3gb - - cluster.routing.allocation.disk.watermark.flood_stage=2gb - - TZ=${TIMEZONE} - mem_limit: ${MEM_LIMIT} - ulimits: - memlock: - soft: -1 - hard: -1 - healthcheck: - test: ["CMD-SHELL", "curl http://localhost:9200"] - interval: 10s - timeout: 10s - retries: 120 - networks: - - ragflow - restart: on-failure - - opensearch01: - container_name: ragflow-opensearch-01 - profiles: - - opensearch - image: hub.icert.top/opensearchproject/opensearch:2.19.1 - volumes: - - 
osdata01:/usr/share/opensearch/data - ports: - - ${OS_PORT}:9201 - env_file: .env - environment: - - node.name=opensearch01 - - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_PASSWORD} - - bootstrap.memory_lock=false - - discovery.type=single-node - - plugins.security.disabled=false - - plugins.security.ssl.http.enabled=false - - plugins.security.ssl.transport.enabled=true - - cluster.routing.allocation.disk.watermark.low=5gb - - cluster.routing.allocation.disk.watermark.high=3gb - - cluster.routing.allocation.disk.watermark.flood_stage=2gb - - TZ=${TIMEZONE} - - http.port=9201 - mem_limit: ${MEM_LIMIT} - ulimits: - memlock: - soft: -1 - hard: -1 - healthcheck: - test: ["CMD-SHELL", "curl http://localhost:9201"] - interval: 10s - timeout: 10s - retries: 120 - networks: - - ragflow - restart: on-failure - - infinity: - container_name: ragflow-infinity - profiles: - - infinity - image: infiniflow/infinity:v0.6.0-dev5 - volumes: - - infinity_data:/var/infinity - - ./infinity_conf.toml:/infinity_conf.toml - command: ["-f", "/infinity_conf.toml"] - ports: - - ${INFINITY_THRIFT_PORT}:23817 - - ${INFINITY_HTTP_PORT}:23820 - - ${INFINITY_PSQL_PORT}:5432 - env_file: .env - environment: - - TZ=${TIMEZONE} - mem_limit: ${MEM_LIMIT} - ulimits: - nofile: - soft: 500000 - hard: 500000 - networks: - - ragflow - healthcheck: - test: ["CMD", "curl", "http://localhost:23820/admin/node/current"] - interval: 10s - timeout: 10s - retries: 120 - restart: on-failure - - sandbox-executor-manager: - container_name: ragflow-sandbox-executor-manager - profiles: - - sandbox - image: ${SANDBOX_EXECUTOR_MANAGER_IMAGE-infiniflow/sandbox-executor-manager:latest} - privileged: true - ports: - - ${SANDBOX_EXECUTOR_MANAGER_PORT-9385}:9385 - env_file: .env - volumes: - - /var/run/docker.sock:/var/run/docker.sock - networks: - - ragflow - security_opt: - - no-new-privileges:true - environment: - - TZ=${TIMEZONE} - - 
SANDBOX_EXECUTOR_MANAGER_POOL_SIZE=${SANDBOX_EXECUTOR_MANAGER_POOL_SIZE:-3} - - SANDBOX_BASE_PYTHON_IMAGE=${SANDBOX_BASE_PYTHON_IMAGE:-infiniflow/sandbox-base-python:latest} - - SANDBOX_BASE_NODEJS_IMAGE=${SANDBOX_BASE_NODEJS_IMAGE:-infiniflow/sandbox-base-nodejs:latest} - - SANDBOX_ENABLE_SECCOMP=${SANDBOX_ENABLE_SECCOMP:-false} - - SANDBOX_MAX_MEMORY=${SANDBOX_MAX_MEMORY:-256m} - - SANDBOX_TIMEOUT=${SANDBOX_TIMEOUT:-10s} - healthcheck: - test: ["CMD", "curl", "http://localhost:9385/healthz"] - interval: 10s - timeout: 5s - retries: 5 - restart: on-failure - - mysql: - # mysql:5.7 linux/arm64 image is unavailable. - image: mysql:8.0.39 - container_name: ragflow-mysql - env_file: .env - environment: - - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD} - - TZ=${TIMEZONE} - command: - --max_connections=1000 - --character-set-server=utf8mb4 - --collation-server=utf8mb4_unicode_ci - --default-authentication-plugin=mysql_native_password - --tls_version="TLSv1.2,TLSv1.3" - --init-file /data/application/init.sql - --binlog_expire_logs_seconds=604800 - ports: - - ${MYSQL_PORT}:3306 - volumes: - - mysql_data:/var/lib/mysql - - ./init.sql:/data/application/init.sql - networks: - - ragflow - healthcheck: - test: ["CMD", "mysqladmin" ,"ping", "-uroot", "-p${MYSQL_PASSWORD}"] - interval: 10s - timeout: 10s - retries: 3 - restart: on-failure - - minio: - image: quay.io/minio/minio:RELEASE.2025-06-13T11-33-47Z - container_name: ragflow-minio - command: server --console-address ":9001" /data - ports: - - ${MINIO_PORT}:9000 - - ${MINIO_CONSOLE_PORT}:9001 - env_file: .env - environment: - - MINIO_ROOT_USER=${MINIO_USER} - - MINIO_ROOT_PASSWORD=${MINIO_PASSWORD} - - TZ=${TIMEZONE} - volumes: - - minio_data:/data - networks: - - ragflow - restart: on-failure - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] - interval: 30s - timeout: 20s - retries: 3 - - redis: - # swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/valkey/valkey:8 - image: valkey/valkey:8 - 
container_name: ragflow-redis - command: redis-server --requirepass ${REDIS_PASSWORD} --maxmemory 128mb --maxmemory-policy allkeys-lru - env_file: .env - ports: - - ${REDIS_PORT}:6379 - volumes: - - redis_data:/data - networks: - - ragflow - restart: on-failure - healthcheck: - test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"] - interval: 5s - timeout: 3s - retries: 3 - start_period: 10s - - - -volumes: - esdata01: - driver: local - osdata01: - driver: local - infinity_data: - driver: local - mysql_data: - driver: local - minio_data: - driver: local - redis_data: - driver: local - -networks: - ragflow: - driver: bridge diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 3583afdf359..369f2982e64 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,27 +1,6 @@ -include: - - ./docker-compose-base.yml -# To ensure that the container processes the locally modified `service_conf.yaml.template` instead of the one included in its image, you need to mount the local `service_conf.yaml.template` to the container. services: ragflow: - depends_on: - mysql: - condition: service_healthy image: ${RAGFLOW_IMAGE} - # Example configuration to set up an MCP server: - # command: - # - --enable-mcpserver - # - --mcp-host=0.0.0.0 - # - --mcp-port=9382 - # - --mcp-base-url=http://127.0.0.1:9380 - # - --mcp-script-path=/ragflow/mcp/server/server.py - # - --mcp-mode=self-host - # - --mcp-host-api-key=ragflow-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - # Optional transport flags for MCP (customize if needed). - # Host mode need to combined with --no-transport-streamable-http-enabled flag, namely, host+streamable-http is not supported yet. - # The following are enabled by default unless explicitly disabled with --no-. 
- # - --no-transport-sse-enabled # Disable legacy SSE endpoints (/sse and /messages/) - # - --no-transport-streamable-http-enabled # Disable Streamable HTTP transport (/mcp endpoint) - # - --no-json-response # Disable JSON response mode in Streamable HTTP transport (instead of SSE over HTTP) container_name: ragflow-server ports: - ${SVR_HTTP_PORT}:9380 @@ -29,7 +8,7 @@ services: - 443:443 - 5678:5678 - 5679:5679 - - 9382:9382 # entry for MCP (host_port:docker_port). The docker_port must match the value you set for `mcp-port` above. + - 9382:9382 volumes: - ./ragflow-logs:/ragflow/logs - ./nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf @@ -43,32 +22,177 @@ services: - TZ=${TIMEZONE} - HF_ENDPOINT=${HF_ENDPOINT-} - MACOS=${MACOS-} + - DATABASE_TYPE=POSTGRES + - STORAGE_IMPL=AWS_S3 networks: - ragflow restart: on-failure - # https://docs.docker.com/engine/daemon/prometheus/#create-a-prometheus-configuration - # If you use Docker Desktop, the --add-host flag is optional. This flag ensures that the host's internal IP is exposed to the Prometheus container. extra_hosts: - "host.docker.internal:host-gateway" - # executor: - # depends_on: - # mysql: - # condition: service_healthy - # image: ${RAGFLOW_IMAGE} - # container_name: ragflow-executor - # volumes: - # - ./ragflow-logs:/ragflow/logs - # - ./nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf - # env_file: .env - # environment: - # - TZ=${TIMEZONE} - # - HF_ENDPOINT=${HF_ENDPOINT} - # - MACOS=${MACOS} - # entrypoint: "/ragflow/entrypoint_task_executor.sh 1 3" - # networks: - # - ragflow - # restart: on-failure - # # https://docs.docker.com/engine/daemon/prometheus/#create-a-prometheus-configuration - # # If you're using Docker Desktop, the --add-host flag is optional. This flag makes sure that the host's internal IP gets exposed to the Prometheus container. 
- # extra_hosts: - # - "host.docker.internal:host-gateway" + + es01: + container_name: ragflow-es-01 + profiles: + - elasticsearch + image: elasticsearch:${STACK_VERSION} + volumes: + - esdata01:/usr/share/elasticsearch/data + ports: + - ${ES_PORT}:9200 + env_file: .env + environment: + - node.name=es01 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=false + - discovery.type=single-node + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=false + - xpack.security.transport.ssl.enabled=false + - cluster.routing.allocation.disk.watermark.low=5gb + - cluster.routing.allocation.disk.watermark.high=3gb + - cluster.routing.allocation.disk.watermark.flood_stage=2gb + - TZ=${TIMEZONE} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: ["CMD-SHELL", "curl http://localhost:9200"] + interval: 10s + timeout: 10s + retries: 120 + networks: + - ragflow + restart: on-failure + + opensearch01: + container_name: ragflow-opensearch-01 + profiles: + - opensearch + image: hub.icert.top/opensearchproject/opensearch:2.19.1 + volumes: + - osdata01:/usr/share/opensearch/data + ports: + - ${OS_PORT}:9201 + env_file: .env + environment: + - node.name=opensearch01 + - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} + - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_PASSWORD} + - bootstrap.memory_lock=false + - discovery.type=single-node + - plugins.security.disabled=false + - plugins.security.ssl.http.enabled=false + - plugins.security.ssl.transport.enabled=true + - cluster.routing.allocation.disk.watermark.low=5gb + - cluster.routing.allocation.disk.watermark.high=3gb + - cluster.routing.allocation.disk.watermark.flood_stage=2gb + - TZ=${TIMEZONE} + - http.port=9201 + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: ["CMD-SHELL", "curl http://localhost:9201"] + interval: 10s + timeout: 10s + retries: 120 + networks: + - ragflow + restart: on-failure + + infinity: + 
container_name: ragflow-infinity + profiles: + - infinity + image: infiniflow/infinity:v0.6.0-dev5 + volumes: + - infinity_data:/var/infinity + - ./infinity_conf.toml:/infinity_conf.toml + command: ["-f", "/infinity_conf.toml"] + ports: + - ${INFINITY_THRIFT_PORT}:23817 + - ${INFINITY_HTTP_PORT}:23820 + - ${INFINITY_PSQL_PORT}:5432 + env_file: .env + environment: + - TZ=${TIMEZONE} + mem_limit: ${MEM_LIMIT} + ulimits: + nofile: + soft: 500000 + hard: 500000 + networks: + - ragflow + healthcheck: + test: ["CMD", "curl", "http://localhost:23820/admin/node/current"] + interval: 10s + timeout: 10s + retries: 120 + restart: on-failure + + sandbox-executor-manager: + container_name: ragflow-sandbox-executor-manager + profiles: + - sandbox + image: ${SANDBOX_EXECUTOR_MANAGER_IMAGE-infiniflow/sandbox-executor-manager:latest} + privileged: true + ports: + - ${SANDBOX_EXECUTOR_MANAGER_PORT-9385}:9385 + env_file: .env + volumes: + - /var/run/docker.sock:/var/run/docker.sock + networks: + - ragflow + security_opt: + - no-new-privileges:true + environment: + - TZ=${TIMEZONE} + - SANDBOX_EXECUTOR_MANAGER_POOL_SIZE=${SANDBOX_EXECUTOR_MANAGER_POOL_SIZE:-3} + - SANDBOX_BASE_PYTHON_IMAGE=${SANDBOX_BASE_PYTHON_IMAGE:-infiniflow/sandbox-base-python:latest} + - SANDBOX_BASE_NODEJS_IMAGE=${SANDBOX_BASE_NODEJS_IMAGE:-infiniflow/sandbox-base-nodejs:latest} + - SANDBOX_ENABLE_SECCOMP=${SANDBOX_ENABLE_SECCOMP:-false} + - SANDBOX_MAX_MEMORY=${SANDBOX_MAX_MEMORY:-256m} + - SANDBOX_TIMEOUT=${SANDBOX_TIMEOUT:-10s} + healthcheck: + test: ["CMD", "curl", "http://localhost:9385/healthz"] + interval: 10s + timeout: 5s + retries: 5 + restart: on-failure + + redis: + image: valkey/valkey:8 + container_name: ragflow-redis + command: redis-server --requirepass ${REDIS_PASSWORD} --maxmemory 128mb --maxmemory-policy allkeys-lru + env_file: .env + ports: + - ${REDIS_PORT}:6379 + volumes: + - redis_data:/data + networks: + - ragflow + restart: on-failure + healthcheck: + test: ["CMD", "redis-cli", "-a", 
"${REDIS_PASSWORD}", "ping"] + interval: 5s + timeout: 3s + retries: 3 + start_period: 10s + +volumes: + esdata01: + driver: local + osdata01: + driver: local + infinity_data: + driver: local + redis_data: + driver: local + +networks: + ragflow: + driver: bridge diff --git a/docker/service_conf.yaml.template b/docker/service_conf.yaml.template index c36e921c9c7..8f8c0b9f955 100644 --- a/docker/service_conf.yaml.template +++ b/docker/service_conf.yaml.template @@ -1,19 +1,19 @@ ragflow: host: ${RAGFLOW_HOST:-0.0.0.0} http_port: 9380 -mysql: - name: '${MYSQL_DBNAME:-rag_flow}' - user: '${MYSQL_USER:-root}' - password: '${MYSQL_PASSWORD:-infini_rag_flow}' - host: '${MYSQL_HOST:-mysql}' - port: 3306 - max_connections: 900 - stale_timeout: 300 - max_allowed_packet: ${MYSQL_MAX_PACKET:-1073741824} -minio: - user: '${MINIO_USER:-rag_flow}' - password: '${MINIO_PASSWORD:-infini_rag_flow}' - host: '${MINIO_HOST:-minio}:9000' +# mysql: +# name: '${MYSQL_DBNAME:-rag_flow}' +# user: '${MYSQL_USER:-root}' +# password: '${MYSQL_PASSWORD:-infini_rag_flow}' +# host: '${MYSQL_HOST:-mysql}' +# port: 3306 +# max_connections: 900 +# stale_timeout: 300 +# max_allowed_packet: ${MYSQL_MAX_PACKET:-1073741824} +# minio: +# user: '${MINIO_USER:-rag_flow}' +# password: '${MINIO_PASSWORD:-infini_rag_flow}' +# host: '${MINIO_HOST:-minio}:9000' es: hosts: 'http://${ES_HOST:-es01}:9200' username: '${ES_USER:-elastic}' @@ -30,23 +30,23 @@ redis: password: '${REDIS_PASSWORD:-infini_rag_flow}' host: '${REDIS_HOST:-redis}:6379' -# postgres: -# name: '${POSTGRES_DBNAME:-rag_flow}' -# user: '${POSTGRES_USER:-rag_flow}' -# password: '${POSTGRES_PASSWORD:-infini_rag_flow}' -# host: '${POSTGRES_HOST:-postgres}' -# port: 5432 -# max_connections: 100 -# stale_timeout: 30 -# s3: -# access_key: 'access_key' -# secret_key: 'secret_key' -# region: 'region' -# endpoint_url: 'endpoint_url' -# bucket: 'bucket' -# prefix_path: 'prefix_path' -# signature_version: 'v4' -# addressing_style: 'path' +postgres: + name: 
'${POSTGRES_DBNAME:-rag_flow}' + user: '${POSTGRES_USER:-rag_flow}' + password: '${POSTGRES_PASSWORD:-infini_rag_flow}' + host: '${POSTGRES_HOST:-postgres}' + port: ${POSTGRES_PORT:-5432} + max_connections: 100 + stale_timeout: 30 +s3: + access_key: '${S3_ACCESS_KEY}' + secret_key: '${S3_SECRET_KEY}' + region: '${S3_REGION}' + endpoint_url: '${S3_ENDPOINT_URL}' + bucket: '${S3_BUCKET}' + prefix_path: 'ragflow' + signature_version: 'v4' + addressing_style: 'path' # oss: # access_key: '${ACCESS_KEY}' # secret_key: '${SECRET_KEY}' diff --git a/rag/utils/storage_factory.py b/rag/utils/storage_factory.py index 4ac091f85f5..b10618c30eb 100644 --- a/rag/utils/storage_factory.py +++ b/rag/utils/storage_factory.py @@ -49,5 +49,5 @@ def create(cls, storage: Storage): return cls.storage_mapping[storage]() -STORAGE_IMPL_TYPE = os.getenv('STORAGE_IMPL', 'MINIO') +STORAGE_IMPL_TYPE = os.getenv('STORAGE_IMPL', 'AWS_S3') STORAGE_IMPL = StorageFactory.create(Storage[STORAGE_IMPL_TYPE])