diff --git a/xin-dify/data/certbot/README.md b/xin-dify/data/certbot/README.md
new file mode 100644
index 0000000..21be34b
--- /dev/null
+++ b/xin-dify/data/certbot/README.md
@@ -0,0 +1,76 @@
+# Launching new servers with SSL certificates
+
+## Short description
+
+Docker Compose certbot configuration with backward compatibility (it also works without the certbot container).
+Use `docker compose --profile certbot up` to enable this feature.
+
+## The simplest way to launch new servers with SSL certificates
+
+1. Get Let's Encrypt certs.
+   Set `.env` values:
+   ```properties
+   NGINX_SSL_CERT_FILENAME=fullchain.pem
+   NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
+   NGINX_ENABLE_CERTBOT_CHALLENGE=true
+   CERTBOT_DOMAIN=your_domain.com
+   CERTBOT_EMAIL=example@your_domain.com
+   ```
+   Execute the commands:
+   ```shell
+   docker network prune
+   docker compose --profile certbot up --force-recreate -d
+   ```
+   Then, after the containers have launched:
+   ```shell
+   docker compose exec -it certbot /bin/sh /update-cert.sh
+   ```
+2. Edit the `.env` file and run `docker compose --profile certbot up` again.
+   Additionally set the `.env` value:
+   ```properties
+   NGINX_HTTPS_ENABLED=true
+   ```
+   Execute the command:
+   ```shell
+   docker compose --profile certbot up -d --no-deps --force-recreate nginx
+   ```
+   Then you can access your server over HTTPS:
+   [https://your_domain.com](https://your_domain.com)
+
+## SSL certificate renewal
+
+To renew the SSL certificates, execute the commands below:
+
+```shell
+docker compose exec -it certbot /bin/sh /update-cert.sh
+docker compose exec nginx nginx -s reload
+```
+
+## Options for certbot
+
+The `CERTBOT_OPTIONS` key might be helpful for testing, e.g.,
+
+```properties
+CERTBOT_OPTIONS=--dry-run
+```
+
+To apply changes to `CERTBOT_OPTIONS`, recreate the certbot container before updating the certificates.
+
+```shell
+docker compose --profile certbot up -d --no-deps --force-recreate certbot
+docker compose exec -it certbot /bin/sh /update-cert.sh
+```
+
+Then, reload the nginx container if necessary.
+
+```shell
+docker compose exec nginx nginx -s reload
+```
+
+## For legacy servers
+
+To keep using the cert file directory `nginx/ssl` as before, simply launch the containers WITHOUT the `--profile certbot` option.
+
+```shell
+docker compose up -d
+```
diff --git a/xin-dify/data/certbot/docker-entrypoint.sh b/xin-dify/data/certbot/docker-entrypoint.sh
new file mode 100755
index 0000000..a70ecd8
--- /dev/null
+++ b/xin-dify/data/certbot/docker-entrypoint.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+set -e
+
+printf '%s\n' "Docker entrypoint script is running"
+
+printf '\n%s\n' "Checking specific environment variables:"
+printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
+printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
+printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"
+
+printf '\n%s\n' "Checking mounted directories:"
+for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
+  if [ -d "$dir" ]; then
+    printf '%s\n' "$dir exists. Contents:"
+    ls -la "$dir"
+  else
+    printf '%s\n' "$dir does not exist."
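+    # These paths are bind-mounted from ./volumes/certbot/* by the certbot service
+    # in docker-compose, so a missing directory usually means certbot has not run yet.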
+  fi
+done
+
+printf '\n%s\n' "Generating update-cert.sh from template"
+sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
+    -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
+    -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
+    /update-cert.template.txt > /update-cert.sh
+
+chmod +x /update-cert.sh
+
+printf '%s\n' "Executing command:" "$@"
+exec "$@"
diff --git a/xin-dify/data/certbot/update-cert.template.txt b/xin-dify/data/certbot/update-cert.template.txt
new file mode 100755
index 0000000..16786a1
--- /dev/null
+++ b/xin-dify/data/certbot/update-cert.template.txt
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -e
+
+DOMAIN="${CERTBOT_DOMAIN}"
+EMAIL="${CERTBOT_EMAIL}"
+OPTIONS="${CERTBOT_OPTIONS}"
+CERT_NAME="${DOMAIN}"  # use the domain name as the certificate name
+
+# Check if the certificate already exists
+if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
+  echo "Certificate exists. Attempting to renew..."
+  certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
+else
+  echo "Certificate does not exist. Obtaining a new certificate..."
+  certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
+fi
+echo "Certificate operation successful"
+# Note: Nginx reload should be handled outside this container
+echo "Please be sure to reload Nginx to apply any certificate changes."
diff --git a/xin-dify/data/couchbase-server/Dockerfile b/xin-dify/data/couchbase-server/Dockerfile
new file mode 100644
index 0000000..bd8af64
--- /dev/null
+++ b/xin-dify/data/couchbase-server/Dockerfile
@@ -0,0 +1,4 @@
+FROM couchbase/server:latest AS stage_base
+# FROM couchbase:latest AS stage_base
+COPY init-cbserver.sh /opt/couchbase/init/
+RUN chmod +x /opt/couchbase/init/init-cbserver.sh
\ No newline at end of file
diff --git a/xin-dify/data/couchbase-server/init-cbserver.sh b/xin-dify/data/couchbase-server/init-cbserver.sh
new file mode 100755
index 0000000..e66bc18
--- /dev/null
+++ b/xin-dify/data/couchbase-server/init-cbserver.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Used to start Couchbase Server; docker compose only allows one startup command, so we launch it the way the standard Couchbase Dockerfile would:
+# https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88
+
+/entrypoint.sh couchbase-server &
+
+# track whether setup is complete so we don't try to set up again
+FILE=/opt/couchbase/init/setupComplete.txt
+
+if !
[ -f "$FILE" ]; then + # used to automatically create the cluster based on environment variables + # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html + + echo $COUCHBASE_ADMINISTRATOR_USERNAME ":" $COUCHBASE_ADMINISTRATOR_PASSWORD + + sleep 20s + /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \ + --cluster-username $COUCHBASE_ADMINISTRATOR_USERNAME \ + --cluster-password $COUCHBASE_ADMINISTRATOR_PASSWORD \ + --services data,index,query,fts \ + --cluster-ramsize $COUCHBASE_RAM_SIZE \ + --cluster-index-ramsize $COUCHBASE_INDEX_RAM_SIZE \ + --cluster-eventing-ramsize $COUCHBASE_EVENTING_RAM_SIZE \ + --cluster-fts-ramsize $COUCHBASE_FTS_RAM_SIZE \ + --index-storage-setting default + + sleep 2s + + # used to auto create the bucket based on environment variables + # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html + + /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \ + --username $COUCHBASE_ADMINISTRATOR_USERNAME \ + --password $COUCHBASE_ADMINISTRATOR_PASSWORD \ + --bucket $COUCHBASE_BUCKET \ + --bucket-ramsize $COUCHBASE_BUCKET_RAMSIZE \ + --bucket-type couchbase + + # create file so we know that the cluster is setup and don't run the setup again + touch $FILE +fi + # docker compose will stop the container from running unless we do this + # known issue and workaround + tail -f /dev/null diff --git a/xin-dify/data/docker-compose-template.yaml b/xin-dify/data/docker-compose-template.yaml new file mode 100644 index 0000000..e933dd8 --- /dev/null +++ b/xin-dify/data/docker-compose-template.yaml @@ -0,0 +1,656 @@ +x-shared-env: &shared-api-worker-env +services: + # API service + api: + image: langgenius/dify-api:1.2.0 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'api' starts the API server. + MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # worker service + # The Celery worker for processing the queue. + worker: + image: langgenius/dify-api:1.2.0 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker' starts the Celery worker for processing the queue. + MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ./volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # Frontend web application. 
+ web: + image: langgenius/dify-web:1.2.0 + restart: always + environment: + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + CSP_WHITELIST: ${CSP_WHITELIST:-} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + PM2_INSTANCES: ${PM2_INSTANCES:-2} + LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} + MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} + MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} + MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5} + + # The postgres database. + db: + image: postgres:15-alpine + restart: always + environment: + PGUSER: ${PGUSER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + volumes: + - ./volumes/db/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ] + interval: 1s + timeout: 3s + retries: 60 + + # The redis cache. + redis: + image: redis:6-alpine + restart: always + environment: + REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456} + volumes: + # Mount the redis data directory to the container. + - ./volumes/redis/data:/data + # Set the redis password when startup redis server. + command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} + healthcheck: + test: [ 'CMD', 'redis-cli', 'ping' ] + + # The DifySandbox + sandbox: + image: langgenius/dify-sandbox:0.2.11 + restart: always + environment: + # The DifySandbox configurations + # Make sure you are changing this key for your deployment with a strong key. + # You can generate a strong key using `openssl rand -base64 42`. + API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + GIN_MODE: ${SANDBOX_GIN_MODE:-release} + WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + volumes: + - ./volumes/sandbox/dependencies:/dependencies + - ./volumes/sandbox/conf:/conf + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + networks: + - ssrf_proxy_network + + # plugin daemon + plugin_daemon: + image: langgenius/dify-plugin-daemon:0.0.7-local + restart: always + environment: + # Use the shared environment variables. 
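+      # YAML merge: pulls in every key from the x-shared-env anchor defined at the
+      # top of this file; keys set explicitly below override the shared values.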
+      <<: *shared-api-worker-env
+      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
+      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
+      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
+      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
+      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
+      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
+      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
+      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
+      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
+      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
+      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
+      PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
+      PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
+      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
+      PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
+      PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
+      PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
+      PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
+      PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
+      PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
+      S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
+      S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
+      S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
+      AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
+      AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
+      AWS_REGION: ${PLUGIN_AWS_REGION:-}
+      AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
+      AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
+      TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
+      TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
+      TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
+    ports:
+      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
+    volumes:
+      - ./volumes/plugin_daemon:/app/storage
+    depends_on:
+      db:
+        condition: service_healthy
+
+  # ssrf_proxy server
+  # for more information, please refer to
+  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
+  ssrf_proxy:
+    image: ubuntu/squid:latest
+    restart: always
+    volumes:
+      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
+      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
+    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+    environment:
+      # Please adjust the squid env vars below to fit your network environment.
+      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
+      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
+      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
+      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
+      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
+    networks:
+      - ssrf_proxy_network
+      - default
+
+  # Certbot service
+  # use `docker compose --profile certbot up` to start the certbot service.
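+  # Renewal is not scheduled automatically; a possible host-side cron entry
+  # (assumed example, mirroring the commands in certbot/README.md) would be:
+  #   0 3 * * 1  docker compose exec certbot /bin/sh /update-cert.sh && docker compose exec nginx nginx -s reload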
+  certbot:
+    image: certbot/certbot
+    profiles:
+      - certbot
+    volumes:
+      - ./volumes/certbot/conf:/etc/letsencrypt
+      - ./volumes/certbot/www:/var/www/html
+      - ./volumes/certbot/logs:/var/log/letsencrypt
+      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
+      - ./certbot/update-cert.template.txt:/update-cert.template.txt
+      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
+    environment:
+      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
+      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
+      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
+    entrypoint: [ '/docker-entrypoint.sh' ]
+    command: [ 'tail', '-f', '/dev/null' ]
+
+  # The nginx reverse proxy.
+  # Used for reverse proxying the API service and Web service.
+  nginx:
+    image: nginx:latest
+    restart: always
+    volumes:
+      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
+      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
+      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
+      - ./nginx/conf.d:/etc/nginx/conf.d
+      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
+      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
+      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
+      - ./volumes/certbot/conf:/etc/letsencrypt
+      - ./volumes/certbot/www:/var/www/html
+    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+    environment:
+      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
+      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
+      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
+      NGINX_PORT: ${NGINX_PORT:-80}
+      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
+      # and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
+      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
+      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
+      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
+      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
+      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
+      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
+      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
+      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
+      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
+      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
+    depends_on:
+      - api
+      - web
+    ports:
+      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
+      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
+
+  # The Weaviate vector store.
+  weaviate:
+    image: semitechnologies/weaviate:1.19.0
+    profiles:
+      - ''
+      - weaviate
+    restart: always
+    volumes:
+      # Mount the Weaviate data directory to the container.
+      - ./volumes/weaviate:/var/lib/weaviate
+    environment:
+      # The Weaviate configurations
+      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
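+      # The default WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS value below is a
+      # published sample key; replace it in .env for any non-local deployment.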
+ PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false} + DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + + # Qdrant vector store. + # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.) + qdrant: + image: langgenius/qdrant:v1.7.3 + profiles: + - qdrant + restart: always + volumes: + - ./volumes/qdrant:/qdrant/storage + environment: + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + + # The Couchbase vector store. + couchbase-server: + build: ./couchbase-server + profiles: + - couchbase + restart: always + environment: + - CLUSTER_NAME=dify_search + - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator} + - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password} + - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings} + - COUCHBASE_BUCKET_RAMSIZE=512 + - COUCHBASE_RAM_SIZE=2048 + - COUCHBASE_EVENTING_RAM_SIZE=512 + - COUCHBASE_INDEX_RAM_SIZE=512 + - COUCHBASE_FTS_RAM_SIZE=1024 + hostname: couchbase-server + container_name: couchbase-server + working_dir: /opt/couchbase + stdin_open: true + tty: true + entrypoint: [ "" ] + command: sh -c "/opt/couchbase/init/init-cbserver.sh" + volumes: + - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data + healthcheck: + # ensure bucket was created before proceeding + test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] + interval: 10s + retries: 10 + start_period: 30s + timeout: 10s + + # The pgvector vector database. + pgvector: + image: pgvector/pgvector:pg16 + profiles: + - pgvector + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + # pg_bigm module for full text search + PG_BIGM: ${PGVECTOR_PG_BIGM:-false} + PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606} + volumes: + - ./volumes/pgvector/data:/var/lib/postgresql/data + - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh + entrypoint: [ '/docker-entrypoint.sh' ] + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # pgvecto-rs vector store + pgvecto-rs: + image: tensorchord/pgvecto-rs:pg16-v0.3.0 + profiles: + - pgvecto-rs + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. 
+ POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # Chroma vector database + chroma: + image: ghcr.io/chroma-core/chroma:0.5.20 + profiles: + - chroma + restart: always + volumes: + - ./volumes/chroma:/chroma/chroma + environment: + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + + # OceanBase vector database + oceanbase: + image: oceanbase/oceanbase-ce:4.3.5.1-101000042025031818 + container_name: oceanbase + profiles: + - oceanbase + restart: always + volumes: + - ./volumes/oceanbase/data:/root/ob + - ./volumes/oceanbase/conf:/root/.obd/cluster + - ./volumes/oceanbase/init.d:/root/boot/init.d + environment: + OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + MODE: MINI + ports: + - "${OCEANBASE_VECTOR_PORT:-2881}:2881" + + # Oracle vector database + oracle: + image: container-registry.oracle.com/database/free:latest + profiles: + - oracle + restart: always + volumes: + - source: oradata + type: volume + target: /opt/oracle/oradata + - ./startupscripts:/opt/oracle/scripts/startup + environment: + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + + # Milvus vector database services + etcd: + container_name: milvus-etcd + image: quay.io/coreos/etcd:v3.5.5 + profiles: + - milvus + environment: + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + volumes: + - ./volumes/milvus/etcd:/etcd + command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd + healthcheck: + test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + minio: + container_name: milvus-minio + image: minio/minio:RELEASE.2023-03-20T20-16-18Z + profiles: + - milvus + environment: + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + volumes: + - ./volumes/milvus/minio:/minio_data + command: minio server /minio_data --console-address ":9001" + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + milvus-standalone: + container_name: milvus-standalone + image: milvusdb/milvus:v2.5.0-beta + profiles: + - milvus + command: [ 'milvus', 'run', 'standalone' ] + environment: + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true} + volumes: + - ./volumes/milvus/milvus:/var/lib/milvus + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + interval: 30s + start_period: 90s + timeout: 20s + retries: 3 + depends_on: + - etcd + - minio + ports: + - 19530:19530 
+ - 9091:9091 + networks: + - milvus + + # Opensearch vector database + opensearch: + container_name: opensearch + image: opensearchproject/opensearch:latest + profiles: + - opensearch + environment: + discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + ulimits: + memlock: + soft: ${OPENSEARCH_MEMLOCK_SOFT:--1} + hard: ${OPENSEARCH_MEMLOCK_HARD:--1} + nofile: + soft: ${OPENSEARCH_NOFILE_SOFT:-65536} + hard: ${OPENSEARCH_NOFILE_HARD:-65536} + volumes: + - ./volumes/opensearch/data:/usr/share/opensearch/data + networks: + - opensearch-net + + opensearch-dashboards: + container_name: opensearch-dashboards + image: opensearchproject/opensearch-dashboards:latest + profiles: + - opensearch + environment: + OPENSEARCH_HOSTS: '["https://opensearch:9200"]' + volumes: + - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml + networks: + - opensearch-net + depends_on: + - opensearch + + # opengauss vector database. + opengauss: + image: opengauss/opengauss:7.0.0-RC1 + profiles: + - opengauss + privileged: true + restart: always + environment: + GS_USERNAME: ${OPENGAUSS_USER:-postgres} + GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123} + GS_PORT: ${OPENGAUSS_PORT:-6600} + GS_DB: ${OPENGAUSS_DATABASE:-dify} + volumes: + - ./volumes/opengauss/data:/var/lib/opengauss/data + healthcheck: + test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"] + interval: 10s + timeout: 10s + retries: 10 + ports: + - ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600} + + # MyScale vector database + myscale: + container_name: myscale + image: myscale/myscaledb:1.6.4 + profiles: + - myscale + restart: always + tty: true + volumes: + - ./volumes/myscale/data:/var/lib/clickhouse + - ./volumes/myscale/log:/var/log/clickhouse-server + - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml + ports: + - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123} + + # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html + # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3 + container_name: elasticsearch + profiles: + - elasticsearch + - elasticsearch-ja + restart: always + volumes: + - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh + - dify_es01_data:/usr/share/elasticsearch/data + environment: + ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + VECTOR_STORE: ${VECTOR_STORE:-} + cluster.name: dify-es-cluster + node.name: dify-es0 + discovery.type: single-node + xpack.license.self_generated.type: basic + xpack.security.enabled: 'true' + xpack.security.enrollment.enabled: 'false' + xpack.security.http.ssl.enabled: 'false' + ports: + - ${ELASTICSEARCH_PORT:-9200}:9200 + deploy: + resources: + limits: + memory: 2g + entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] + healthcheck: + test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] + interval: 30s + timeout: 10s + retries: 50 + + # https://www.elastic.co/guide/en/kibana/current/docker.html + # https://www.elastic.co/guide/en/kibana/current/settings.html + kibana: + image: 
docker.elastic.co/kibana/kibana:8.14.3 + container_name: kibana + profiles: + - elasticsearch + depends_on: + - elasticsearch + restart: always + environment: + XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa + NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana + XPACK_SECURITY_ENABLED: 'true' + XPACK_SECURITY_ENROLLMENT_ENABLED: 'false' + XPACK_SECURITY_HTTP_SSL_ENABLED: 'false' + XPACK_FLEET_ISAIRGAPPED: 'true' + I18N_LOCALE: zh-CN + SERVER_PORT: '5601' + ELASTICSEARCH_HOSTS: http://elasticsearch:9200 + ports: + - ${KIBANA_PORT:-5601}:5601 + healthcheck: + test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] + interval: 30s + timeout: 10s + retries: 3 + + # unstructured . + # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.) + unstructured: + image: downloads.unstructured.io/unstructured-io/unstructured-api:latest + profiles: + - unstructured + restart: always + volumes: + - ./volumes/unstructured:/app/data + +networks: + # create a network between sandbox, api and ssrf_proxy, and can not access outside. + ssrf_proxy_network: + driver: bridge + internal: true + milvus: + driver: bridge + opensearch-net: + driver: bridge + internal: true + +volumes: + oradata: + dify_es01_data: diff --git a/xin-dify/data/elasticsearch/docker-entrypoint.sh b/xin-dify/data/elasticsearch/docker-entrypoint.sh new file mode 100755 index 0000000..6669aec --- /dev/null +++ b/xin-dify/data/elasticsearch/docker-entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -e + +if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then + # Check if the ICU tokenizer plugin is installed + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then + printf '%s\n' "Installing the ICU tokenizer plugin" + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then + printf '%s\n' "Failed to install the ICU tokenizer plugin" + exit 1 + fi + fi + # Check if the Japanese language analyzer plugin is installed + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then + printf '%s\n' "Installing the Japanese language analyzer plugin" + if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then + printf '%s\n' "Failed to install the Japanese language analyzer plugin" + exit 1 + fi + fi +fi + +# Run the original entrypoint script +exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh diff --git a/xin-dify/data/nginx/conf.d/default.conf.template b/xin-dify/data/nginx/conf.d/default.conf.template new file mode 100644 index 0000000..a458412 --- /dev/null +++ b/xin-dify/data/nginx/conf.d/default.conf.template @@ -0,0 +1,48 @@ +# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration. 
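+
+# The ${...} placeholders below are filled in by envsubst in nginx/docker-entrypoint.sh
+# at container start. For example, when NGINX_ENABLE_CERTBOT_CHALLENGE=true,
+# ${ACME_CHALLENGE_LOCATION} expands to:
+#   location /.well-known/acme-challenge/ { root /var/www/html; }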
+ +server { + listen ${NGINX_PORT}; + server_name ${NGINX_SERVER_NAME}; + + location /console/api { + proxy_pass http://api:5001; + include proxy.conf; + } + + location /api { + proxy_pass http://api:5001; + include proxy.conf; + } + + location /v1 { + proxy_pass http://api:5001; + include proxy.conf; + } + + location /files { + proxy_pass http://api:5001; + include proxy.conf; + } + + location /explore { + proxy_pass http://web:3000; + include proxy.conf; + } + + location /e/ { + proxy_pass http://plugin_daemon:5002; + proxy_set_header Dify-Hook-Url $scheme://$host$request_uri; + include proxy.conf; + } + + location / { + proxy_pass http://web:3000; + include proxy.conf; + } + + # placeholder for acme challenge location + ${ACME_CHALLENGE_LOCATION} + + # placeholder for https config defined in https.conf.template + ${HTTPS_CONFIG} +} diff --git a/xin-dify/data/nginx/docker-entrypoint.sh b/xin-dify/data/nginx/docker-entrypoint.sh new file mode 100755 index 0000000..8e1110f --- /dev/null +++ b/xin-dify/data/nginx/docker-entrypoint.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +HTTPS_CONFIG='' + +if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then + # Check if the certificate and key files for the specified domain exist + if [ -n "${CERTBOT_DOMAIN}" ] && \ + [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \ + [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then + SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" + SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" + else + SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}" + SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}" + fi + export SSL_CERTIFICATE_PATH + export SSL_CERTIFICATE_KEY_PATH + + # set the HTTPS_CONFIG environment variable to the content of the https.conf.template + HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template) + export HTTPS_CONFIG + # Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template + envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf +fi +export HTTPS_CONFIG + +if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then + ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }' +else + ACME_CHALLENGE_LOCATION='' +fi +export ACME_CHALLENGE_LOCATION + +env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -) + +envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf +envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf + +envsubst "$env_vars" < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf + +# Start Nginx using the default entrypoint +exec nginx -g 'daemon off;' \ No newline at end of file diff --git a/xin-dify/data/nginx/https.conf.template b/xin-dify/data/nginx/https.conf.template new file mode 100644 index 0000000..95ea36f --- /dev/null +++ b/xin-dify/data/nginx/https.conf.template @@ -0,0 +1,9 @@ +# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration. 
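+
+# This fragment is rendered with envsubst and injected into default.conf through the
+# ${HTTPS_CONFIG} placeholder when NGINX_HTTPS_ENABLED=true (see nginx/docker-entrypoint.sh).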
+ +listen ${NGINX_SSL_PORT} ssl; +ssl_certificate ${SSL_CERTIFICATE_PATH}; +ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH}; +ssl_protocols ${NGINX_SSL_PROTOCOLS}; +ssl_prefer_server_ciphers on; +ssl_session_cache shared:SSL:10m; +ssl_session_timeout 10m; \ No newline at end of file diff --git a/xin-dify/data/nginx/nginx.conf.template b/xin-dify/data/nginx/nginx.conf.template new file mode 100644 index 0000000..32a5716 --- /dev/null +++ b/xin-dify/data/nginx/nginx.conf.template @@ -0,0 +1,34 @@ +# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration. + +user nginx; +worker_processes ${NGINX_WORKER_PROCESSES}; + +error_log /var/log/nginx/error.log notice; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT}; + + #gzip on; + client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE}; + + include /etc/nginx/conf.d/*.conf; +} \ No newline at end of file diff --git a/xin-dify/data/nginx/proxy.conf.template b/xin-dify/data/nginx/proxy.conf.template new file mode 100644 index 0000000..117f806 --- /dev/null +++ b/xin-dify/data/nginx/proxy.conf.template @@ -0,0 +1,11 @@ +# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration. + +proxy_set_header Host $host; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header X-Forwarded-Port $server_port; +proxy_http_version 1.1; +proxy_set_header Connection ""; +proxy_buffering off; +proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT}; +proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT}; diff --git a/xin-dify/data/nginx/ssl/.gitkeep b/xin-dify/data/nginx/ssl/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/xin-dify/data/pgvector/docker-entrypoint.sh b/xin-dify/data/pgvector/docker-entrypoint.sh new file mode 100755 index 0000000..262eacf --- /dev/null +++ b/xin-dify/data/pgvector/docker-entrypoint.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +PG_MAJOR=16 + +if [ "${PG_BIGM}" = "true" ]; then + # install pg_bigm + apt-get update + apt-get install -y curl make gcc postgresql-server-dev-${PG_MAJOR} + + curl -LO https://github.com/pgbigm/pg_bigm/archive/refs/tags/v${PG_BIGM_VERSION}.tar.gz + tar xf v${PG_BIGM_VERSION}.tar.gz + cd pg_bigm-${PG_BIGM_VERSION} || exit 1 + make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config + make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config install + + cd - || exit 1 + rm -rf v${PG_BIGM_VERSION}.tar.gz pg_bigm-${PG_BIGM_VERSION} + + # enable pg_bigm + sed -i -e 's/^#\s*shared_preload_libraries.*/shared_preload_libraries = '\''pg_bigm'\''/' /var/lib/postgresql/data/pgdata/postgresql.conf +fi + +# Run the original entrypoint script +exec /usr/local/bin/docker-entrypoint.sh postgres diff --git a/xin-dify/data/ssrf_proxy/docker-entrypoint.sh b/xin-dify/data/ssrf_proxy/docker-entrypoint.sh new file mode 100755 index 0000000..613897b --- /dev/null +++ b/xin-dify/data/ssrf_proxy/docker-entrypoint.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Modified based on Squid OCI image entrypoint + +# This entrypoint aims to forward the squid logs to stdout to assist users of +# common container related tooling 
(e.g., kubernetes, docker-compose, etc) to +# access the service logs. + +# Moreover, it invokes the squid binary, leaving all the desired parameters to +# be provided by the "command" passed to the spawned container. If no command +# is provided by the user, the default behavior (as per the CMD statement in +# the Dockerfile) will be to use Ubuntu's default configuration [1] and run +# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided +# systemd unit. + +# [1] The default configuration is changed in the Dockerfile to allow local +# network connections. See the Dockerfile for further information. + +echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process" +if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then + /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1 +fi + +tail -F /var/log/squid/access.log 2>/dev/null & +tail -F /var/log/squid/error.log 2>/dev/null & +tail -F /var/log/squid/store.log 2>/dev/null & +tail -F /var/log/squid/cache.log 2>/dev/null & + +# Replace environment variables in the template and output to the squid.conf +echo "[ENTRYPOINT] replacing environment variables in the template" +awk '{ + while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) { + var = substr($0, RSTART+2, RLENGTH-3) + val = ENVIRON[var] + $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH) + } + print +}' /etc/squid/squid.conf.template > /etc/squid/squid.conf + +/usr/sbin/squid -Nz +echo "[ENTRYPOINT] starting squid" +/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1 diff --git a/xin-dify/data/ssrf_proxy/squid.conf.template b/xin-dify/data/ssrf_proxy/squid.conf.template new file mode 100644 index 0000000..c74c1fb --- /dev/null +++ b/xin-dify/data/ssrf_proxy/squid.conf.template @@ -0,0 +1,56 @@ +acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN) +acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN) +acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN) +acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines +acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN) +acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN) +acl localnet src fc00::/7 # RFC 4193 local private network range +acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines +acl SSL_ports port 443 +# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl CONNECT method CONNECT +acl allowed_domains dstdomain .marketplace.dify.ai +http_access allow allowed_domains +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localhost manager +http_access deny manager +http_access allow localhost +include /etc/squid/conf.d/*.conf +http_access deny all + +################################## Proxy Server ################################ +http_port ${HTTP_PORT} +coredump_dir ${COREDUMP_DIR} +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 
0 0% 0
+refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
+refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
+refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
+refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
+refresh_pattern . 0 20% 4320
+
+
+# cache_dir ufs /var/spool/squid 100 16 256
+# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
+# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
+
+################################## Reverse Proxy To Sandbox ################################
+http_port ${REVERSE_PROXY_PORT} accel vhost
+cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
+acl src_all src all
+http_access allow src_all
+
+# Without increasing this buffer size, uploading more than two files at once will fail.
+client_request_buffer_max_size 100 MB
\ No newline at end of file
diff --git a/xin-dify/data/startupscripts/init.sh b/xin-dify/data/startupscripts/init.sh
new file mode 100755
index 0000000..c6e6e19
--- /dev/null
+++ b/xin-dify/data/startupscripts/init.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+DB_INITIALIZED="/opt/oracle/oradata/dbinit"
+#[ -f ${DB_INITIALIZED} ] && exit
+#touch ${DB_INITIALIZED}
+if [ -f ${DB_INITIALIZED} ]; then
+  echo 'Init flag file exists; the database has already been initialized.'
+  exit
+else
+  echo 'Init flag file does not exist; running first-time database setup.'
+  "$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
+  touch ${DB_INITIALIZED}
+fi
diff --git a/xin-dify/data/startupscripts/init_user.script b/xin-dify/data/startupscripts/init_user.script
new file mode 100755
index 0000000..0c5bff1
--- /dev/null
+++ b/xin-dify/data/startupscripts/init_user.script
@@ -0,0 +1,10 @@
+show pdbs;
+ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
+alter session set container=freepdb1;
+create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
+grant DB_DEVELOPER_ROLE to dify;
+
+BEGIN
+CTX_DDL.CREATE_PREFERENCE('dify.world_lexer','WORLD_LEXER');
+END;
+/
diff --git a/xin-dify/data/tidb/config/pd.toml b/xin-dify/data/tidb/config/pd.toml
new file mode 100644
index 0000000..042b251
--- /dev/null
+++ b/xin-dify/data/tidb/config/pd.toml
@@ -0,0 +1,4 @@
+# PD Configuration File reference:
+# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file
+[replication]
+max-replicas = 1
\ No newline at end of file
diff --git a/xin-dify/data/tidb/config/tiflash-learner.toml b/xin-dify/data/tidb/config/tiflash-learner.toml
new file mode 100644
index 0000000..5098829
--- /dev/null
+++ b/xin-dify/data/tidb/config/tiflash-learner.toml
@@ -0,0 +1,13 @@
+# TiFlash tiflash-learner.toml Configuration File reference:
+# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file
+
+log-file = "/logs/tiflash_tikv.log"
+
+[server]
+engine-addr = "tiflash:4030"
+addr = "0.0.0.0:20280"
+advertise-addr = "tiflash:20280"
+status-addr = "tiflash:20292"
+
+[storage]
+data-dir = "/data/flash"
diff --git a/xin-dify/data/tidb/config/tiflash.toml b/xin-dify/data/tidb/config/tiflash.toml
new file mode 100644
index 0000000..30ac13e
--- /dev/null
+++ b/xin-dify/data/tidb/config/tiflash.toml
@@ -0,0 +1,19 @@
+# TiFlash tiflash.toml Configuration File reference:
+# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file
+
+listen_host = "0.0.0.0"
+path = "/data"
+
+[flash]
+tidb_status_addr = "tidb:10080"
+service_addr =
"tiflash:4030" + +[flash.proxy] +config = "/tiflash-learner.toml" + +[logger] +errorlog = "/logs/tiflash_error.log" +log = "/logs/tiflash.log" + +[raft] +pd_addr = "pd0:2379" diff --git a/xin-dify/data/tidb/docker-compose.yaml b/xin-dify/data/tidb/docker-compose.yaml new file mode 100644 index 0000000..fa15770 --- /dev/null +++ b/xin-dify/data/tidb/docker-compose.yaml @@ -0,0 +1,62 @@ +services: + pd0: + image: pingcap/pd:v8.5.1 + # ports: + # - "2379" + volumes: + - ./config/pd.toml:/pd.toml:ro + - ./volumes/data:/data + - ./volumes/logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380 + - --data-dir=/data/pd + - --config=/pd.toml + - --log-file=/logs/pd.log + restart: on-failure + tikv: + image: pingcap/tikv:v8.5.1 + volumes: + - ./volumes/data:/data + - ./volumes/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv:20160 + - --status-addr=tikv:20180 + - --data-dir=/data/tikv + - --pd=pd0:2379 + - --log-file=/logs/tikv.log + depends_on: + - "pd0" + restart: on-failure + tidb: + image: pingcap/tidb:v8.5.1 + # ports: + # - "4000:4000" + volumes: + - ./volumes/logs:/logs + command: + - --advertise-address=tidb + - --store=tikv + - --path=pd0:2379 + - --log-file=/logs/tidb.log + depends_on: + - "tikv" + restart: on-failure + tiflash: + image: pingcap/tiflash:v8.5.1 + volumes: + - ./config/tiflash.toml:/tiflash.toml:ro + - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro + - ./volumes/data:/data + - ./volumes/logs:/logs + command: + - --config=/tiflash.toml + depends_on: + - "tikv" + - "tidb" + restart: on-failure diff --git a/xin-dify/data/volumes/myscale/config/users.d/custom_users_config.xml b/xin-dify/data/volumes/myscale/config/users.d/custom_users_config.xml new file mode 100644 index 0000000..67f24b6 --- /dev/null +++ b/xin-dify/data/volumes/myscale/config/users.d/custom_users_config.xml @@ -0,0 +1,17 @@ + + + + + + ::1 + 127.0.0.1 + 10.0.0.0/8 + 172.16.0.0/12 + 192.168.0.0/16 + + default + default + 1 + + + \ No newline at end of file diff --git a/xin-dify/data/volumes/oceanbase/init.d/vec_memory.sql b/xin-dify/data/volumes/oceanbase/init.d/vec_memory.sql new file mode 100644 index 0000000..f4c283f --- /dev/null +++ b/xin-dify/data/volumes/oceanbase/init.d/vec_memory.sql @@ -0,0 +1 @@ +ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30; \ No newline at end of file diff --git a/xin-dify/data/volumes/opensearch/opensearch_dashboards.yml b/xin-dify/data/volumes/opensearch/opensearch_dashboards.yml new file mode 100644 index 0000000..f50d63b --- /dev/null +++ b/xin-dify/data/volumes/opensearch/opensearch_dashboards.yml @@ -0,0 +1,222 @@ +--- +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 + +# Description: +# Default configuration for OpenSearch Dashboards + +# OpenSearch Dashboards is served by a back end server. This setting specifies the port to use. +# server.port: 5601 + +# Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values. +# The default is 'localhost', which usually means remote machines will not be able to connect. +# To allow connections from remote users, set this parameter to a non-loopback address. +# server.host: "localhost" + +# Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy. 
+# Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath +# from requests it receives, and to prevent a deprecation warning at startup. +# This setting cannot end in a slash. +# server.basePath: "" + +# Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with +# `server.basePath` or require that they are rewritten by your reverse proxy. +# server.rewriteBasePath: false + +# The maximum payload size in bytes for incoming server requests. +# server.maxPayloadBytes: 1048576 + +# The OpenSearch Dashboards server's name. This is used for display purposes. +# server.name: "your-hostname" + +# The URLs of the OpenSearch instances to use for all your queries. +# opensearch.hosts: ["http://localhost:9200"] + +# OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and +# dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist. +# opensearchDashboards.index: ".opensearch_dashboards" + +# The default application to load. +# opensearchDashboards.defaultAppId: "home" + +# Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck. +# This settings should be used for large clusters or for clusters with ingest heavy nodes. +# It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes. +# +# It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting +# This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up +# e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id: +# Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here +# opensearch.optimizedHealthcheckId: "cluster_id" + +# If your OpenSearch is protected with basic authentication, these settings provide +# the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards +# index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which +# is proxied through the OpenSearch Dashboards server. +# opensearch.username: "opensearch_dashboards_system" +# opensearch.password: "pass" + +# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively. +# These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser. +# server.ssl.enabled: false +# server.ssl.certificate: /path/to/your/server.crt +# server.ssl.key: /path/to/your/server.key + +# Optional settings that provide the paths to the PEM-format SSL certificate and key files. +# These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when +# xpack.security.http.ssl.client_authentication in OpenSearch is set to required. +# opensearch.ssl.certificate: /path/to/your/client.crt +# opensearch.ssl.key: /path/to/your/client.key + +# Optional setting that enables you to specify a path to the PEM file for the certificate +# authority for your OpenSearch instance. +# opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ] + +# To disregard the validity of SSL certificates, change this setting's value to 'none'. 
+# opensearch.ssl.verificationMode: full + +# Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of +# the opensearch.requestTimeout setting. +# opensearch.pingTimeout: 1500 + +# Time in milliseconds to wait for responses from the back end or OpenSearch. This value +# must be a positive integer. +# opensearch.requestTimeout: 30000 + +# List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side +# headers, set this value to [] (an empty list). +# opensearch.requestHeadersWhitelist: [ authorization ] + +# Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten +# by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration. +# opensearch.customHeaders: {} + +# Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable. +# opensearch.shardTimeout: 30000 + +# Logs queries sent to OpenSearch. Requires logging.verbose set to true. +# opensearch.logQueries: false + +# Specifies the path where OpenSearch Dashboards creates the process ID file. +# pid.file: /var/run/opensearchDashboards.pid + +# Enables you to specify a file where OpenSearch Dashboards stores log output. +# logging.dest: stdout + +# Set the value of this setting to true to suppress all logging output. +# logging.silent: false + +# Set the value of this setting to true to suppress all logging output other than error messages. +# logging.quiet: false + +# Set the value of this setting to true to log all events, including system usage information +# and all requests. +# logging.verbose: false + +# Set the interval in milliseconds to sample system and process performance +# metrics. Minimum is 100ms. Defaults to 5000. +# ops.interval: 5000 + +# Specifies locale to be used for all localizable strings, dates and number formats. +# Supported languages are the following: English - en , by default , Chinese - zh-CN . +# i18n.locale: "en" + +# Set the allowlist to check input graphite Url. Allowlist is the default check list. +# vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite'] + +# Set the blocklist to check input graphite Url. Blocklist is an IP list. +# Below is an example for reference +# vis_type_timeline.graphiteBlockedIPs: [ +# //Loopback +# '127.0.0.0/8', +# '::1/128', +# //Link-local Address for IPv6 +# 'fe80::/10', +# //Private IP address for IPv4 +# '10.0.0.0/8', +# '172.16.0.0/12', +# '192.168.0.0/16', +# //Unique local address (ULA) +# 'fc00::/7', +# //Reserved IP address +# '0.0.0.0/8', +# '100.64.0.0/10', +# '192.0.0.0/24', +# '192.0.2.0/24', +# '198.18.0.0/15', +# '192.88.99.0/24', +# '198.51.100.0/24', +# '203.0.113.0/24', +# '224.0.0.0/4', +# '240.0.0.0/4', +# '255.255.255.255/32', +# '::/128', +# '2001:db8::/32', +# 'ff00::/8', +# ] +# vis_type_timeline.graphiteBlockedIPs: [] + +# opensearchDashboards.branding: +# logo: +# defaultUrl: "" +# darkModeUrl: "" +# mark: +# defaultUrl: "" +# darkModeUrl: "" +# loadingLogo: +# defaultUrl: "" +# darkModeUrl: "" +# faviconUrl: "" +# applicationTitle: "" + +# Set the value of this setting to true to capture region blocked warnings and errors +# for your map rendering services. +# map.showRegionBlockedWarning: false% + +# Set the value of this setting to false to suppress search usage telemetry +# for reducing the load of OpenSearch cluster. 
+# data.search.usageTelemetry.enabled: false + +# 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false' +# Set the value of this setting to false to disable VisBuilder +# functionality in Visualization. +# vis_builder.enabled: false + +# 2.4 New Experimental Feature +# Set the value of this setting to true to enable the experimental multiple data source +# support feature. Use with caution. +# data_source.enabled: false +# Set the value of these settings to customize crypto materials to encryption saved credentials +# in data sources. +# data_source.encryption.wrappingKeyName: 'changeme' +# data_source.encryption.wrappingKeyNamespace: 'changeme' +# data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + +# 2.6 New ML Commons Dashboards Feature +# Set the value of this setting to true to enable the ml commons dashboards +# ml_commons_dashboards.enabled: false + +# 2.12 New experimental Assistant Dashboards Feature +# Set the value of this setting to true to enable the assistant dashboards +# assistant.chat.enabled: false + +# 2.13 New Query Assistant Feature +# Set the value of this setting to false to disable the query assistant +# observability.query_assist.enabled: false + +# 2.14 Enable Ui Metric Collectors in Usage Collector +# Set the value of this setting to true to enable UI Metric collections +# usageCollection.uiMetric.enabled: false + +opensearch.hosts: [https://localhost:9200] +opensearch.ssl.verificationMode: none +opensearch.username: admin +opensearch.password: 'Qazwsxedc!@#123' +opensearch.requestHeadersWhitelist: [authorization, securitytenant] + +opensearch_security.multitenancy.enabled: true +opensearch_security.multitenancy.tenants.preferred: [Private, Global] +opensearch_security.readonly_mode.roles: [kibana_read_only] +# Use this setting if you are running opensearch-dashboards without https +opensearch_security.cookie.secure: false +server.host: '0.0.0.0' diff --git a/xin-dify/data/volumes/sandbox/conf/config.yaml b/xin-dify/data/volumes/sandbox/conf/config.yaml new file mode 100644 index 0000000..8c1a1de --- /dev/null +++ b/xin-dify/data/volumes/sandbox/conf/config.yaml @@ -0,0 +1,14 @@ +app: + port: 8194 + debug: True + key: dify-sandbox +max_workers: 4 +max_requests: 50 +worker_timeout: 5 +python_path: /usr/local/bin/python3 +enable_network: True # please make sure there is no network risk in your environment +allowed_syscalls: # please leave it empty if you have no idea how seccomp works +proxy: + socks5: '' + http: '' + https: '' diff --git a/xin-dify/data/volumes/sandbox/conf/config.yaml.example b/xin-dify/data/volumes/sandbox/conf/config.yaml.example new file mode 100644 index 0000000..f92c19e --- /dev/null +++ b/xin-dify/data/volumes/sandbox/conf/config.yaml.example @@ -0,0 +1,35 @@ +app: + port: 8194 + debug: True + key: dify-sandbox +max_workers: 4 +max_requests: 50 +worker_timeout: 5 +python_path: /usr/local/bin/python3 +python_lib_path: + - /usr/local/lib/python3.10 + - /usr/lib/python3.10 + - /usr/lib/python3 + - /usr/lib/x86_64-linux-gnu + - /etc/ssl/certs/ca-certificates.crt + - /etc/nsswitch.conf + - /etc/hosts + - /etc/resolv.conf + - /run/systemd/resolve/stub-resolv.conf + - /run/resolvconf/resolv.conf + - /etc/localtime + - /usr/share/zoneinfo + - /etc/timezone + # add more paths if needed +python_pip_mirror_url: https://pypi.tuna.tsinghua.edu.cn/simple +nodejs_path: /usr/local/bin/node +enable_network: True +allowed_syscalls: + - 1 + - 2 + - 3 + # add all the syscalls which you require 
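+  # note: syscall numbers are architecture-specific; on x86-64 the sample values
+  # above would be 1=write, 2=open, 3=close (an assumption; check your kernel's table)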
+proxy: + socks5: '' + http: '' + https: '' diff --git a/xin-dify/data/volumes/sandbox/dependencies/python-requirements.txt b/xin-dify/data/volumes/sandbox/dependencies/python-requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/xin-dify/docker-compose.yaml b/xin-dify/docker-compose.yaml new file mode 100644 index 0000000..f1b317c --- /dev/null +++ b/xin-dify/docker-compose.yaml @@ -0,0 +1,1124 @@ +# ================================================================== +# WARNING: This file is auto-generated by generate_docker_compose +# Do not modify this file directly. Instead, update the .env.example +# or docker-compose-template.yaml and regenerate this file. +# ================================================================== + +x-shared-env: &shared-api-worker-env + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} + SERVICE_API_URL: ${SERVICE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + APP_WEB_URL: ${APP_WEB_URL:-} + FILES_URL: ${FILES_URL:-} + LOG_LEVEL: ${LOG_LEVEL:-INFO} + LOG_FILE: ${LOG_FILE:-/app/logs/server.log} + LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} + LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} + LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} + LOG_TZ: ${LOG_TZ:-UTC} + DEBUG: ${DEBUG:-false} + FLASK_DEBUG: ${FLASK_DEBUG:-false} + SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} + INIT_PASSWORD: ${INIT_PASSWORD:-} + DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} + CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} + OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} + MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} + FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} + ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} + REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} + APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} + APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} + DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} + DIFY_PORT: ${DIFY_PORT:-5001} + SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} + SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent} + SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} + CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} + GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} + CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-} + CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false} + CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-} + CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-} + API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10} + API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60} + DB_USERNAME: ${DB_USERNAME:-postgres} + DB_PASSWORD: ${DB_PASSWORD:-difyai123456} + DB_HOST: ${DB_HOST:-db} + DB_PORT: ${DB_PORT:-5432} + DB_DATABASE: ${DB_DATABASE:-dify} + SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} + SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} + SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} + POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100} + POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} + POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} + POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} + POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} + REDIS_HOST: ${REDIS_HOST:-redis} + REDIS_PORT: ${REDIS_PORT:-6379} + REDIS_USERNAME: ${REDIS_USERNAME:-} + REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} + REDIS_USE_SSL: ${REDIS_USE_SSL:-false} + REDIS_DB: ${REDIS_DB:-0} + REDIS_USE_SENTINEL: 
${REDIS_USE_SENTINEL:-false} + REDIS_SENTINELS: ${REDIS_SENTINELS:-} + REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-} + REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} + REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} + REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} + REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} + REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} + REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} + CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} + BROKER_USE_SSL: ${BROKER_USE_SSL:-false} + CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} + CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-} + CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} + WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} + CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} + STORAGE_TYPE: ${STORAGE_TYPE:-opendal} + OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} + OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} + S3_ENDPOINT: ${S3_ENDPOINT:-} + S3_REGION: ${S3_REGION:-us-east-1} + S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} + S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} + S3_SECRET_KEY: ${S3_SECRET_KEY:-} + S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} + AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} + AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} + AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container} + AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} + GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} + GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} + ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} + ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key} + ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} + ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} + ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} + ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} + ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} + TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} + TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} + TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} + TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} + TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} + OCI_ENDPOINT: ${OCI_ENDPOINT:-https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com} + OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} + OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} + OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} + OCI_REGION: ${OCI_REGION:-us-ashburn-1} + HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} + HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} + HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} + HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} + VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} + VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} + VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} + VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} + VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} + BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} + BAIDU_OBS_SECRET_KEY: 
${BAIDU_OBS_SECRET_KEY:-your-secret-key} + BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} + BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} + SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} + SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} + SUPABASE_URL: ${SUPABASE_URL:-your-server-url} + VECTOR_STORE: ${VECTOR_STORE:-weaviate} + WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} + WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333} + QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} + QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} + QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} + QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} + MILVUS_URI: ${MILVUS_URI:-http://host.docker.internal:19530} + MILVUS_TOKEN: ${MILVUS_TOKEN:-} + MILVUS_USER: ${MILVUS_USER:-} + MILVUS_PASSWORD: ${MILVUS_PASSWORD:-} + MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} + MYSCALE_HOST: ${MYSCALE_HOST:-myscale} + MYSCALE_PORT: ${MYSCALE_PORT:-8123} + MYSCALE_USER: ${MYSCALE_USER:-default} + MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} + MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify} + MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} + COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} + COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} + COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} + COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} + COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} + PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} + PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} + PGVECTOR_USER: ${PGVECTOR_USER:-postgres} + PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} + PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} + PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} + PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} + PGVECTOR_PG_BIGM: ${PGVECTOR_PG_BIGM:-false} + PGVECTOR_PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606} + PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} + PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} + PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} + PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} + PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} + ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} + ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} + ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} + ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} + ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} + ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} + ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} + ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} + ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com} + ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} + ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} + ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} + TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} + TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} + TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} + TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} + TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} + TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} + TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} + TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} + TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} + TIDB_ON_QDRANT_GRPC_PORT: 
${TIDB_ON_QDRANT_GRPC_PORT:-6334} + TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} + TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} + TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} + TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} + TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} + TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} + TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} + CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} + CHROMA_PORT: ${CHROMA_PORT:-8000} + CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant} + CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} + CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} + CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} + ORACLE_USER: ${ORACLE_USER:-dify} + ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} + ORACLE_DSN: ${ORACLE_DSN:-oracle:1521/FREEPDB1} + ORACLE_CONFIG_DIR: ${ORACLE_CONFIG_DIR:-/app/api/storage/wallet} + ORACLE_WALLET_LOCATION: ${ORACLE_WALLET_LOCATION:-/app/api/storage/wallet} + ORACLE_WALLET_PASSWORD: ${ORACLE_WALLET_PASSWORD:-dify} + ORACLE_IS_AUTONOMOUS: ${ORACLE_IS_AUTONOMOUS:-false} + RELYT_HOST: ${RELYT_HOST:-db} + RELYT_PORT: ${RELYT_PORT:-5432} + RELYT_USER: ${RELYT_USER:-postgres} + RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} + RELYT_DATABASE: ${RELYT_DATABASE:-postgres} + OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} + OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} + OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} + OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} + OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} + TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} + TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} + TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} + TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify} + TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} + TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} + TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} + TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH: ${TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH:-false} + ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} + ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} + ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} + ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} + KIBANA_PORT: ${KIBANA_PORT:-5601} + BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} + BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} + BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} + BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} + BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} + BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} + BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} + VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} + VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} + VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} + VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} + VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} + VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} + VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} + LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070} + LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm} + LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm} + OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} + OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} + OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} + OCEANBASE_VECTOR_PASSWORD: 
${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} + OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + OCEANBASE_ENABLE_HYBRID_SEARCH: ${OCEANBASE_ENABLE_HYBRID_SEARCH:-false} + OPENGAUSS_HOST: ${OPENGAUSS_HOST:-opengauss} + OPENGAUSS_PORT: ${OPENGAUSS_PORT:-6600} + OPENGAUSS_USER: ${OPENGAUSS_USER:-postgres} + OPENGAUSS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123} + OPENGAUSS_DATABASE: ${OPENGAUSS_DATABASE:-dify} + OPENGAUSS_MIN_CONNECTION: ${OPENGAUSS_MIN_CONNECTION:-1} + OPENGAUSS_MAX_CONNECTION: ${OPENGAUSS_MAX_CONNECTION:-5} + OPENGAUSS_ENABLE_PQ: ${OPENGAUSS_ENABLE_PQ:-false} + UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} + UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} + TABLESTORE_ENDPOINT: ${TABLESTORE_ENDPOINT:-https://instance-name.cn-hangzhou.ots.aliyuncs.com} + TABLESTORE_INSTANCE_NAME: ${TABLESTORE_INSTANCE_NAME:-instance-name} + TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx} + TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx} + UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} + UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} + ETL_TYPE: ${ETL_TYPE:-dify} + UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} + UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} + SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} + PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} + CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024} + PLUGIN_BASED_TOKEN_COUNTING_ENABLED: ${PLUGIN_BASED_TOKEN_COUNTING_ENABLED:-false} + MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} + UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} + UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} + UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} + SENTRY_DSN: ${SENTRY_DSN:-} + API_SENTRY_DSN: ${API_SENTRY_DSN:-} + API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} + NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} + NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} + NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} + MAIL_TYPE: ${MAIL_TYPE:-resend} + MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} + RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com} + RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} + SMTP_SERVER: ${SMTP_SERVER:-} + SMTP_PORT: ${SMTP_PORT:-465} + SMTP_USERNAME: ${SMTP_USERNAME:-} + SMTP_PASSWORD: ${SMTP_PASSWORD:-} + SMTP_USE_TLS: ${SMTP_USE_TLS:-true} + SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} + INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} + RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} + CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} + CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox} + CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} + CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} + CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} + CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} + CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000} + CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} + CODE_MAX_OBJECT_ARRAY_LENGTH: 
${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} + CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} + CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} + CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} + CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10} + TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000} + WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} + WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} + WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} + MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} + WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3} + WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} + HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} + HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} + HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True} + SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} + SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} + LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} + MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} + MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} + MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + PGUSER: ${PGUSER:-${DB_USERNAME}} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}} + POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} + SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} + SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} + SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} + SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} + SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} + SANDBOX_PORT: ${SANDBOX_PORT:-8194} + WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} + WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} + WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} + WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} + WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} + WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} + WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} + WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} + WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} + WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + MINIO_ACCESS_KEY: 
${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} + PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres} + PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} + OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} + OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} + OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} + OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} + NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} + NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} + NGINX_PORT: ${NGINX_PORT:-80} + NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} + NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} + NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} + NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3} + NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} + NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M} + NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} + NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} + NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} + NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com} + CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com} + CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} + SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128} + SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} + SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} + SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} + SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5} + SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} + SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} + SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5} + EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-20080} + EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-20443} + POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} + POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} + POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} + POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} + POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-} + POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} + CSP_WHITELIST: ${CSP_WHITELIST:-} + CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} + MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} + EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002} + PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} + PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + PLUGIN_PPROF_ENABLED: 
${PLUGIN_PPROF_ENABLED:-false} + PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} + PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} + EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} + ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} + MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} + PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} + PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} + PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} + PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local} + PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage} + PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} + PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin} + PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages} + PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets} + PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-} + PLUGIN_S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false} + PLUGIN_S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-} + PLUGIN_S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false} + PLUGIN_AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-} + PLUGIN_AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-} + PLUGIN_AWS_REGION: ${PLUGIN_AWS_REGION:-} + PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-} + PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-} + PLUGIN_TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-} + PLUGIN_TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-} + PLUGIN_TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-} + ENABLE_OTEL: ${ENABLE_OTEL:-false} + OTLP_BASE_ENDPOINT: ${OTLP_BASE_ENDPOINT:-http://localhost:4318} + OTLP_API_KEY: ${OTLP_API_KEY:-} + OTEL_EXPORTER_TYPE: ${OTEL_EXPORTER_TYPE:-otlp} + OTEL_SAMPLING_RATE: ${OTEL_SAMPLING_RATE:-0.1} + OTEL_BATCH_EXPORT_SCHEDULE_DELAY: ${OTEL_BATCH_EXPORT_SCHEDULE_DELAY:-5000} + OTEL_MAX_QUEUE_SIZE: ${OTEL_MAX_QUEUE_SIZE:-2048} + OTEL_MAX_EXPORT_BATCH_SIZE: ${OTEL_MAX_EXPORT_BATCH_SIZE:-512} + OTEL_METRIC_EXPORT_INTERVAL: ${OTEL_METRIC_EXPORT_INTERVAL:-60000} + OTEL_BATCH_EXPORT_TIMEOUT: ${OTEL_BATCH_EXPORT_TIMEOUT:-10000} + OTEL_METRIC_EXPORT_TIMEOUT: ${OTEL_METRIC_EXPORT_TIMEOUT:-30000} + +services: + # API service + api: + image: langgenius/dify-api:1.2.0 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'api' starts the API server. 
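+      # (The api and worker services run the same image; MODE selects which
+      # role the container takes at startup.)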
+ MODE: api + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} + PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ${APP_DATA_DIR}/volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # worker service + # The Celery worker for processing the queue. + worker: + image: langgenius/dify-api:1.2.0 + restart: always + environment: + # Use the shared environment variables. + <<: *shared-api-worker-env + # Startup mode, 'worker' starts the Celery worker for processing the queue. + MODE: worker + SENTRY_DSN: ${API_SENTRY_DSN:-} + SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} + SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} + PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} + INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} + depends_on: + - db + - redis + volumes: + # Mount the storage directory to the container, for storing user files. + - ${APP_DATA_DIR}/volumes/app/storage:/app/api/storage + networks: + - ssrf_proxy_network + - default + + # Frontend web application. + web: + image: langgenius/dify-web:1.2.0 + restart: always + environment: + CONSOLE_API_URL: ${CONSOLE_API_URL:-} + APP_API_URL: ${APP_API_URL:-} + SENTRY_DSN: ${WEB_SENTRY_DSN:-} + NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0} + TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} + CSP_WHITELIST: ${CSP_WHITELIST:-} + MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + PM2_INSTANCES: ${PM2_INSTANCES:-2} + LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} + MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} + MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} + MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5} + + # The postgres database. + db: + image: postgres:15-alpine + restart: always + environment: + PGUSER: ${PGUSER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456} + POSTGRES_DB: ${POSTGRES_DB:-dify} + PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} + command: > + postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}' + -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}' + -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}' + -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}' + -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}' + volumes: + - ${APP_DATA_DIR}/volumes/db/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ] + interval: 1s + timeout: 3s + retries: 60 + + # The redis cache. + redis: + image: redis:6-alpine + restart: always + environment: + REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456} + volumes: + # Mount the redis data directory to the container. 
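+      # (APP_DATA_DIR is expected to be supplied by the Umbrel runtime; all of
+      # this app's persistent state lives beneath it.)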
+      - ${APP_DATA_DIR}/volumes/redis/data:/data
+    # Set the redis password when starting the redis server.
+    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
+    healthcheck:
+      test: [ 'CMD', 'redis-cli', 'ping' ]
+
+  # The DifySandbox
+  sandbox:
+    image: langgenius/dify-sandbox:0.2.11
+    restart: always
+    environment:
+      # The DifySandbox configurations
+      # Make sure to change this key to a strong one for your deployment.
+      # You can generate a strong key using `openssl rand -base64 42`.
+      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
+      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
+      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
+      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
+      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
+      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
+      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
+    volumes:
+      - ${APP_DATA_DIR}/volumes/sandbox/dependencies:/dependencies
+      - ${APP_DATA_DIR}/volumes/sandbox/conf:/conf
+    healthcheck:
+      test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
+    networks:
+      - ssrf_proxy_network
+
+  # plugin daemon
+  plugin_daemon:
+    image: langgenius/dify-plugin-daemon:0.0.7-local
+    restart: always
+    environment:
+      # Use the shared environment variables.
+      <<: *shared-api-worker-env
+      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
+      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
+      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
+      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
+      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
+      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
+      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
+      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
+      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
+      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
+      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
+      PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
+      PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
+      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
+      PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
+      PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
+      PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
+      PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
+      PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
+      PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
+      S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
+      S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
+      S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
+      AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
+      AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
+      AWS_REGION: ${PLUGIN_AWS_REGION:-}
+      AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
+      AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
+      TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
+      TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
+      TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
+    ports:
+      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
+    volumes:
+      - ${APP_DATA_DIR}/volumes/plugin_daemon:/app/storage
+    depends_on:
+      db:
+        condition: service_healthy
+
+  # ssrf_proxy server
+  # for more information, please refer to
+  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
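+  # Outbound requests from the sandbox (see the HTTP_PROXY/HTTPS_PROXY settings
+  # above) are routed through this proxy so that untrusted code cannot reach
+  # internal services directly.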
ssrf_proxy:
+    image: ubuntu/squid:latest
+    restart: always
+    volumes:
+      - ${APP_DATA_DIR}/ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
+      - ${APP_DATA_DIR}/ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
+    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+    environment:
+      # please modify the squid env vars to fit your network environment.
+      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
+      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
+      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
+      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
+      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
+    networks:
+      - ssrf_proxy_network
+      - default
+
+  # Certbot service
+  # use `docker-compose --profile certbot up` to start the certbot service.
+  certbot:
+    image: certbot/certbot
+    profiles:
+      - certbot
+    volumes:
+      - ${APP_DATA_DIR}/volumes/certbot/conf:/etc/letsencrypt
+      - ${APP_DATA_DIR}/volumes/certbot/www:/var/www/html
+      - ${APP_DATA_DIR}/volumes/certbot/logs:/var/log/letsencrypt
+      - ${APP_DATA_DIR}/volumes/certbot/conf/live:/etc/letsencrypt/live
+      - ${APP_DATA_DIR}/certbot/update-cert.template.txt:/update-cert.template.txt
+      - ${APP_DATA_DIR}/certbot/docker-entrypoint.sh:/docker-entrypoint.sh
+    environment:
+      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
+      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
+      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
+    entrypoint: [ '/docker-entrypoint.sh' ]
+    command: [ 'tail', '-f', '/dev/null' ]
+
+  # The nginx reverse proxy.
+  # Used for reverse proxying the API service and Web service.
+  nginx:
+    image: nginx:latest
+    restart: always
+    volumes:
+      - ${APP_DATA_DIR}/nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
+      - ${APP_DATA_DIR}/nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
+      - ${APP_DATA_DIR}/nginx/https.conf.template:/etc/nginx/https.conf.template
+      - ${APP_DATA_DIR}/nginx/conf.d:/etc/nginx/conf.d
+      - ${APP_DATA_DIR}/nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
+      - ${APP_DATA_DIR}/nginx/ssl:/etc/ssl # cert dir (legacy)
+      - ${APP_DATA_DIR}/volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
+      - ${APP_DATA_DIR}/volumes/certbot/conf:/etc/letsencrypt
+      - ${APP_DATA_DIR}/volumes/certbot/www:/var/www/html
+    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
+    environment:
+      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
+      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
+      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
+      NGINX_PORT: ${NGINX_PORT:-80}
+      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
+      # and modify the env vars below in .env if NGINX_HTTPS_ENABLED is true.
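+      # (When the certbot profile is used, certbot writes fullchain.pem and
+      # privkey.pem under /etc/letsencrypt/live/<CERTBOT_DOMAIN>, mounted above;
+      # point the filenames below at those files in that case.)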
+      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
+      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
+      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
+      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
+      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
+      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
+      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
+      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
+      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
+      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
+    depends_on:
+      - api
+      - web
+    ports:
+      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
+      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
+
+  # The Weaviate vector store.
+  weaviate:
+    image: semitechnologies/weaviate:1.19.0
+    profiles:
+      - ''
+      - weaviate
+    restart: always
+    volumes:
+      # Mount the Weaviate data directory to the container.
+      - ${APP_DATA_DIR}/volumes/weaviate:/var/lib/weaviate
+    environment:
+      # The Weaviate configurations
+      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
+      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
+      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
+      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
+      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
+      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
+      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
+      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
+      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
+      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
+      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
+
+  # Qdrant vector store.
+  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
+  qdrant:
+    image: langgenius/qdrant:v1.7.3
+    profiles:
+      - qdrant
+    restart: always
+    volumes:
+      - ${APP_DATA_DIR}/volumes/qdrant:/qdrant/storage
+    environment:
+      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
+
+  # The Couchbase vector store.
+  couchbase-server:
+    build: ./couchbase-server
+    profiles:
+      - couchbase
+    restart: always
+    environment:
+      - CLUSTER_NAME=dify_search
+      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
+      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
+      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
+      - COUCHBASE_BUCKET_RAMSIZE=512
+      - COUCHBASE_RAM_SIZE=2048
+      - COUCHBASE_EVENTING_RAM_SIZE=512
+      - COUCHBASE_INDEX_RAM_SIZE=512
+      - COUCHBASE_FTS_RAM_SIZE=1024
+    hostname: couchbase-server
+    container_name: couchbase-server
+    working_dir: /opt/couchbase
+    stdin_open: true
+    tty: true
+    entrypoint: [ "" ]
+    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
+    volumes:
+      - ${APP_DATA_DIR}/volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
+    healthcheck:
+      # ensure the bucket was created before proceeding
+      test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
+      interval: 10s
+      retries: 10
+      start_period: 30s
+      timeout: 10s
+
+  # The pgvector vector database.
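+  # (if used, you need to set VECTOR_STORE to pgvector in the api & worker service.)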
+ pgvector: + image: pgvector/pgvector:pg16 + profiles: + - pgvector + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + # pg_bigm module for full text search + PG_BIGM: ${PGVECTOR_PG_BIGM:-false} + PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606} + volumes: + - ${APP_DATA_DIR}/volumes/pgvector/data:/var/lib/postgresql/data + - ${APP_DATA_DIR}/pgvector/docker-entrypoint.sh:/docker-entrypoint.sh + entrypoint: [ '/docker-entrypoint.sh' ] + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # pgvecto-rs vector store + pgvecto-rs: + image: tensorchord/pgvecto-rs:pg16-v0.3.0 + profiles: + - pgvecto-rs + restart: always + environment: + PGUSER: ${PGVECTOR_PGUSER:-postgres} + # The password for the default postgres user. + POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} + # The name of the default postgres database. + POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} + # postgres data directory + PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} + volumes: + - ${APP_DATA_DIR}/volumes/pgvecto_rs/data:/var/lib/postgresql/data + healthcheck: + test: [ 'CMD', 'pg_isready' ] + interval: 1s + timeout: 3s + retries: 30 + + # Chroma vector database + chroma: + image: ghcr.io/chroma-core/chroma:0.5.20 + profiles: + - chroma + restart: always + volumes: + - ${APP_DATA_DIR}/volumes/chroma:/chroma/chroma + environment: + CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} + CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} + IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} + + # OceanBase vector database + oceanbase: + image: oceanbase/oceanbase-ce:4.3.5.1-101000042025031818 + container_name: oceanbase + profiles: + - oceanbase + restart: always + volumes: + - ${APP_DATA_DIR}/volumes/oceanbase/data:/root/ob + - ${APP_DATA_DIR}/volumes/oceanbase/conf:/root/.obd/cluster + - ${APP_DATA_DIR}/volumes/oceanbase/init.d:/root/boot/init.d + environment: + OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} + OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} + OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} + MODE: MINI + ports: + - "${OCEANBASE_VECTOR_PORT:-2881}:2881" + + # Oracle vector database + oracle: + image: container-registry.oracle.com/database/free:latest + profiles: + - oracle + restart: always + volumes: + - source: oradata + type: volume + target: /opt/oracle/oradata + - ${APP_DATA_DIR}/startupscripts:/opt/oracle/scripts/startup + environment: + ORACLE_PWD: ${ORACLE_PWD:-Dify123456} + ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} + + # Milvus vector database services + etcd: + container_name: milvus-etcd + image: quay.io/coreos/etcd:v3.5.5 + profiles: + - milvus + environment: + ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} + ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} + ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} + ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} + volumes: + - ${APP_DATA_DIR}/volumes/milvus/etcd:/etcd + command: etcd 
-advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd + healthcheck: + test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + minio: + container_name: milvus-minio + image: minio/minio:RELEASE.2023-03-20T20-16-18Z + profiles: + - milvus + environment: + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} + volumes: + - ${APP_DATA_DIR}/volumes/milvus/minio:/minio_data + command: minio server /minio_data --console-address ":9001" + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + interval: 30s + timeout: 20s + retries: 3 + networks: + - milvus + + milvus-standalone: + container_name: milvus-standalone + image: milvusdb/milvus:v2.5.0-beta + profiles: + - milvus + command: [ 'milvus', 'run', 'standalone' ] + environment: + ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} + MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} + common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true} + volumes: + - ${APP_DATA_DIR}/volumes/milvus/milvus:/var/lib/milvus + healthcheck: + test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + interval: 30s + start_period: 90s + timeout: 20s + retries: 3 + depends_on: + - etcd + - minio + ports: + - 19530:19530 + - 9091:9091 + networks: + - milvus + + # Opensearch vector database + opensearch: + container_name: opensearch + image: opensearchproject/opensearch:latest + profiles: + - opensearch + environment: + discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} + bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} + OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m} + OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} + ulimits: + memlock: + soft: ${OPENSEARCH_MEMLOCK_SOFT:--1} + hard: ${OPENSEARCH_MEMLOCK_HARD:--1} + nofile: + soft: ${OPENSEARCH_NOFILE_SOFT:-65536} + hard: ${OPENSEARCH_NOFILE_HARD:-65536} + volumes: + - ${APP_DATA_DIR}/volumes/opensearch/data:/usr/share/opensearch/data + networks: + - opensearch-net + + opensearch-dashboards: + container_name: opensearch-dashboards + image: opensearchproject/opensearch-dashboards:latest + profiles: + - opensearch + environment: + OPENSEARCH_HOSTS: '["https://opensearch:9200"]' + volumes: + - ${APP_DATA_DIR}/volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml + networks: + - opensearch-net + depends_on: + - opensearch + + # opengauss vector database. 
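+  # (if used, you need to set VECTOR_STORE to opengauss in the api & worker service.)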
+  opengauss:
+    image: opengauss/opengauss:7.0.0-RC1
+    profiles:
+      - opengauss
+    privileged: true
+    restart: always
+    environment:
+      GS_USERNAME: ${OPENGAUSS_USER:-postgres}
+      GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
+      GS_PORT: ${OPENGAUSS_PORT:-6600}
+      GS_DB: ${OPENGAUSS_DATABASE:-dify}
+    volumes:
+      - ${APP_DATA_DIR}/volumes/opengauss/data:/var/lib/opengauss/data
+    healthcheck:
+      test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"]
+      interval: 10s
+      timeout: 10s
+      retries: 10
+    ports:
+      - ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}
+
+  # MyScale vector database
+  myscale:
+    container_name: myscale
+    image: myscale/myscaledb:1.6.4
+    profiles:
+      - myscale
+    restart: always
+    tty: true
+    volumes:
+      - ${APP_DATA_DIR}/volumes/myscale/data:/var/lib/clickhouse
+      - ${APP_DATA_DIR}/volumes/myscale/log:/var/log/clickhouse-server
+      - ${APP_DATA_DIR}/volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
+    ports:
+      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}
+
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
+  elasticsearch:
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
+    container_name: elasticsearch
+    profiles:
+      - elasticsearch
+      - elasticsearch-ja
+    restart: always
+    volumes:
+      - ${APP_DATA_DIR}/elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
+      - dify_es01_data:/usr/share/elasticsearch/data
+    environment:
+      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
+      VECTOR_STORE: ${VECTOR_STORE:-}
+      cluster.name: dify-es-cluster
+      node.name: dify-es0
+      discovery.type: single-node
+      xpack.license.self_generated.type: basic
+      xpack.security.enabled: 'true'
+      xpack.security.enrollment.enabled: 'false'
+      xpack.security.http.ssl.enabled: 'false'
+    ports:
+      - ${ELASTICSEARCH_PORT:-9200}:9200
+    deploy:
+      resources:
+        limits:
+          memory: 2g
+    entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
+    healthcheck:
+      test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
+      interval: 30s
+      timeout: 10s
+      retries: 50
+
+  # https://www.elastic.co/guide/en/kibana/current/docker.html
+  # https://www.elastic.co/guide/en/kibana/current/settings.html
+  kibana:
+    image: docker.elastic.co/kibana/kibana:8.14.3
+    container_name: kibana
+    profiles:
+      - elasticsearch
+    depends_on:
+      - elasticsearch
+    restart: always
+    environment:
+      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
+      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
+      XPACK_SECURITY_ENABLED: 'true'
+      XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
+      XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
+      XPACK_FLEET_ISAIRGAPPED: 'true'
+      I18N_LOCALE: zh-CN
+      SERVER_PORT: '5601'
+      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
+    ports:
+      - ${KIBANA_PORT:-5601}:5601
+    healthcheck:
+      test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+
+  # unstructured
+  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
+  unstructured:
+    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
+    profiles:
+      - unstructured
+    restart: always
+    volumes:
+      - ${APP_DATA_DIR}/volumes/unstructured:/app/data
+
+networks:
+  # create a network between sandbox, api and ssrf_proxy that cannot access the outside.
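+  # (`internal: true` below removes the default route, so containers on this
+  # network cannot reach external hosts.)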
+  ssrf_proxy_network:
+    driver: bridge
+    internal: true
+  milvus:
+    driver: bridge
+  opensearch-net:
+    driver: bridge
+    internal: true
+
+volumes:
+  oradata:
+  dify_es01_data:
diff --git a/xin-dify/umbrel-app.yml b/xin-dify/umbrel-app.yml
new file mode 100644
index 0000000..6aa2bce
--- /dev/null
+++ b/xin-dify/umbrel-app.yml
@@ -0,0 +1,28 @@
+manifestVersion: 1
+id: xin-dify
+name: Dify
+tagline: An open-source LLM app development platform
+icon: https://cloud.dify.ai/logo/logo.png
+category: Development
+version: "1.2.0"
+port: 20080
+description: >-
+  Dify is an open-source LLM app development platform.
+  Its intuitive interface combines agentic AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production.
+
+developer: dify.ai
+website: https://dify.ai/
+submitter: xin
+submission: https://github.com/getumbrel/umbrel-apps/pulls
+repo: https://github.com/langgenius/dify
+support: https://github.com/langgenius/dify/issues
+gallery:
+  - https://framerusercontent.com/images/nuR2Y1onHFdk6QAdFxSBFLMLHr8.png
+  - https://framerusercontent.com/images/gyg9CGdxDSKo0v5kmQeTVXR998.png
+  - https://framerusercontent.com/images/cbqqxOjnHAZK0bXOSGWSwCZ4Lw.png
+releaseNotes: >-
+  We're excited to share the Dify v1.2.0 update with you; it's packed with impressive new features, enhancements, and of course, some good ol' bug fixes.
+dependencies: []
+path: ""
+defaultUsername: ""
+defaultPassword: ""
\ No newline at end of file