add dify app

This commit is contained in:
parent 7e53f54e9e
commit 481c98aa42
76 xin-dify/data/certbot/README.md Normal file
@@ -0,0 +1,76 @@
# Launching new servers with SSL certificates

## Short description

Docker Compose certbot configuration with backward compatibility (it still works without the certbot container).

Use `docker compose --profile certbot up` to enable this feature.

## The simplest way to launch new servers with SSL certificates

1. Get Let's Encrypt certificates.

   Set these `.env` values:

   ```properties
   NGINX_SSL_CERT_FILENAME=fullchain.pem
   NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
   NGINX_ENABLE_CERTBOT_CHALLENGE=true
   CERTBOT_DOMAIN=your_domain.com
   CERTBOT_EMAIL=example@your_domain.com
   ```

   Execute the command:

   ```shell
   docker network prune
   docker compose --profile certbot up --force-recreate -d
   ```

   Then, after the containers have launched:

   ```shell
   docker compose exec -it certbot /bin/sh /update-cert.sh
   ```

2. Edit the `.env` file and run `docker compose --profile certbot up` again.

   Additionally set this `.env` value:

   ```properties
   NGINX_HTTPS_ENABLED=true
   ```

   Execute the command:

   ```shell
   docker compose --profile certbot up -d --no-deps --force-recreate nginx
   ```

   Then you can access your server over HTTPS:

   [https://your_domain.com](https://your_domain.com)

## SSL certificate renewal

To renew the SSL certificates, execute the commands below:

```shell
docker compose exec -it certbot /bin/sh /update-cert.sh
docker compose exec nginx nginx -s reload
```
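Renewal can also be scheduled from the host. Below is a minimal sketch using cron; the project path `/path/to/dify/docker` is a placeholder for wherever this compose project actually lives.

```shell
# Hypothetical crontab entry: attempt renewal twice a month at 03:00,
# then reload nginx so the renewed certificate is picked up.
0 3 1,15 * * cd /path/to/dify/docker && docker compose exec certbot /bin/sh /update-cert.sh && docker compose exec nginx nginx -s reload
```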
## Options for certbot

The `CERTBOT_OPTIONS` key can be helpful for testing, e.g.:

```properties
CERTBOT_OPTIONS=--dry-run
```

To apply changes to `CERTBOT_OPTIONS`, recreate the certbot container before updating the certificates:

```shell
docker compose --profile certbot up -d --no-deps --force-recreate certbot
docker compose exec -it certbot /bin/sh /update-cert.sh
```

Then reload the nginx container if necessary:

```shell
docker compose exec nginx nginx -s reload
```

## For legacy servers

To keep using the cert files in the `nginx/ssl` directory as before, simply launch the containers WITHOUT the `--profile certbot` option:

```shell
docker compose up -d
```
30 xin-dify/data/certbot/docker-entrypoint.sh Executable file
@@ -0,0 +1,30 @@
#!/bin/sh
set -e

printf '%s\n' "Docker entrypoint script is running"

# Note: with a '%s' format printf prints "\n" literally, so blank lines
# are emitted via the format string instead.
printf '\n%s\n' "Checking specific environment variables:"
printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"

printf '\n%s\n' "Checking mounted directories:"
for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
    if [ -d "$dir" ]; then
        printf '%s\n' "$dir exists. Contents:"
        ls -la "$dir"
    else
        printf '%s\n' "$dir does not exist."
    fi
done

# Substitute the CERTBOT_* variables into the template to produce the
# runnable update script.
printf '\n%s\n' "Generating update-cert.sh from template"
sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
    -e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
    -e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
    /update-cert.template.txt > /update-cert.sh

chmod +x /update-cert.sh

printf '\n%s %s\n' "Executing command:" "$*"
exec "$@"
19 xin-dify/data/certbot/update-cert.template.txt Executable file
@@ -0,0 +1,19 @@
#!/bin/bash
set -e

DOMAIN="${CERTBOT_DOMAIN}"
EMAIL="${CERTBOT_EMAIL}"
OPTIONS="${CERTBOT_OPTIONS}"
CERT_NAME="${DOMAIN}"  # Use the domain name as the certificate name

# Check if the certificate already exists
if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
    echo "Certificate exists. Attempting to renew..."
    certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
else
    echo "Certificate does not exist. Obtaining a new certificate..."
    certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
fi
echo "Certificate operation successful"
# Note: Nginx reload should be handled outside this container
echo "Please ensure to reload Nginx to apply any certificate changes."
4 xin-dify/data/couchbase-server/Dockerfile Normal file
@@ -0,0 +1,4 @@
FROM couchbase/server:latest AS stage_base
# FROM couchbase:latest AS stage_base
COPY init-cbserver.sh /opt/couchbase/init/
RUN chmod +x /opt/couchbase/init/init-cbserver.sh
44 xin-dify/data/couchbase-server/init-cbserver.sh Executable file
@@ -0,0 +1,44 @@
#!/bin/bash
# Used to start Couchbase Server. Docker Compose only allows one startup command,
# so we start Couchbase the same way the standard Couchbase Dockerfile would:
# https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88

/entrypoint.sh couchbase-server &

# Track whether setup is complete so we don't try to set up again
FILE=/opt/couchbase/init/setupComplete.txt

if ! [ -f "$FILE" ]; then
    # Automatically create the cluster based on environment variables
    # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html

    echo $COUCHBASE_ADMINISTRATOR_USERNAME ":" $COUCHBASE_ADMINISTRATOR_PASSWORD

    sleep 20s
    /opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \
        --cluster-username $COUCHBASE_ADMINISTRATOR_USERNAME \
        --cluster-password $COUCHBASE_ADMINISTRATOR_PASSWORD \
        --services data,index,query,fts \
        --cluster-ramsize $COUCHBASE_RAM_SIZE \
        --cluster-index-ramsize $COUCHBASE_INDEX_RAM_SIZE \
        --cluster-eventing-ramsize $COUCHBASE_EVENTING_RAM_SIZE \
        --cluster-fts-ramsize $COUCHBASE_FTS_RAM_SIZE \
        --index-storage-setting default

    sleep 2s

    # Automatically create the bucket based on environment variables
    # https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html

    /opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \
        --username $COUCHBASE_ADMINISTRATOR_USERNAME \
        --password $COUCHBASE_ADMINISTRATOR_PASSWORD \
        --bucket $COUCHBASE_BUCKET \
        --bucket-ramsize $COUCHBASE_BUCKET_RAMSIZE \
        --bucket-type couchbase

    # Create a marker file so we know the cluster is set up and don't run the setup again
    touch $FILE
fi
# Docker Compose stops the container unless a foreground process keeps running;
# this is a known issue and workaround
tail -f /dev/null
656 xin-dify/data/docker-compose-template.yaml Normal file
@@ -0,0 +1,656 @@
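# `&shared-api-worker-env` is a YAML anchor: the api, worker, and plugin_daemon
# services merge it in via `<<: *shared-api-worker-env`. In this template the
# anchor body is empty; presumably it is populated from .env when the template
# is rendered into the final docker-compose.yaml.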
x-shared-env: &shared-api-worker-env

services:
  # API service
  api:
    image: langgenius/dify-api:1.2.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'api' starts the API server.
      MODE: api
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_REMOTE_INSTALL_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
      PLUGIN_REMOTE_INSTALL_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default

  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:1.2.0
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
      MODE: worker
      SENTRY_DSN: ${API_SENTRY_DSN:-}
      SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
      SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
      PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
    depends_on:
      - db
      - redis
    volumes:
      # Mount the storage directory to the container, for storing user files.
      - ./volumes/app/storage:/app/api/storage
    networks:
      - ssrf_proxy_network
      - default
  # Frontend web application.
  web:
    image: langgenius/dify-web:1.2.0
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
      APP_API_URL: ${APP_API_URL:-}
      SENTRY_DSN: ${WEB_SENTRY_DSN:-}
      NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
      TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
      CSP_WHITELIST: ${CSP_WHITELIST:-}
      MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
      MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
      TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
      INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
      PM2_INSTANCES: ${PM2_INSTANCES:-2}
      LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100}
      MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10}
      MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10}
      MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-5}

  # The postgres database.
  db:
    image: postgres:15-alpine
    restart: always
    environment:
      PGUSER: ${PGUSER:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
      POSTGRES_DB: ${POSTGRES_DB:-dify}
      PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
    command: >
      postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
      -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
      -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
      -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
      -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
    volumes:
      - ./volumes/db/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ]
      interval: 1s
      timeout: 3s
      retries: 60

  # The redis cache.
  redis:
    image: redis:6-alpine
    restart: always
    environment:
      REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
    volumes:
      # Mount the redis data directory to the container.
      - ./volumes/redis/data:/data
    # Set the redis password when starting the redis server.
    command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
    healthcheck:
      test: [ 'CMD', 'redis-cli', 'ping' ]

  # The DifySandbox
  sandbox:
    image: langgenius/dify-sandbox:0.2.11
    restart: always
    environment:
      # The DifySandbox configurations
      # Make sure you are changing this key for your deployment with a strong key.
      # You can generate a strong key using `openssl rand -base64 42`.
      API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
      GIN_MODE: ${SANDBOX_GIN_MODE:-release}
      WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
      ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
      HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
      HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    volumes:
      - ./volumes/sandbox/dependencies:/dependencies
      - ./volumes/sandbox/conf:/conf
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
    networks:
      - ssrf_proxy_network
  # plugin daemon
  plugin_daemon:
    image: langgenius/dify-plugin-daemon:0.0.7-local
    restart: always
    environment:
      # Use the shared environment variables.
      <<: *shared-api-worker-env
      DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
      SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
      SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
      MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
      PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
      DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
      DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
      PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
      PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
      PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
      FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
      PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120}
      PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600}
      PIP_MIRROR_URL: ${PIP_MIRROR_URL:-}
      PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local}
      PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage}
      PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin}
      PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages}
      PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets}
      PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-}
      S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false}
      S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-}
      S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false}
      AWS_ACCESS_KEY: ${PLUGIN_AWS_ACCESS_KEY:-}
      AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-}
      AWS_REGION: ${PLUGIN_AWS_REGION:-}
      AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-}
      AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-}
      TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-}
      TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-}
      TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-}
    ports:
      - "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
    volumes:
      - ./volumes/plugin_daemon:/app/storage
    depends_on:
      db:
        condition: service_healthy

  # ssrf_proxy server
  # for more information, please refer to
  # https://docs.dify.ai/learn-more/faq/install-faq#id-18.-why-is-ssrf_proxy-needed
  ssrf_proxy:
    image: ubuntu/squid:latest
    restart: always
    volumes:
      - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
      - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      # Please modify the squid environment variables below to fit your network environment.
      HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
      COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
      REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
      SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
      SANDBOX_PORT: ${SANDBOX_PORT:-8194}
    networks:
      - ssrf_proxy_network
      - default
  # Certbot service
  # use `docker compose --profile certbot up` to start the certbot service.
  certbot:
    image: certbot/certbot
    profiles:
      - certbot
    volumes:
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
      - ./volumes/certbot/logs:/var/log/letsencrypt
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live
      - ./certbot/update-cert.template.txt:/update-cert.template.txt
      - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh
    environment:
      - CERTBOT_EMAIL=${CERTBOT_EMAIL}
      - CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
      - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
    entrypoint: [ '/docker-entrypoint.sh' ]
    command: [ 'tail', '-f', '/dev/null' ]

  # The nginx reverse proxy.
  # used for reverse proxying the API service and Web service.
  nginx:
    image: nginx:latest
    restart: always
    volumes:
      - ./nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
      - ./nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
      - ./nginx/https.conf.template:/etc/nginx/https.conf.template
      - ./nginx/conf.d:/etc/nginx/conf.d
      - ./nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - ./nginx/ssl:/etc/ssl # cert dir (legacy)
      - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
      - ./volumes/certbot/conf:/etc/letsencrypt
      - ./volumes/certbot/www:/var/www/html
    entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
    environment:
      NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
      NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
      NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
      NGINX_PORT: ${NGINX_PORT:-80}
      # You're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
      # and modify the env vars below in .env if HTTPS_ENABLED is true.
      NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
      NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
      NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
      NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
      NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
      NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
      NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
      NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
      NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
      CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
    depends_on:
      - api
      - web
    ports:
      - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}'
      - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}'
  # The Weaviate vector store.
  weaviate:
    image: semitechnologies/weaviate:1.19.0
    profiles:
      - ''
      - weaviate
    restart: always
    volumes:
      # Mount the Weaviate data directory to the container.
      - ./volumes/weaviate:/var/lib/weaviate
    environment:
      # The Weaviate configurations
      # You can refer to the [Weaviate](https://weaviate.io/developers/weaviate/config-refs/env-vars) documentation for more information.
      PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
      DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
      CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
      AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
      AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
      AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}

  # Qdrant vector store.
  # (if used, you need to set VECTOR_STORE to qdrant in the api & worker service.)
  qdrant:
    image: langgenius/qdrant:v1.7.3
    profiles:
      - qdrant
    restart: always
    volumes:
      - ./volumes/qdrant:/qdrant/storage
    environment:
      QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}

  # The Couchbase vector store.
  couchbase-server:
    build: ./couchbase-server
    profiles:
      - couchbase
    restart: always
    environment:
      - CLUSTER_NAME=dify_search
      - COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
      - COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
      - COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
      - COUCHBASE_BUCKET_RAMSIZE=512
      - COUCHBASE_RAM_SIZE=2048
      - COUCHBASE_EVENTING_RAM_SIZE=512
      - COUCHBASE_INDEX_RAM_SIZE=512
      - COUCHBASE_FTS_RAM_SIZE=1024
    hostname: couchbase-server
    container_name: couchbase-server
    working_dir: /opt/couchbase
    stdin_open: true
    tty: true
    entrypoint: [ "" ]
    command: sh -c "/opt/couchbase/init/init-cbserver.sh"
    volumes:
      - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
    healthcheck:
      # ensure bucket was created before proceeding
      test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
      interval: 10s
      retries: 10
      start_period: 30s
      timeout: 10s
  # The pgvector vector database.
  pgvector:
    image: pgvector/pgvector:pg16
    profiles:
      - pgvector
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
      # pg_bigm module for full text search
      PG_BIGM: ${PGVECTOR_PG_BIGM:-false}
      PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606}
    volumes:
      - ./volumes/pgvector/data:/var/lib/postgresql/data
      - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh
    entrypoint: [ '/docker-entrypoint.sh' ]
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30

  # pgvecto-rs vector store
  pgvecto-rs:
    image: tensorchord/pgvecto-rs:pg16-v0.3.0
    profiles:
      - pgvecto-rs
    restart: always
    environment:
      PGUSER: ${PGVECTOR_PGUSER:-postgres}
      # The password for the default postgres user.
      POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
      # The name of the default postgres database.
      POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
      # postgres data directory
      PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
    volumes:
      - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD', 'pg_isready' ]
      interval: 1s
      timeout: 3s
      retries: 30

  # Chroma vector database
  chroma:
    image: ghcr.io/chroma-core/chroma:0.5.20
    profiles:
      - chroma
    restart: always
    volumes:
      - ./volumes/chroma:/chroma/chroma
    environment:
      CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
      CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
      IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}

  # OceanBase vector database
  oceanbase:
    image: oceanbase/oceanbase-ce:4.3.5.1-101000042025031818
    container_name: oceanbase
    profiles:
      - oceanbase
    restart: always
    volumes:
      - ./volumes/oceanbase/data:/root/ob
      - ./volumes/oceanbase/conf:/root/.obd/cluster
      - ./volumes/oceanbase/init.d:/root/boot/init.d
    environment:
      OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
      OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
      OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
      MODE: MINI
    ports:
      - "${OCEANBASE_VECTOR_PORT:-2881}:2881"
  # Oracle vector database
  oracle:
    image: container-registry.oracle.com/database/free:latest
    profiles:
      - oracle
    restart: always
    volumes:
      - source: oradata
        type: volume
        target: /opt/oracle/oradata
      - ./startupscripts:/opt/oracle/scripts/startup
    environment:
      ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
      ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}

  # Milvus vector database services
  etcd:
    container_name: milvus-etcd
    image: quay.io/coreos/etcd:v3.5.5
    profiles:
      - milvus
    environment:
      ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
      ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
      ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
      ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
    volumes:
      - ./volumes/milvus/etcd:/etcd
    command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
    healthcheck:
      test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  minio:
    container_name: milvus-minio
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    profiles:
      - milvus
    environment:
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - ./volumes/milvus/minio:/minio_data
    command: minio server /minio_data --console-address ":9001"
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - milvus

  milvus-standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.5.0-beta
    profiles:
      - milvus
    command: [ 'milvus', 'run', 'standalone' ]
    environment:
      ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
      MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
      common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
    volumes:
      - ./volumes/milvus/milvus:/var/lib/milvus
    healthcheck:
      test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
      interval: 30s
      start_period: 90s
      timeout: 20s
      retries: 3
    depends_on:
      - etcd
      - minio
    ports:
      - 19530:19530
      - 9091:9091
    networks:
      - milvus
  # OpenSearch vector database
  opensearch:
    container_name: opensearch
    image: opensearchproject/opensearch:latest
    profiles:
      - opensearch
    environment:
      discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
      bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
      OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
      OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
    ulimits:
      memlock:
        soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
        hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
      nofile:
        soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
        hard: ${OPENSEARCH_NOFILE_HARD:-65536}
    volumes:
      - ./volumes/opensearch/data:/usr/share/opensearch/data
    networks:
      - opensearch-net

  opensearch-dashboards:
    container_name: opensearch-dashboards
    image: opensearchproject/opensearch-dashboards:latest
    profiles:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
    volumes:
      - ./volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
    networks:
      - opensearch-net
    depends_on:
      - opensearch

  # OpenGauss vector database.
  opengauss:
    image: opengauss/opengauss:7.0.0-RC1
    profiles:
      - opengauss
    privileged: true
    restart: always
    environment:
      GS_USERNAME: ${OPENGAUSS_USER:-postgres}
      GS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123}
      GS_PORT: ${OPENGAUSS_PORT:-6600}
      GS_DB: ${OPENGAUSS_DATABASE:-dify}
    volumes:
      - ./volumes/opengauss/data:/var/lib/opengauss/data
    healthcheck:
      test: [ "CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1" ]
      interval: 10s
      timeout: 10s
      retries: 10
    ports:
      - ${OPENGAUSS_PORT:-6600}:${OPENGAUSS_PORT:-6600}
  # MyScale vector database
  myscale:
    container_name: myscale
    image: myscale/myscaledb:1.6.4
    profiles:
      - myscale
    restart: always
    tty: true
    volumes:
      - ./volumes/myscale/data:/var/lib/clickhouse
      - ./volumes/myscale/log:/var/log/clickhouse-server
      - ./volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
    ports:
      - ${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}

  # https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
    container_name: elasticsearch
    profiles:
      - elasticsearch
      - elasticsearch-ja
    restart: always
    volumes:
      - ./elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
      - dify_es01_data:/usr/share/elasticsearch/data
    environment:
      ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
      VECTOR_STORE: ${VECTOR_STORE:-}
      cluster.name: dify-es-cluster
      node.name: dify-es0
      discovery.type: single-node
      xpack.license.self_generated.type: basic
      xpack.security.enabled: 'true'
      xpack.security.enrollment.enabled: 'false'
      xpack.security.http.ssl.enabled: 'false'
    ports:
      - ${ELASTICSEARCH_PORT:-9200}:9200
    deploy:
      resources:
        limits:
          memory: 2g
    entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
    healthcheck:
      test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
      interval: 30s
      timeout: 10s
      retries: 50

  # https://www.elastic.co/guide/en/kibana/current/docker.html
  # https://www.elastic.co/guide/en/kibana/current/settings.html
  kibana:
    image: docker.elastic.co/kibana/kibana:8.14.3
    container_name: kibana
    profiles:
      - elasticsearch
    depends_on:
      - elasticsearch
    restart: always
    environment:
      XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
      NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
      XPACK_SECURITY_ENABLED: 'true'
      XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
      XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
      XPACK_FLEET_ISAIRGAPPED: 'true'
      I18N_LOCALE: zh-CN
      SERVER_PORT: '5601'
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - ${KIBANA_PORT:-5601}:5601
    healthcheck:
      test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
      interval: 30s
      timeout: 10s
      retries: 3

  # unstructured
  # (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
  unstructured:
    image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
    profiles:
      - unstructured
    restart: always
    volumes:
      - ./volumes/unstructured:/app/data

networks:
  # create a network between sandbox, api and ssrf_proxy, and can not access outside.
  ssrf_proxy_network:
    driver: bridge
    internal: true
  milvus:
    driver: bridge
  opensearch-net:
    driver: bridge
    internal: true

volumes:
  oradata:
  dify_es01_data:
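Every vector store above sits behind a Compose profile, so only one of them needs to run. As a usage sketch (assuming the template has been rendered to a regular `docker-compose.yaml` and, per the comments above, `VECTOR_STORE` is set for the api and worker services):

```shell
# Select the vector store in .env, then enable the matching profile.
echo 'VECTOR_STORE=qdrant' >> .env
docker compose --profile qdrant up -d
```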
25 xin-dify/data/elasticsearch/docker-entrypoint.sh Executable file
@@ -0,0 +1,25 @@
#!/bin/bash

set -e

if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
    # Check if the ICU tokenizer plugin is installed
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then
        printf '%s\n' "Installing the ICU tokenizer plugin"
        if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then
            printf '%s\n' "Failed to install the ICU tokenizer plugin"
            exit 1
        fi
    fi
    # Check if the Japanese language analyzer plugin is installed
    if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then
        printf '%s\n' "Installing the Japanese language analyzer plugin"
        if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then
            printf '%s\n' "Failed to install the Japanese language analyzer plugin"
            exit 1
        fi
    fi
fi

# Run the original entrypoint script
exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh
48 xin-dify/data/nginx/conf.d/default.conf.template Normal file
@@ -0,0 +1,48 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

server {
    listen ${NGINX_PORT};
    server_name ${NGINX_SERVER_NAME};

    location /console/api {
        proxy_pass http://api:5001;
        include proxy.conf;
    }

    location /api {
        proxy_pass http://api:5001;
        include proxy.conf;
    }

    location /v1 {
        proxy_pass http://api:5001;
        include proxy.conf;
    }

    location /files {
        proxy_pass http://api:5001;
        include proxy.conf;
    }

    location /explore {
        proxy_pass http://web:3000;
        include proxy.conf;
    }

    location /e/ {
        proxy_pass http://plugin_daemon:5002;
        proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
        include proxy.conf;
    }

    location / {
        proxy_pass http://web:3000;
        include proxy.conf;
    }

    # placeholder for acme challenge location
    ${ACME_CHALLENGE_LOCATION}

    # placeholder for https config defined in https.conf.template
    ${HTTPS_CONFIG}
}
42 xin-dify/data/nginx/docker-entrypoint.sh Executable file
@@ -0,0 +1,42 @@
#!/bin/bash

HTTPS_CONFIG=''

if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then
    # Check if the certificate and key files for the specified domain exist
    if [ -n "${CERTBOT_DOMAIN}" ] && \
       [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \
       [ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then
        SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}"
        SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}"
    else
        SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}"
        SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}"
    fi
    export SSL_CERTIFICATE_PATH
    export SSL_CERTIFICATE_KEY_PATH

    # Set the HTTPS_CONFIG environment variable to the content of https.conf.template
    HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
    export HTTPS_CONFIG
    # Substitute ${HTTPS_CONFIG} in default.conf.template with the content from https.conf.template
    envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
fi
# Export HTTPS_CONFIG even when HTTPS is disabled, so the final envsubst
# below replaces the ${HTTPS_CONFIG} placeholder with an empty string.
export HTTPS_CONFIG

if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then
    ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }'
else
    ACME_CHALLENGE_LOCATION=''
fi
export ACME_CHALLENGE_LOCATION

# Build the list of currently defined environment variables (e.g. "$HOME,$NGINX_PORT,...")
# so envsubst substitutes only those, leaving nginx runtime variables such as
# $host and $remote_addr untouched.
env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)

envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf

envsubst "$env_vars" < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf

# Start Nginx using the default entrypoint
exec nginx -g 'daemon off;'
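The explicit variable list passed to `envsubst` is what keeps nginx's own runtime variables intact; a minimal sketch of the difference, assuming GNU gettext's `envsubst` is installed:

```shell
export NGINX_PORT=80
# With a list, only ${NGINX_PORT} is substituted:
printf 'listen ${NGINX_PORT}; proxy_set_header Host $host;\n' | envsubst '${NGINX_PORT}'
# -> listen 80; proxy_set_header Host $host;
# Without a list, envsubst would also rewrite $host to an empty string.
```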
9 xin-dify/data/nginx/https.conf.template Normal file
@@ -0,0 +1,9 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

listen ${NGINX_SSL_PORT} ssl;
ssl_certificate ${SSL_CERTIFICATE_PATH};
ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
ssl_protocols ${NGINX_SSL_PROTOCOLS};
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
34 xin-dify/data/nginx/nginx.conf.template Normal file
@@ -0,0 +1,34 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

user nginx;
worker_processes ${NGINX_WORKER_PROCESSES};

error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;


events {
    worker_connections 1024;
}


http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};

    #gzip on;
    client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};

    include /etc/nginx/conf.d/*.conf;
}
11 xin-dify/data/nginx/proxy.conf.template Normal file
@@ -0,0 +1,11 @@
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.

proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Port $server_port;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
0 xin-dify/data/nginx/ssl/.gitkeep Normal file
24 xin-dify/data/pgvector/docker-entrypoint.sh Executable file
@@ -0,0 +1,24 @@
#!/bin/bash

PG_MAJOR=16

if [ "${PG_BIGM}" = "true" ]; then
    # install pg_bigm
    apt-get update
    apt-get install -y curl make gcc postgresql-server-dev-${PG_MAJOR}

    curl -LO https://github.com/pgbigm/pg_bigm/archive/refs/tags/v${PG_BIGM_VERSION}.tar.gz
    tar xf v${PG_BIGM_VERSION}.tar.gz
    cd pg_bigm-${PG_BIGM_VERSION} || exit 1
    make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config
    make USE_PGXS=1 PG_CONFIG=/usr/bin/pg_config install

    cd - || exit 1
    rm -rf v${PG_BIGM_VERSION}.tar.gz pg_bigm-${PG_BIGM_VERSION}

    # enable pg_bigm
    sed -i -e 's/^#\s*shared_preload_libraries.*/shared_preload_libraries = '\''pg_bigm'\''/' /var/lib/postgresql/data/pgdata/postgresql.conf
fi

# Run the original entrypoint script
exec /usr/local/bin/docker-entrypoint.sh postgres
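Building pg_bigm is driven entirely from `.env`; given the compose mapping above, enabling it looks like the following sketch:

```properties
# Build and preload pg_bigm for full-text search in the pgvector container
PGVECTOR_PG_BIGM=true
PGVECTOR_PG_BIGM_VERSION=1.2-20240606
```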
42 xin-dify/data/ssrf_proxy/docker-entrypoint.sh Executable file
@@ -0,0 +1,42 @@
#!/bin/bash

# Modified based on Squid OCI image entrypoint

# This entrypoint aims to forward the squid logs to stdout to assist users of
# common container related tooling (e.g., kubernetes, docker-compose, etc) to
# access the service logs.

# Moreover, it invokes the squid binary, leaving all the desired parameters to
# be provided by the "command" passed to the spawned container. If no command
# is provided by the user, the default behavior (as per the CMD statement in
# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
# systemd unit.

# [1] The default configuration is changed in the Dockerfile to allow local
# network connections. See the Dockerfile for further information.

echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
    /usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
fi

tail -F /var/log/squid/access.log 2>/dev/null &
tail -F /var/log/squid/error.log 2>/dev/null &
tail -F /var/log/squid/store.log 2>/dev/null &
tail -F /var/log/squid/cache.log 2>/dev/null &

# Replace environment variables in the template and write squid.conf
echo "[ENTRYPOINT] replacing environment variables in the template"
awk '{
    while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
        var = substr($0, RSTART+2, RLENGTH-3)
        val = ENVIRON[var]
        $0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
    }
    print
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf

# -z initializes missing cache (swap) directories before the real start
/usr/sbin/squid -Nz
echo "[ENTRYPOINT] starting squid"
/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
56 xin-dify/data/ssrf_proxy/squid.conf.template Normal file
@@ -0,0 +1,56 @@
acl localnet src 0.0.0.1-0.255.255.255  # RFC 1122 "this" network (LAN)
acl localnet src 10.0.0.0/8             # RFC 1918 local private network (LAN)
acl localnet src 100.64.0.0/10          # RFC 6598 shared address space (CGN)
acl localnet src 169.254.0.0/16         # RFC 3927 link-local (directly plugged) machines
acl localnet src 172.16.0.0/12          # RFC 1918 local private network (LAN)
acl localnet src 192.168.0.0/16         # RFC 1918 local private network (LAN)
acl localnet src fc00::/7               # RFC 4193 local private network range
acl localnet src fe80::/10              # RFC 4291 link-local (directly plugged) machines
acl SSL_ports port 443
# acl SSL_ports port 1025-65535  # Enable this configuration to resolve https://github.com/langgenius/dify/issues/12792
acl Safe_ports port 80          # http
acl Safe_ports port 21          # ftp
acl Safe_ports port 443         # https
acl Safe_ports port 70          # gopher
acl Safe_ports port 210         # wais
acl Safe_ports port 1025-65535  # unregistered ports
acl Safe_ports port 280         # http-mgmt
acl Safe_ports port 488         # gss-http
acl Safe_ports port 591         # filemaker
acl Safe_ports port 777         # multiling http
acl CONNECT method CONNECT
acl allowed_domains dstdomain .marketplace.dify.ai
http_access allow allowed_domains
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
http_access allow localhost
include /etc/squid/conf.d/*.conf
http_access deny all

################################## Proxy Server ################################
http_port ${HTTP_PORT}
coredump_dir ${COREDUMP_DIR}
refresh_pattern ^ftp:           1440    20%     10080
refresh_pattern ^gopher:        1440    0%      1440
refresh_pattern -i (/cgi-bin/|\?) 0     0%      0
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern .               0       20%     4320


# cache_dir ufs /var/spool/squid 100 16 256
# Upstream proxy: set to your own upstream proxy IP to avoid SSRF attacks
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default

################################## Reverse Proxy To Sandbox ################################
http_port ${REVERSE_PROXY_PORT} accel vhost
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
acl src_all src all
http_access allow src_all

# Unless this option's size is increased, an error will occur when uploading more than two files.
client_request_buffer_max_size 100 MB
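A quick way to sanity-check these ACLs from inside the stack (a sketch; `sandbox` and `ssrf_proxy` are the service names from the compose file, and the sandbox image ships curl for its healthcheck):

```shell
# Allowed: marketplace.dify.ai is explicitly whitelisted above.
docker compose exec sandbox curl -sSI -x http://ssrf_proxy:3128 https://marketplace.dify.ai
# Requests to other domains fall through to the remaining rules and are denied
# unless something under /etc/squid/conf.d allows them.
```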
13 xin-dify/data/startupscripts/init.sh Executable file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

DB_INITIALIZED="/opt/oracle/oradata/dbinit"
#[ -f ${DB_INITIALIZED} ] && exit
#touch ${DB_INITIALIZED}
if [ -f ${DB_INITIALIZED} ]; then
    echo 'Marker file exists. The database has already been initialized.'
    exit
else
    echo 'Marker file does not exist. First-time startup: initializing the database.'
    "$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
    touch ${DB_INITIALIZED}
fi
10 xin-dify/data/startupscripts/init_user.script Executable file
@@ -0,0 +1,10 @@
show pdbs;
ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;
alter session set container = freepdb1;
create user dify identified by dify DEFAULT TABLESPACE users quota unlimited on users;
grant DB_DEVELOPER_ROLE to dify;

BEGIN
CTX_DDL.CREATE_PREFERENCE('dify.world_lexer','WORLD_LEXER');
END;
/
4 xin-dify/data/tidb/config/pd.toml Normal file
@@ -0,0 +1,4 @@
# PD Configuration File reference:
# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file
[replication]
max-replicas = 1
13 xin-dify/data/tidb/config/tiflash-learner.toml Normal file
@@ -0,0 +1,13 @@
# TiFlash tiflash-learner.toml Configuration File reference:
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file

log-file = "/logs/tiflash_tikv.log"

[server]
engine-addr = "tiflash:4030"
addr = "0.0.0.0:20280"
advertise-addr = "tiflash:20280"
status-addr = "tiflash:20292"

[storage]
data-dir = "/data/flash"
19 xin-dify/data/tidb/config/tiflash.toml Normal file
@@ -0,0 +1,19 @@
# TiFlash tiflash.toml Configuration File reference:
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file

listen_host = "0.0.0.0"
path = "/data"

[flash]
tidb_status_addr = "tidb:10080"
service_addr = "tiflash:4030"

[flash.proxy]
config = "/tiflash-learner.toml"

[logger]
errorlog = "/logs/tiflash_error.log"
log = "/logs/tiflash.log"

[raft]
pd_addr = "pd0:2379"
62 xin-dify/data/tidb/docker-compose.yaml Normal file
@@ -0,0 +1,62 @@
services:
  pd0:
    image: pingcap/pd:v8.5.1
    # ports:
    #   - "2379"
    volumes:
      - ./config/pd.toml:/pd.toml:ro
      - ./volumes/data:/data
      - ./volumes/logs:/logs
    command:
      - --name=pd0
      - --client-urls=http://0.0.0.0:2379
      - --peer-urls=http://0.0.0.0:2380
      - --advertise-client-urls=http://pd0:2379
      - --advertise-peer-urls=http://pd0:2380
      - --initial-cluster=pd0=http://pd0:2380
      - --data-dir=/data/pd
      - --config=/pd.toml
      - --log-file=/logs/pd.log
    restart: on-failure
  tikv:
    image: pingcap/tikv:v8.5.1
    volumes:
      - ./volumes/data:/data
      - ./volumes/logs:/logs
    command:
      - --addr=0.0.0.0:20160
      - --advertise-addr=tikv:20160
      - --status-addr=tikv:20180
      - --data-dir=/data/tikv
      - --pd=pd0:2379
      - --log-file=/logs/tikv.log
    depends_on:
      - "pd0"
    restart: on-failure
  tidb:
    image: pingcap/tidb:v8.5.1
    # ports:
    #   - "4000:4000"
    volumes:
      - ./volumes/logs:/logs
    command:
      - --advertise-address=tidb
      - --store=tikv
      - --path=pd0:2379
      - --log-file=/logs/tidb.log
    depends_on:
      - "tikv"
    restart: on-failure
  tiflash:
    image: pingcap/tiflash:v8.5.1
    volumes:
      - ./config/tiflash.toml:/tiflash.toml:ro
      - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro
      - ./volumes/data:/data
      - ./volumes/logs:/logs
    command:
      - --config=/tiflash.toml
    depends_on:
      - "tikv"
      - "tidb"
    restart: on-failure
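To reach TiDB from the host for debugging, the commented-out port mapping on the `tidb` service has to be enabled first; a sketch, assuming a local `mysql` client (TiDB speaks the MySQL protocol, and a fresh cluster's root user has no password):

```shell
# After uncommenting `ports: - "4000:4000"` on the tidb service:
docker compose up -d
mysql -h 127.0.0.1 -P 4000 -u root
```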
17 xin-dify/data/volumes/myscale/config/users.d/custom_users_config.xml Normal file
@@ -0,0 +1,17 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <networks>
                <ip>::1</ip> <!-- change to ::/0 to allow access from all addresses -->
                <ip>127.0.0.1</ip>
                <ip>10.0.0.0/8</ip>
                <ip>172.16.0.0/12</ip>
                <ip>192.168.0.0/16</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>
</clickhouse>
1 xin-dify/data/volumes/oceanbase/init.d/vec_memory.sql Normal file
@@ -0,0 +1 @@
ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30;
222 xin-dify/data/volumes/opensearch/opensearch_dashboards.yml Normal file
@@ -0,0 +1,222 @@
|
||||
---
|
||||
# Copyright OpenSearch Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Description:
|
||||
# Default configuration for OpenSearch Dashboards
|
||||
|
||||
# OpenSearch Dashboards is served by a back end server. This setting specifies the port to use.
|
||||
# server.port: 5601
|
||||
|
||||
# Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values.
|
||||
# The default is 'localhost', which usually means remote machines will not be able to connect.
|
||||
# To allow connections from remote users, set this parameter to a non-loopback address.
|
||||
# server.host: "localhost"
|
||||
|
||||
# Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy.
|
||||
# Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath
|
||||
# from requests it receives, and to prevent a deprecation warning at startup.
|
||||
# This setting cannot end in a slash.
|
||||
# server.basePath: ""
|
||||
|
||||
# Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with
|
||||
# `server.basePath` or require that they are rewritten by your reverse proxy.
|
||||
# server.rewriteBasePath: false
|
||||
|
||||
# The maximum payload size in bytes for incoming server requests.
|
||||
# server.maxPayloadBytes: 1048576
|
||||
|
||||
# The OpenSearch Dashboards server's name. This is used for display purposes.
|
||||
# server.name: "your-hostname"
|
||||
|
||||
# The URLs of the OpenSearch instances to use for all your queries.
|
||||
# opensearch.hosts: ["http://localhost:9200"]
|
||||
|
||||
# OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and
|
||||
# dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist.
|
||||
# opensearchDashboards.index: ".opensearch_dashboards"
|
||||
|
||||
# The default application to load.
|
||||
# opensearchDashboards.defaultAppId: "home"
|
||||
|
||||
# Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck.
|
||||
# This settings should be used for large clusters or for clusters with ingest heavy nodes.
|
||||
# It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes.
|
||||
#
|
||||
# It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting
|
||||
# This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up
|
||||
# e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id:
|
||||
# Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here
|
||||
# opensearch.optimizedHealthcheckId: "cluster_id"
|
||||
|
||||
# If your OpenSearch is protected with basic authentication, these settings provide
|
||||
# the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards
|
||||
# index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which
|
||||
# is proxied through the OpenSearch Dashboards server.
|
||||
# opensearch.username: "opensearch_dashboards_system"
|
||||
# opensearch.password: "pass"
|
||||
|
||||
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser.
# server.ssl.enabled: false
# server.ssl.certificate: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key

# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when
# xpack.security.http.ssl.client_authentication in OpenSearch is set to required.
# opensearch.ssl.certificate: /path/to/your/client.crt
# opensearch.ssl.key: /path/to/your/client.key

# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your OpenSearch instance.
# opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]

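# A hedged end-to-end TLS sketch with placeholder paths (nothing below ships with
# this app): serve Dashboards over HTTPS and verify OpenSearch against a private CA:
# server.ssl.enabled: true
# server.ssl.certificate: /etc/certs/dashboards.crt
# server.ssl.key: /etc/certs/dashboards.key
# opensearch.ssl.certificateAuthorities: [ "/etc/certs/ca.pem" ]
# opensearch.ssl.verificationMode: full
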
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
# opensearch.ssl.verificationMode: full

# Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of
# the opensearch.requestTimeout setting.
# opensearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or OpenSearch. This value
# must be a positive integer.
# opensearch.requestTimeout: 30000

# List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side
# headers, set this value to [] (an empty list).
# opensearch.requestHeadersWhitelist: [ authorization ]

# Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration.
# opensearch.customHeaders: {}

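# For instance (hypothetical values): forward the security tenant header from the
# browser while pinning a fixed custom header onto every upstream request:
# opensearch.requestHeadersWhitelist: [ authorization, securitytenant ]
# opensearch.customHeaders: { "x-proxied-by": "dashboards" }
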
# Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable.
# opensearch.shardTimeout: 30000

# Logs queries sent to OpenSearch. Requires logging.verbose set to true.
# opensearch.logQueries: false

# Specifies the path where OpenSearch Dashboards creates the process ID file.
# pid.file: /var/run/opensearchDashboards.pid

# Enables you to specify a file where OpenSearch Dashboards stores log output.
# logging.dest: stdout

# Set the value of this setting to true to suppress all logging output.
# logging.silent: false

# Set the value of this setting to true to suppress all logging output other than error messages.
# logging.quiet: false

# Set the value of this setting to true to log all events, including system usage information
# and all requests.
# logging.verbose: false

# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
# ops.interval: 5000

# Specifies the locale to be used for all localizable strings, dates and number formats.
# Supported languages: English - en (the default), Chinese - zh-CN.
# i18n.locale: "en"

# Set the allowlist used to check the input graphite URL. The allowlist is the default check list.
# vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite']

# Set the blocklist used to check the input graphite URL. The blocklist is an IP list.
# Below is an example for reference:
# vis_type_timeline.graphiteBlockedIPs: [
#   // Loopback
#   '127.0.0.0/8',
#   '::1/128',
#   // Link-local address for IPv6
#   'fe80::/10',
#   // Private IP addresses for IPv4
#   '10.0.0.0/8',
#   '172.16.0.0/12',
#   '192.168.0.0/16',
#   // Unique local address (ULA)
#   'fc00::/7',
#   // Reserved IP addresses
#   '0.0.0.0/8',
#   '100.64.0.0/10',
#   '192.0.0.0/24',
#   '192.0.2.0/24',
#   '198.18.0.0/15',
#   '192.88.99.0/24',
#   '198.51.100.0/24',
#   '203.0.113.0/24',
#   '224.0.0.0/4',
#   '240.0.0.0/4',
#   '255.255.255.255/32',
#   '::/128',
#   '2001:db8::/32',
#   'ff00::/8',
# ]
# vis_type_timeline.graphiteBlockedIPs: []

# opensearchDashboards.branding:
#   logo:
#     defaultUrl: ""
#     darkModeUrl: ""
#   mark:
#     defaultUrl: ""
#     darkModeUrl: ""
#   loadingLogo:
#     defaultUrl: ""
#     darkModeUrl: ""
#   faviconUrl: ""
#   applicationTitle: ""

# Set the value of this setting to true to capture region blocked warnings and errors
# for your map rendering services.
# map.showRegionBlockedWarning: false

# Set the value of this setting to false to suppress search usage telemetry
# and reduce the load on the OpenSearch cluster.
# data.search.usageTelemetry.enabled: false

# 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false'
# Set the value of this setting to false to disable VisBuilder
# functionality in Visualization.
# vis_builder.enabled: false

# 2.4 New Experimental Feature
# Set the value of this setting to true to enable the experimental multiple data source
# support feature. Use with caution.
# data_source.enabled: false
# Set the values of these settings to customize the crypto materials used to encrypt saved credentials
# in data sources.
# data_source.encryption.wrappingKeyName: 'changeme'
# data_source.encryption.wrappingKeyNamespace: 'changeme'
# data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]

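# An illustrative sketch with made-up values (each deployment should pick its own):
# the wrapping key stays a 32-element array of byte values, like the zero-filled
# default above.
# data_source.encryption.wrappingKeyName: 'umbrel-dashboards'
# data_source.encryption.wrappingKeyNamespace: 'xin-dify'
# data_source.encryption.wrappingKey: [7,1,9,4,2,8,3,6,5,0,7,1,9,4,2,8,3,6,5,0,7,1,9,4,2,8,3,6,5,0,7,1]
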
# 2.6 New ML Commons Dashboards Feature
# Set the value of this setting to true to enable the ML Commons dashboards.
# ml_commons_dashboards.enabled: false

# 2.12 New experimental Assistant Dashboards Feature
# Set the value of this setting to true to enable the assistant dashboards.
# assistant.chat.enabled: false

# 2.13 New Query Assistant Feature
# Set the value of this setting to false to disable the query assistant.
# observability.query_assist.enabled: false

# 2.14 Enable UI Metric Collectors in Usage Collector
# Set the value of this setting to true to enable UI Metric collections.
# usageCollection.uiMetric.enabled: false

opensearch.hosts: [https://localhost:9200]
opensearch.ssl.verificationMode: none
opensearch.username: admin
opensearch.password: 'Qazwsxedc!@#123'
opensearch.requestHeadersWhitelist: [authorization, securitytenant]

opensearch_security.multitenancy.enabled: true
opensearch_security.multitenancy.tenants.preferred: [Private, Global]
opensearch_security.readonly_mode.roles: [kibana_read_only]
# Use this setting if you are running opensearch-dashboards without https
opensearch_security.cookie.secure: false
server.host: '0.0.0.0'
14
xin-dify/data/volumes/sandbox/conf/config.yaml
Normal file
@ -0,0 +1,14 @@
app:
  port: 8194
  debug: True
  key: dify-sandbox
max_workers: 4
max_requests: 50
worker_timeout: 5
python_path: /usr/local/bin/python3
enable_network: True # please make sure there is no network risk in your environment
allowed_syscalls: # please leave it empty if you have no idea how seccomp works
proxy:
  socks5: ''
  http: ''
  https: ''
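# (A hypothetical tweak, not part of the shipped file: since enable_network is True,
# outbound traffic from sandboxed code could instead be funneled through Dify's
# bundled ssrf_proxy container, e.g.
# proxy:
#   http: 'http://ssrf_proxy:3128'
#   https: 'http://ssrf_proxy:3128'
# assuming the compose service name ssrf_proxy and its default port 3128.)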
35
xin-dify/data/volumes/sandbox/conf/config.yaml.example
Normal file
@ -0,0 +1,35 @@
app:
  port: 8194
  debug: True
  key: dify-sandbox
max_workers: 4
max_requests: 50
worker_timeout: 5
python_path: /usr/local/bin/python3
python_lib_path:
  - /usr/local/lib/python3.10
  - /usr/lib/python3.10
  - /usr/lib/python3
  - /usr/lib/x86_64-linux-gnu
  - /etc/ssl/certs/ca-certificates.crt
  - /etc/nsswitch.conf
  - /etc/hosts
  - /etc/resolv.conf
  - /run/systemd/resolve/stub-resolv.conf
  - /run/resolvconf/resolv.conf
  - /etc/localtime
  - /usr/share/zoneinfo
  - /etc/timezone
  # add more paths if needed
python_pip_mirror_url: https://pypi.tuna.tsinghua.edu.cn/simple
nodejs_path: /usr/local/bin/node
enable_network: True
allowed_syscalls:
  - 1
  - 2
  - 3
  # add all the syscalls which you require
proxy:
  socks5: ''
  http: ''
  https: ''
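# (A cautionary note on the example above, assuming an x86_64 host: the
# allowed_syscalls IDs index the host architecture's syscall table, so 1, 2 and 3
# would map to write, open and close on x86_64 but to different calls on arm64;
# verify the numbers against your kernel before enabling them.)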
1124
xin-dify/docker-compose.yaml
Normal file
File diff suppressed because it is too large
28
xin-dify/umbrel-app.yml
Normal file
@ -0,0 +1,28 @@
manifestVersion: 1
id: xin-dify
name: dify
tagline: An open-source LLM app development platform
icon: https://cloud.dify.ai/logo/logo.png
category: Development
version: "1.2.0"
port: 20080
description: >-
  Dify is an open-source LLM app development platform.
  Its intuitive interface combines agentic AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production.

developer: dify.ai
website: https://dify.ai/
submitter: xin
submission: https://github.com/getumbrel/umbrel-apps/pulls
repo: https://github.com/langgenius/dify
support: https://github.com/langgenius/dify/issues
gallery:
  - https://framerusercontent.com/images/nuR2Y1onHFdk6QAdFxSBFLMLHr8.png
  - https://framerusercontent.com/images/gyg9CGdxDSKo0v5kmQeTVXR998.png
  - https://framerusercontent.com/images/cbqqxOjnHAZK0bXOSGWSwCZ4Lw.png
releaseNotes: >-
  Dify v1.2.0 is packed with impressive new features, enhancements, and of course, some good ol' bug fixes.
dependencies: []
path: ""
defaultUsername: ""
defaultPassword: ""