Self-hosted AI. Any OpenAI or Ollama model can be used. OpenAI models require an API key; Ollama models are automatically downloaded and run locally once selected. This instance currently uses the GPT-4o model.
Cloudflare Tunnel URL: https://ai.risingflow.com
Internal URL: http://192.168.1.240:28669
GitHub URL: https://github.com/SecureAI-Tools/SecureAI-Tools
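For reference, the tunnel mapping from the public hostname to the internal service would look roughly like the cloudflared config sketch below. Only the hostname and internal URL come from the entries above; the tunnel ID and credentials path are placeholders.

```yaml
# Sketch of a Cloudflare Tunnel (cloudflared) ingress config.
# <TUNNEL-ID> and the credentials path are placeholders, not the live values.
tunnel: <TUNNEL-ID>
credentials-file: /etc/cloudflared/<TUNNEL-ID>.json

ingress:
  # Route the public hostname to the internal service
  - hostname: ai.risingflow.com
    service: http://192.168.1.240:28669
  # Catch-all rule required by cloudflared
  - service: http_status:404
```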
Docker Compose:

```yaml
services:
  chatgpt-client:
    image: soulteary/chatgpt
    restart: always
    ports:
      - "8090:8090"
    environment:
      # Service port
      APP_PORT: 8090
      # The ChatGPT client domain, adjusted to the new IP
      APP_HOSTNAME: "http://192.168.1.240:8090"
      # The ChatGPT backend upstream, pointing to the sparrow service
      APP_UPSTREAM: "http://sparrow:8091"

  sparrow:
    image: soulteary/sparrow
    restart: always
    environment:
      # Basic Settings
      WEB_CLIENT_HOSTNAME: "http://192.168.1.240:8090"
      APP_PORT: 8091
      # Advanced Settings (optional)
      FEATURE_NEW_UI: "on"
      ENABLE_HISTORY_LIST: "on"
      ENABLE_I18N: "on"
      ENABLE_DATA_CONTROL: "on"
      ENABLE_MODEL_SWITCH: "on"
      # ENABLE_OPENAI_OFFICIAL_MODEL: "on"
      # Plugin Settings (optional)
      # ENABLE_PLUGIN: "on"
      # ENABLE_PLUGIN_BROWSING: "on"
      # ENABLE_PLUGIN_CODE_INTERPRETER: "on"
      # ENABLE_PLUGIN_PLUGIN_DEV: "on"
      # Private OpenAI API Server Settings (optional)
      ENABLE_OPENAI_API: "on"
      OPENAI_API_KEY: "redacted"
      # OPENAI_API_PROXY_ENABLE: "on"
      # OPENAI_API_PROXY_ADDR: "http://127.0.0.1:1234"
      # Private Midjourney Server Settings (optional)
      # ENABLE_MIDJOURNEY: "on"
      # ENABLE_MIDJOURNEY_ONLY: "on"
      # MIDJOURNEY_API_SECRET: "your-secret"
      # MIDJOURNEY_API_URL: "ws://localhost:8092/ws"
      # Private FlagStudio Server Settings (optional)
      # ENABLE_FLAGSTUDIO: "on"
      # ENABLE_FLAGSTUDIO_ONLY: "off"
      # FLAGSTUDIO_API_KEY: "your-flagstudio-api-key"
      # Private Claude Server Settings (optional)
      # ENABLE_CLAUDE: "on"
      # ENABLE_CLAUDE_ONLY: "on"
      # CLAUDE_API_SECRET: "your-secret"
      # CLAUDE_API_URL: "ws://localhost:8093/ws"
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
```
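Since Ollama models are served locally, one way to add them is to run Ollama in the same Compose file. The snippet below is only a sketch: the `ollama` service name, the `ollama-data` volume, and the idea of pointing the commented-out `OPENAI_API_PROXY_ADDR` at Ollama's OpenAI-compatible endpoint are assumptions, not part of the deployed stack.

```yaml
# Hypothetical addition: run Ollama alongside the stack so models can be pulled
# and served locally; 11434 is Ollama's default port. Pointing the optional
# OPENAI_API_PROXY_ADDR above at http://ollama:11434/v1 (Ollama's
# OpenAI-compatible API) would be one way to wire sparrow to it -- untested.
services:
  ollama:
    image: ollama/ollama
    restart: always
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama

volumes:
  ollama-data:
```

Models could then be pulled with `docker compose exec ollama ollama pull <model>` and would run entirely on the host.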