services:
  openwebui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: openwebui
    restart: unless-stopped
    ports:
      - "127.0.0.1:3000:8080" # localhost only; nginx fronts this with TLS
    volumes:
      - openwebui_data:/app/backend/data
    environment:
      - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY}
      - WEBUI_AUTH=true
      - DEFAULT_MODELS=${DEFAULT_MODEL:-gpt-4o}
      # Use only what you need:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
      # Uncomment if you use Ollama for local models:
      # - OLLAMA_BASE_URL=http://ollama:11434
    # depends_on:
    #   - ollama

  # Uncomment if you want local models with Ollama
  # ollama:
  #   image: ollama/ollama:latest
  #   container_name: ollama
  #   restart: unless-stopped
  #   volumes:
  #     - ollama_data:/root/.ollama
  #   # GPU (requires nvidia-container-toolkit):
  #   # deploy:
  #   #   resources:
  #   #     reservations:
  #   #       devices:
  #   #         - driver: nvidia
  #   #           count: all
  #   #           capabilities: [gpu]

volumes:
  openwebui_data:
  # ollama_data:
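The `${...}` references above are substituted from an `.env` file in the same directory (or from the shell environment). A minimal sketch of that file, assuming the variable names used in the compose file; every value here is a placeholder, and you only need the key for the provider you actually use:

# .env (placeholder values; keep this file out of version control)
# One way to generate the secret: openssl rand -hex 32
WEBUI_SECRET_KEY=replace-with-a-long-random-string
DEFAULT_MODEL=gpt-4o
OPENAI_API_KEY=your-openai-key
ANTHROPIC_API_KEY=your-anthropic-key

With both files in place, `docker compose up -d` starts the stack and picks up the `.env` values automatically.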