Run llama3 with Ollama
Instructions
docker-compose.yml
version: "3.8"

services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    volumes:
      - ./ollama/ollama:/root/.ollama
    tty: true
    ports:
      - 11434:11434
    networks:
      - ollama-docker
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]

  ollama-webui:
    image: ghcr.io/ollama-webui/ollama-webui:main
    container_name: ollama-webui
    restart: unless-stopped
    volumes:
      - ./ollama/ollama-webui:/app/backend/data
    ports:
      - 8080:8080
    environment:
- "/ollama/api=http://ollama:11434/api"
    extra_hosts:
      - host.docker.internal:host-gateway
    networks:
      - ollama-docker

networks:
  ollama-docker:
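The deploy block left commented out in the ollama service passes an NVIDIA GPU through to the container; using it assumes the NVIDIA Container Toolkit is installed on the host. A quick way to confirm the running container actually sees the GPU:

# Should print the usual GPU table if passthrough is working
docker exec -it ollama nvidia-smi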
Setup
# Run docker-compose
docker-compose up -d
# Open a shell in the ollama container and pull a model (see https://ollama.com/library)
docker exec -it ollama /bin/bash
ollama pull llama3
# Leave the container shell
exit
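Once the model is pulled, a quick sanity check from the host (assuming the default 11434 port mapping from docker-compose.yml) is to query the Ollama HTTP API directly:

# List the models that have been pulled
curl http://localhost:11434/api/tags
# Run a one-off, non-streaming test prompt against llama3
curl http://localhost:11434/api/generate -d '{"model": "llama3", "prompt": "Why is the sky blue?", "stream": false}'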
Chat with Web-UI
Open http://localhost:8080 in a browser; the port is the one defined under ollama-webui.ports in docker-compose.yml.
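If the page does not load, one way to check whether the Web UI container is answering on the mapped port (assuming the 8080:8080 mapping above):

# Expect an HTTP response header from the Web UI
curl -I http://localhost:8080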