# Configuration for ollama-cpu setup

# Name of the big-bear-ollama-cpu application
name: big-bear-ollama-cpu

# Service definitions for the big-bear-ollama-cpu application
services:
  # The `big-bear-ollama-cpu` service definition
  big-bear-ollama-cpu:
    # Name of the container
    container_name: big-bear-ollama-cpu

    # Image to be used for the container
    image: ollama/ollama:0.6.5

    # Container restart policy
    restart: unless-stopped

    # Environment variables
    environment:
      - PORT=11434

    # Volumes to be mounted to the container
    volumes:
      - /DATA/AppData/$AppID/.ollama:/root/.ollama

    # Ports mapping between host and container
    ports:
      # Mapping port 11434 of the host to port 11434 of the container
      - "11434:11434"

    # Note: no GPU devices are attached here; this is the CPU-only variant.
    # (Device mappings such as /dev/kfd and /dev/dri belong in the AMD/ROCm setup.)

    # CasaOS specific configuration for this service
    x-casaos:
      volumes:
        - container: /root/.ollama
          description:
            en_us: "Container Path: /root/.ollama"
      ports:
        - container: "11434"
          description:
            en_us: "Container Port: 11434"

# CasaOS specific configuration for the application
x-casaos:
  # Supported CPU architectures for the application
  architectures:
    - amd64
    - arm64
  # Main service of the application
  main: big-bear-ollama-cpu
  description:
    # Description in English
    en_us: Get up and running with Llama 3, Mistral, Gemma, and other large language models.
  tagline:
    # Short description or tagline in English
    en_us: LLM inference server with an OpenAI-compatible API
  # Developer's name or identifier
  developer: "ollama"
  # Author of this configuration
  author: BigBearTechWorld
  # Icon for the application
  icon: https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons/png/ollama.png
  # Thumbnail image (currently empty)
  thumbnail: ""
  title:
    # Title in English
    en_us: Ollama - CPU
  # Application category
  category: BigBearCasaOS
  # Port mapping information
  port_map: "11434"
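
# Once the stack is up, a model must be pulled before the server can answer
# requests. A minimal sketch, assuming the container name defined above and
# using "llama3" purely as an example model tag:
#
#   docker compose up -d
#   docker exec -it big-bear-ollama-cpu ollama pull llama3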
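
# To verify the server is reachable through the mapped port, both Ollama's
# native API and its OpenAI-compatible endpoint can be queried. A hedged
# sketch ("llama3" is an assumption and must match a model pulled above):
#
#   curl http://localhost:11434/api/tags
#   curl http://localhost:11434/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "llama3", "messages": [{"role": "user", "content": "Hello"}]}'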