Reading the Ollama Discord channel, I've noticed that many people want to self-host their own ChatGPT with Docker but don't know how to do it. Here's how to host the whole stack with Docker Compose.

Here's my docker-compose.yml, including the mitmproxy setup from the previous article.

version: "3"

services:
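  # Ollama API server, built from the local ollama/ directory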
  ollama:
    build: ollama
    user: "1001:1001"
    environment:
      - OLLAMA_HOST=0.0.0.0
      - OLLAMA_DEBUG=1
      - OLLAMA_KEEP_ALIVE=60m
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ollama_models:/home/ollama/.ollama/models

  mitmproxy:
    image: mitmproxy/mitmproxy
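    # Reverse-proxy mode: mitmweb listens on 11434 and forwards traffic to the
    # ollama service; the inspection web UI listens on 8080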
    command: mitmweb --web-host 0.0.0.0 --web-port 8080 --mode reverse:http://ollama:11434@11434 --verbose --anticache --anticomp
    depends_on:
      - ollama
    labels:
      - "traefik.enable=true"
      # ollama endpoint
      - "traefik.http.routers.ollama.rule=Host(`llm.example.com`)"
      - "traefik.http.routers.ollama.tls=true"
      - "traefik.http.routers.ollama.entrypoints=websecure"
      - "traefik.http.routers.ollama.tls.certresolver=le"
      - "traefik.http.routers.ollama.service=ollama"
      - "traefik.http.services.ollama.loadbalancer.server.port=11434"
      - "traefik.http.services.ollama.loadbalancer.server.scheme=http"
      # mitmweb endpoint
      - "traefik.http.routers.ollama-mitm.rule=Host(`inspector.example.com`)"
      - "traefik.http.routers.ollama-mitm.tls=true"
      - "traefik.http.routers.ollama-mitm.entrypoints=websecure"
      - "traefik.http.routers.ollama-mitm.tls.certresolver=le"
      - "traefik.http.routers.ollama-mitm.service=ollama-mitm"
      - "traefik.http.services.ollama-mitm.loadbalancer.server.port=8080"
      - "traefik.http.services.ollama-mitm.loadbalancer.server.scheme=http"
      - "traefik.http.middlewares.ollama-mitm-headers.headers.customrequestheaders.Host=0.0.0.0"
      - "traefik.http.middlewares.ollama-mitm-headers.headers.customrequestheaders.Origin="

  open-webui:
    build:
      context: .
      args:
        OLLAMA_API_BASE_URL: '/ollama/api'
      dockerfile: Dockerfile
    image: ghcr.io/open-webui/open-webui:main
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - open-webui:/app/backend/data
    depends_on:
      - mitmproxy
    environment:
      - 'OLLAMA_API_BASE_URL=http://mitmproxy:11434/api'
      - 'WEBUI_SECRET_KEY='
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.open-webui.rule=Host(`chatgpt.example.com`)"
      - "traefik.http.routers.open-webui.tls=true"
      - "traefik.http.routers.open-webui.entrypoints=websecure"
      - "traefik.http.routers.open-webui.tls.certresolver=le"
      - "traefik.http.routers.open-webui.service=open-webui"
      - "traefik.http.services.open-webui.loadbalancer.server.port=8080"
      - "traefik.http.services.open-webui.loadbalancer.server.scheme=http"

volumes:
  ollama_models:
  open-webui: 
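
With that file in place, you can bring the stack up and pull a first model into the ollama_models volume. A minimal sketch, assuming the compose file sits in the current directory and that your ollama image keeps the ollama CLI on the PATH like the official one does; llama3 is just an example model name, use whatever you prefer:

# build the local ollama image and start everything in the background
docker compose up -d --build

# pull a model inside the running ollama container
docker compose exec ollama ollama pull llama3

# follow the logs if something doesn't come up
docker compose logs -f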

This exposes 3 different endpoints:

  1. llm.example.com: the Ollama API behind mitmproxy; you can query it like a normal Ollama server (see the curl example below)
  2. inspector.example.com: the mitmproxy web interface, where you can inspect Ollama's requests and responses
  3. chatgpt.example.com: the Open WebUI chat interface
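
For example, you can query the Ollama endpoint with curl. A minimal sketch, assuming you have already pulled a model named llama3 and that llm.example.com points at your Traefik instance:

curl https://llm.example.com/api/generate -d '{
  "model": "llama3",
  "prompt": "Why is the sky blue?",
  "stream": false
}'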

I strongly suggest you add Traefik middlewares (an IP whitelist and/or authentication) to secure these endpoints.
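
As a sketch (Traefik v2 label syntax; v3 renames IPWhiteList to IPAllowList), you could declare an IP whitelist and a basic-auth middleware as labels and attach them to a router. The middleware names, the CIDR range and the password hash below are placeholders to replace with your own:

      - "traefik.http.middlewares.lan-only.ipwhitelist.sourcerange=192.168.1.0/24"
      - "traefik.http.middlewares.admin-auth.basicauth.users=admin:$$apr1$$replace$$with-htpasswd-hash"
      # attach both middlewares to a router, e.g. the Open WebUI one
      - "traefik.http.routers.open-webui.middlewares=lan-only,admin-auth"

Generate the hash with htpasswd -nb admin yourpassword, and remember to double every $ so Compose doesn't try to interpolate it.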

Have fun!