Skip to main content

Grafana + Prometheus + Loki

 

# Monitoring stack: Grafana (UI), Prometheus (metrics DB), cAdvisor (container metrics).
services:
  grafana:
    image: grafana/grafana-oss:11.5.1-ubuntu
    container_name: grafana
    hostname: grafana
    restart: unless-stopped
    environment:
      # Public URL Grafana uses for links/redirects behind the reverse proxy.
      - GF_SERVER_ROOT_URL=https://grafana.MEINEDOMAIN.de
      - GF_PLUGINS_PREINSTALL=grafana-clock-panel
      - GF_SECURITY_ADMIN_USER=admin
      # - GF_SECURITY_ADMIN_PASSWORD=admin
    ports:
      # Host 3030 -> container 3000 (Grafana default). Quoted: colon-separated
      # digit scalars should always be quoted in Compose files to avoid YAML
      # number-parsing surprises.
      - "3030:3000"
    volumes:
      - grafana:/var/lib/grafana
    networks:
      grafana-monitoring:

  prometheus:
    image: prom/prometheus:v3.1.0
    container_name: prometheus
    hostname: prometheus
    restart: unless-stopped
    ports:
      - "9090:9090"
    volumes:
      # Named volume holds /etc/prometheus, so prometheus.yml can be edited
      # from the host (see the note further below).
      - prometheus:/etc/prometheus
    networks:
      grafana-monitoring:

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:v0.47.2
    container_name: cadvisor
    hostname: cadvisor
    restart: unless-stopped
    # cAdvisor needs privileged access plus the read-only host mounts below
    # to collect per-container metrics.
    privileged: true
    devices:
      - /dev/kmsg
    ports:
      # Host 8088 -> container 8080 (cAdvisor web/metrics port).
      - "8088:8080"
    volumes:
      - /dev/disk/:/dev/disk:ro
      - /mnt/praxis-volume-01/docker-data/:/var/lib/docker:ro
      - /sys:/sys:ro
      - /var/run:/var/run:ro
      - /:/rootfs:ro
    networks:
      grafana-monitoring:

volumes:
  grafana:
  prometheus:

networks:
  grafana-monitoring:
    driver: bridge

Danach in das Prometheus-Volume gehen und die prometheus.yml bearbeiten:

# my global config
global:
  scrape_interval: 5s # Scrape targets every 5 seconds. Default is every 1 minute.
  evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ["localhost:9090"]

  - job_name: "cadvisor"
    static_configs:
      # - targets: ['cadvisor:8080']
      # ^ Did not work reliably here: the `cadvisor` hostname could only be
      #   resolved briefly after the Prometheus container started, then DNS
      #   lookups failed (check with `docker exec -it prometheus /bin/sh`
      #   followed by `nslookup cadvisor`). As a workaround, scrape via the
      #   external hostname instead.
      # NOTE: host port 8088 is mapped to container port 8080. If you scrape
      #   cadvisor directly on the Docker network, use the internal port 8080,
      #   not 8088.
      - targets: ["hetzner-01.MEINEDOMAIN.de:8088"]