# Monitoring stack: Grafana (UI) <- Prometheus (TSDB) <- cAdvisor + node-exporter (collectors).
# Only Grafana joins the external proxy network; everything else stays on the
# internal bridge and is reached via `expose` (container-to-container only).
services:
  # Grafana dashboard UI. Reads secrets from ./grafana/.env; persists to a named volume.
  grafana:
    container_name: monitoring-grafana
    image: grafana/grafana:latest
    hostname: rpi-grafana
    restart: unless-stopped
    networks:
      - monitor-network  # external proxy network — the only externally reachable service
      - internal
    env_file:
      - ./grafana/.env
    volumes:
      - grafana-data:/var/lib/grafana
    depends_on:
      - prometheus
    healthcheck:
      test: ["CMD", "wget", "-O", "/dev/null", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    labels:
      com.example.description: Grafana Dashboard
      com.example.service: monitoring
      glance.name: Grafana
      glance.icon: si:grafana
      glance.url: https://grafana.tulkdan.dev
      glance.id: grafana
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # cAdvisor: per-container resource metrics. Needs SYS_ADMIN and host mounts
  # to read cgroup/Docker state; /dev/kmsg is required for OOM-kill detection.
  cadvisor:
    container_name: monitoring-cadvisor
    image: gcr.io/cadvisor/cadvisor:latest
    hostname: rpi-cadvisor
    restart: unless-stopped
    cap_add:
      - SYS_ADMIN
    networks:
      - internal
    expose:
      - "8080"  # quoted: port-like scalars should be strings in Compose
    command:
      - '-housekeeping_interval=15s'
      - '-docker_only=true'
      - '-store_container_labels=false'
    devices:
      - /dev/kmsg
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
      - /etc/machine-id:/etc/machine-id:ro
    healthcheck:
      test: ["CMD", "wget", "-O", "/dev/null", "http://localhost:8080/healthz"]
      interval: 30s
      timeout: 10s
      retries: 3
    labels:
      com.example.description: cAdvisor Container Monitoring
      com.example.service: monitoring
      glance.name: CAdvisor
      glance.parent: grafana
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    mem_limit: 256m
    mem_reservation: 128m

  # node-exporter: host-level metrics (CPU, memory, disk, filesystem).
  # Host paths are remounted under /host so collectors read the real system.
  node-exporter:
    container_name: monitoring-node-exporter
    image: prom/node-exporter:latest
    hostname: rpi-exporter
    restart: unless-stopped
    networks:
      - internal
    expose:
      - "9100"
    command:
      - --path.procfs=/host/proc
      - --path.sysfs=/host/sys
      - --path.rootfs=/host
      - --collector.filesystem.ignored-mount-points
      # Quoted so YAML never mis-parses the regex; $$ is Compose's escape for a literal $.
      - '^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)'
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
      - /:/host:ro,rslave
    healthcheck:
      test: ["CMD", "wget", "-O", "/dev/null", "http://localhost:9100/metrics"]
      interval: 30s
      timeout: 10s
      retries: 3
    labels:
      com.example.description: Node Exporter
      com.example.service: monitoring
      glance.name: Node exporter
      glance.parent: grafana
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    mem_limit: 128m
    mem_reservation: 64m

  # Prometheus time-series database; scrapes cadvisor and node-exporter.
  # Runs as "nobody" — the prometheus-data volume must be writable by that user.
  prometheus:
    container_name: monitoring-prometheus
    image: prom/prometheus:latest
    hostname: rpi-prometheus
    restart: unless-stopped
    user: "nobody"
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--storage.tsdb.retention.time=1y'
      - '--storage.tsdb.retention.size=10GB'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
    networks:
      - internal
    expose:
      - "9090"
    volumes:
      - prometheus-data:/prometheus
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    depends_on:
      - cadvisor
      - node-exporter
    healthcheck:
      test: ["CMD", "wget", "-O", "/dev/null", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    labels:
      com.example.description: Prometheus Time Series Database
      com.example.service: monitoring
      glance.name: Prometheus
      glance.parent: grafana
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    mem_limit: 1g
    mem_reservation: 512m

# Named volumes for persistent state.
volumes:
  grafana-data:
    labels:
      - "com.example.description=Grafana Persistent Data"
      - "com.example.service=monitoring"
  prometheus-data:
    labels:
      - "com.example.description=Prometheus Persistent Data"
      - "com.example.service=monitoring"

networks:
  # Private bridge for collector <-> Prometheus <-> Grafana traffic.
  internal:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.20.0.0/16
          gateway: 172.20.0.1
    labels:
      - "com.example.description=Monitoring Network"
      - "com.example.service=monitoring"
  # Pre-existing reverse-proxy network (created outside this file).
  monitor-network:
    name: proxy-network
    external: true