---
# Defaults for the monitoring role (Grafana, Loki, Prometheus, image renderer).

# Base directory on the target host for the monitoring stack's data/config.
monitoring_install_dir: "/srv/monitoring"

# SELinux level per container; "{{ omit }}" leaves the option unset.
monitoring_grafana_selinux_level: "{{ omit }}"
monitoring_loki_selinux_level: "{{ omit }}"
monitoring_prometheus_selinux_level: "{{ omit }}"

# Container images and tags. Tags are quoted so YAML never mis-parses them,
# and each `# renovate:` annotation must sit directly above its tag key so
# Renovate can update the version automatically.
monitoring_grafana_containerimage: "docker.io/grafana/grafana"
# renovate: depName=docker.io/grafana/grafana
monitoring_grafana_image_tag: "10.4.1"

monitoring_loki_containerimage: "docker.io/grafana/loki"
# renovate: depName=docker.io/grafana/loki
monitoring_loki_image_tag: "2.9.6"

monitoring_prometheus_containerimage: "quay.io/prometheus/prometheus"
# renovate: depName=quay.io/prometheus/prometheus
monitoring_prometheus_image_tag: "v2.51.1"

monitoring_image_renderer_containerimage: "docker.io/grafana/grafana-image-renderer"
# renovate: depName=docker.io/grafana/grafana-image-renderer
monitoring_image_renderer_image_tag: "3.10.1"

# These settings allow resource management of the container workload.
# While memory_high and memory_low are quite straightforward, swap_max is sadly not.
# memory_high and memory_low allow to 'directly' set the corresponding cgroupv2
# setting, while swap_max sets swap.max to `memory_high - swap_max`.
# Special values: memory_high = 0 and swap_max = -1 set no limit and unlimited
# swap respectively.
# A unit can be appended to all these.
# A unit can be b (bytes), k (kibibytes), m (mebibytes), or g (gibibytes).
monitoring_grafana_memory_low: 128m
monitoring_grafana_memory_high: 0
monitoring_grafana_swap_max: -1

monitoring_loki_memory_low: 256m
monitoring_loki_memory_high: 0
monitoring_loki_swap_max: -1

monitoring_prometheus_memory_low: 256m
monitoring_prometheus_memory_high: 0
monitoring_prometheus_swap_max: -1

monitoring_image_renderer_memory_low: 256m
monitoring_image_renderer_memory_high: 0
monitoring_image_renderer_swap_max: -1

# Public domains the reverse proxy serves each component on.
monitoring_grafana_domain: "grafana.example.com"
monitoring_grafana_loglevel: "info"

monitoring_loki_domain: "loki.example.com"
# Supported values [debug, info, warn, error]
monitoring_loki_loglevel: "info"

monitoring_prometheus_domain: "prometheus.example.com"
# Supported values [debug, info, warn, error]
monitoring_prometheus_loglevel: "info"

### Grafana specific options ###
# Generic OAuth SSO settings. NOTE(review): replace client_secret with your
# own value (ideally via ansible-vault) — the default is a placeholder.
monitoring_grafana_oauth:
  enabled: false
  name: "OAuth"
  allow_sign_up: false
  signout_url: "https://auth.example.com/auth/realms/sso/protocol/openid-connect/logout"
  auth_url: "https://auth.example.com/auth/realms/sso/protocol/openid-connect/auth"
  token_url: "https://auth.example.com/auth/realms/sso/protocol/openid-connect/token"
  api_url: "https://auth.example.com/auth/realms/sso/protocol/openid-connect/userinfo"
  client_id: "grafana"
  client_secret: "something-secret123"
  allow_assign_grafana_admin: false

# List of Grafana feature toggle names to enable; empty by default.
monitoring_grafana_feature_toggles: []

# When true this will remove all alerting provisioning files not managed by this ansible role.
# This won't make any backups so be warned.
monitoring_grafana_remove_unmanaged_alerting_files: false

### Loki specific options ###
monitoring_loki_schema_config:
  configs:
    # `from` is quoted so YAML keeps it a string instead of a date object.
    - from: "2023-11-30"
      store: tsdb
      object_store: filesystem
      schema: v12
      index:
        prefix: index_
        period: 24h
      chunks:
        prefix: chunks_
        period: 24h
monitoring_loki_retention_period: 15d

# `hashed_password` has to be hashed using md5, sha1 or BCrypt
# e.g. using `mkpasswd --method=bcrypt --stdin`
# e.g. using `htpasswd -Bin `
# Ref.: https://caddyserver.com/docs/caddyfile/directives/basicauth
monitoring_loki_basic_auth: []
# - username: "{{ }}"
#   hashed_password: "{{ }}"

### Prometheus specific options ###
# Prometheus native TLS and basic auth is experimental. So we are using caddy (for now).
# `hashed_password` has to be hashed using md5, sha1 or BCrypt
# e.g. using `mkpasswd --method=bcrypt --stdin`
# e.g. using `htpasswd -Bin `
# Ref.: https://caddyserver.com/docs/caddyfile/directives/basicauth
monitoring_prometheus_basic_auth: []
# - username: "{{ }}"
#   hashed_password: "{{ }}"

monitoring_prometheus_retention_time: 15d
monitoring_prometheus_write_receiver_enable: false

# Scrape config passed through to prometheus.yml. The final job picks up
# extra targets from drop-in files under /etc/prometheus/file_configs/.
monitoring_prometheus_scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ["prometheus:9090"]
  - job_name: grafana
    static_configs:
      - targets: ["grafana:3000"]
  - job_name: loki
    static_configs:
      - targets: ["loki:3100"]
  - job_name: file_configs
    file_sd_configs:
      - files:
          - /etc/prometheus/file_configs/*.yml
          - /etc/prometheus/file_configs/*.json