Allow running blackbox exporter for SSL cert expiration checks #198

Open · wants to merge 6 commits into base: main
1 change: 1 addition & 0 deletions .github/workflows/release.yaml
@@ -27,6 +27,7 @@ jobs:
           helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
           helm repo add minio https://charts.min.io
           helm repo add bitnami https://charts.bitnami.com/bitnami
+          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
 
       - name: Run chart-releaser
         uses: helm/[email protected]
7 changes: 5 additions & 2 deletions charts/signoz/Chart.lock
@@ -17,5 +17,8 @@ dependencies:
 - name: keycloak
   repository: https://charts.bitnami.com/bitnami
   version: 10.1.5
-digest: sha256:a2056dea59f72dc965d3c154925ff5bd13697c0ec9545dabeb8ba064430c334e
-generated: "2023-03-31T17:07:30.454673+05:45"
+- name: prometheus-blackbox-exporter
+  repository: https://prometheus-community.github.io/helm-charts
+  version: 7.7.0
+digest: sha256:dc2062dcdae4bae56364948c79fb0161913df9030c327c6e9fb43513ff0cd153
+generated: "2023-04-04T01:44:05.579243+05:30"
4 changes: 4 additions & 0 deletions charts/signoz/Chart.yaml
@@ -43,6 +43,10 @@ dependencies:
     repository: https://charts.bitnami.com/bitnami
     condition: keycloak.enabled
     version: 10.1.5
+  - name: prometheus-blackbox-exporter
+    repository: https://prometheus-community.github.io/helm-charts
+    condition: prometheus-blackbox-exporter.enabled
+    version: 7.7.0
   maintainers:
     - name: SigNoz
       email: [email protected]
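For reference, the Chart.lock changes above are exactly what Helm regenerates once this dependency is declared in Chart.yaml; a typical refresh, assuming the repository root as working directory:

helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm dependency update charts/signoz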
Binary file not shown.
256 changes: 256 additions & 0 deletions charts/signoz/templates/_otel_collector_metrics_config.tpl
@@ -0,0 +1,256 @@
{{- define "blackexporterJob" -}}
- job_name: blackbox
  metrics_path: /probe
  params:
    module: [http_2xx]
  static_configs:
    - targets:
{{ toYaml .Values.otelCollectorMetrics.blackboxexporter.targets | indent 8 }}
  relabel_configs:
    - source_labels: [__address__]
      target_label: __param_target
    - source_labels: [__param_target]
      target_label: endpoint
    # Route every probe through this release's blackbox exporter service
    # rather than a hardcoded "my-release" service name.
    - target_label: __address__
      replacement: {{ .Release.Name }}-prometheus-blackbox-exporter:9115
{{- end }}
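The relabel rules above follow the standard blackbox-exporter pattern: each configured target is copied from __address__ into the probe's target query parameter (and an endpoint label), and the scrape address itself is rewritten to the exporter service, so a target such as https://example.com is effectively probed via http://<release>-prometheus-blackbox-exporter:9115/probe?module=http_2xx&target=https://example.com. Certificate expiry then comes from the probe_ssl_earliest_cert_expiry metric the exporter emits for HTTPS targets. A minimal values sketch that would activate this job (key names are taken from this template; the targets themselves are illustrative):

otelCollectorMetrics:
  blackboxexporter:
    enabled: true
    targets:
      - https://example.com
      - https://signoz.io

prometheus-blackbox-exporter:
  enabled: true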

{{- define "prometheus-receiver" -}}
prometheus:
  config:
    scrape_configs:
      - job_name: signoz-spanmetrics-collector
        scrape_interval: 60s
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - source_labels:
              - __meta_kubernetes_pod_annotation_apm_signoz_io_scrape
            action: keep
            regex: true
          - source_labels:
              - __meta_kubernetes_pod_annotation_apm_signoz_io_path
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels:
              - __meta_kubernetes_pod_ip
              - __meta_kubernetes_pod_annotation_apm_signoz_io_port
            action: replace
            separator: ':'
            target_label: __address__
          - target_label: job_name
            replacement: signoz-spanmetrics-collector
          - source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_name
            action: replace
            target_label: signoz_k8s_name
          - source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_instance
            action: replace
            target_label: signoz_k8s_instance
          - source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_component
            action: replace
            target_label: signoz_k8s_component
          - source_labels:
              - __meta_kubernetes_namespace
            action: replace
            target_label: k8s_namespace_name
          - source_labels:
              - __meta_kubernetes_pod_name
            action: replace
            target_label: k8s_pod_name
          - source_labels:
              - __meta_kubernetes_pod_uid
            action: replace
            target_label: k8s_pod_uid
          - source_labels:
              - __meta_kubernetes_pod_container_name
            action: replace
            target_label: k8s_container_name
          - source_labels:
              - __meta_kubernetes_pod_container_name
            regex: (.+)-init
            action: drop
          - source_labels:
              - __meta_kubernetes_pod_node_name
            action: replace
            target_label: k8s_node_name
          - source_labels:
              - __meta_kubernetes_pod_ready
            action: replace
            target_label: k8s_pod_ready
          - source_labels:
              - __meta_kubernetes_pod_phase
            action: replace
            target_label: k8s_pod_phase
      - job_name: generic-collector
        scrape_interval: 60s
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          - source_labels:
              - __meta_kubernetes_pod_annotation_signoz_io_scrape
            action: keep
            regex: true
          - source_labels:
              - __meta_kubernetes_pod_annotation_signoz_io_path
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels:
              - __meta_kubernetes_pod_ip
              - __meta_kubernetes_pod_annotation_signoz_io_port
            action: replace
            separator: ':'
            target_label: __address__
          - target_label: job_name
            replacement: generic-collector
          - source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_name
            action: replace
            target_label: signoz_k8s_name
          - source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_instance
            action: replace
            target_label: signoz_k8s_instance
          - source_labels:
              - __meta_kubernetes_pod_label_app_kubernetes_io_component
            action: replace
            target_label: signoz_k8s_component
          - source_labels:
              - __meta_kubernetes_namespace
            action: replace
            target_label: k8s_namespace_name
          - source_labels:
              - __meta_kubernetes_pod_name
            action: replace
            target_label: k8s_pod_name
          - source_labels:
              - __meta_kubernetes_pod_uid
            action: replace
            target_label: k8s_pod_uid
          - source_labels:
              - __meta_kubernetes_pod_container_name
            action: replace
            target_label: k8s_container_name
          - source_labels:
              - __meta_kubernetes_pod_container_name
            regex: (.+)-init
            action: drop
          - source_labels:
              - __meta_kubernetes_pod_node_name
            action: replace
            target_label: k8s_node_name
          - source_labels:
              - __meta_kubernetes_pod_ready
            action: replace
            target_label: k8s_pod_ready
          - source_labels:
              - __meta_kubernetes_pod_phase
            action: replace
            target_label: k8s_pod_phase
{{- if and .Values.otelCollectorMetrics.blackboxexporter.enabled (gt (len .Values.otelCollectorMetrics.blackboxexporter.targets) 0) }}
{{- include "blackexporterJob" . | nindent 6 }}
{{- end }}
{{- end }}
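Both kubernetes_sd jobs above are annotation-driven: a pod is kept only when it carries the matching scrape annotation, and the optional path and port annotations override the default scrape URL. For the generic-collector job, a pod opting in would carry annotations roughly like these (port and path values are illustrative):

metadata:
  annotations:
    signoz.io/scrape: "true"
    signoz.io/port: "8080"
    signoz.io/path: /metrics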

{{- define "otelCollectorMetrics" -}}
receivers:
  hostmetrics:
    collection_interval: 30s
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  {{- include "prometheus-receiver" . | nindent 2 }}
processors:
  # Batch processor config.
  # ref: https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md
  batch:
    send_batch_size: 10000
    timeout: 1s
  # Resource detection processor config.
  # ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
  resourcedetection:
    # detectors: include ec2/eks for AWS, gce/gke for GCP and azure/aks for Azure
    # env detector included below adds custom labels using the OTEL_RESOURCE_ATTRIBUTES envvar
    detectors:
      - env
      # - elastic_beanstalk
      # - eks
      # - ecs
      # - ec2
      # - gke
      # - gce
      # - azure
      # - heroku
      - system
    timeout: 2s
    system:
      hostname_sources: [dns, os]
  # Memory Limiter processor.
  # If set to null, it will be overridden with values based on k8s resource limits.
  # ref: https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiterprocessor/README.md
  memory_limiter: null
  # K8s Attribute processor config.
  # ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/k8sattributesprocessor/README.md
  k8sattributes/hostmetrics:
    # -- Whether to detect the IP address of agents and add it as an attribute to all telemetry resources.
    # If set to true, agents will not make any k8s API calls, do any discovery of pods or extract any metadata.
    passthrough: false
    # -- Filters can be used to limit each OpenTelemetry agent to query pods based on a specific
    # selector, dramatically reducing resource requirements for very large clusters.
    filter:
      # -- Restrict each OpenTelemetry agent to query pods running on the same node
      node_from_env_var: K8S_NODE_NAME
    pod_association:
      - sources:
          - from: resource_attribute
            name: k8s.pod.ip
      - sources:
          - from: resource_attribute
            name: k8s.pod.uid
      - sources:
          - from: connection
    extract:
      metadata:
        - k8s.namespace.name
        - k8s.pod.name
        - k8s.pod.uid
        - k8s.pod.start_time
        - k8s.deployment.name
        - k8s.node.name
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: localhost:55679
  pprof:
    endpoint: localhost:1777
exporters:
  clickhousemetricswrite:
    endpoint: tcp://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT}/?database=${CLICKHOUSE_DATABASE}&username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}
  clickhousemetricswrite/hostmetrics:
    endpoint: tcp://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT}/?database=${CLICKHOUSE_DATABASE}&username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}
    resource_to_telemetry_conversion:
      enabled: true
service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions: [health_check, zpages, pprof]
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/hostmetrics:
      receivers: [hostmetrics]
      processors: [resourcedetection, k8sattributes/hostmetrics, batch]
      exporters: [clickhousemetricswrite/hostmetrics]
{{- end }}
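Once rendered and deployed, the health_check extension above offers a quick way to verify the metrics collector is up; a rough manual check (the deployment name is illustrative and depends on the release name):

kubectl port-forward deploy/my-release-signoz-otel-collector-metrics 13133:13133
curl http://localhost:13133/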
@@ -6,4 +6,4 @@ metadata:
     {{- include "otelCollectorMetrics.labels" . | nindent 4 }}
 data:
   otel-collector-metrics-config.yaml: |-
-    {{- toYaml .Values.otelCollectorMetrics.config | nindent 4 }}
+    {{ include "otelCollectorMetrics" . | nindent 4 }}
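The whole change can be sanity-checked without installing the chart by rendering it locally; the release name and target below are illustrative:

helm dependency update charts/signoz
helm template my-release charts/signoz \
  --set prometheus-blackbox-exporter.enabled=true \
  --set otelCollectorMetrics.blackboxexporter.enabled=true \
  --set 'otelCollectorMetrics.blackboxexporter.targets[0]=https://example.com'

The rendered otel-collector-metrics ConfigMap should then contain the blackbox scrape job from the template above.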