Tempo
The component structure is similar to Loki's.
The distributor accepts spans in multiple formats, including Jaeger, OpenTelemetry, and Zipkin, and routes spans to ingesters by hashing the trace ID.
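For example, the chart's `tempo.receivers` block can be opened up to all three protocols. This is a sketch rather than the chart default; the endpoints below are the usual ports for each protocol and are assumptions to adjust for your environment:

tempo:
  receivers:
    jaeger:
      protocols:
        thrift_http:
          endpoint: "0.0.0.0:14268"
    zipkin:
      endpoint: "0.0.0.0:9411"
    otlp:
      protocols:
        grpc:
          endpoint: "0.0.0.0:4317"
        http:
          endpoint: "0.0.0.0:4318"

The chart values used for this deployment: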
global:
  # -- Common labels for all objects directly managed by this chart.
  commonLabels: {}
# -- Overrides the chart's name
nameOverride: ""
# -- Overrides the chart's computed fullname
fullnameOverride: ""
# -- Define the amount of instances
replicas: 1
# -- Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)
# revisionHistoryLimit: 1
# -- labels for tempo
labels: {}
# -- Annotations for the StatefulSet
annotations: {}
tempo:
  repository: grafana/tempo
  tag: ""
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName

  updateStrategy: RollingUpdate

  resources: {}
  #  requests:
  #    cpu: 1000m
  #    memory: 4Gi
  #  limits:
  #    cpu: 2000m
  #    memory: 6Gi

  memBallastSizeMbs: 1024
  multitenancyEnabled: false

  # -- If true, Tempo will report anonymous usage data about the shape of a deployment to Grafana Labs
  reportingEnabled: true

  metricsGenerator:
    # -- If true, enables Tempo's metrics generator (https://grafana.com/docs/tempo/next/metrics-generator/)
    enabled: false
    remoteWriteUrl: "http://prometheus.monitoring:9090/api/v1/write"

  # -- Configuration options for the ingester.
  # Refers to: https://grafana.com/docs/tempo/latest/configuration/#ingester
  ingester: {}
  # flush_check_period: 10s
  # trace_idle_period: 10s
  # max_block_duration: 30m
  # complete_block_timeout: 1h

  # -- Configuration options for the querier.
  # Refers to: https://grafana.com/docs/tempo/latest/configuration/#querier
  querier: {}
  # max_concurrent_queries: 20

  # -- Configuration options for the query-frontend.
  # Refers to: https://grafana.com/docs/tempo/latest/configuration/#query-frontend
  queryFrontend: {}
  # search:
  #   concurrent_jobs: 2000

  retention: 24h

  # -- The standard overrides configuration section. This can include a `defaults` object for applying to all tenants (not to be confused with the `global` property of the same name, which overrides `max_byte_per_trace` for all tenants). For an example on how to enable the metrics generator using the `overrides` object, see the 'Activate metrics generator' section below. Refer to [Standard overrides](https://grafana.com/docs/tempo/latest/configuration/#standard-overrides) for more details.
  overrides:
    # -- Default config values for all tenants, can be overridden by per-tenant overrides. If a tenant's specific overrides are not found in the `per_tenant_overrides` block, the values in this `defaults` block will be used. Configs inside this block should follow the new overrides indentation format.
    defaults: {}
    # metrics_generator:
    #   processors:
    #     - service-graphs
    #     - span-metrics

    # -- Path to the per tenant override config file. The values of the `per_tenant_overrides` config below will be written to the default path `/conf/overrides.yaml`. Users can set tenant-specific overrides settings in a separate file and point per_tenant_override_config to it if not using the per_tenant_overrides block below.
    per_tenant_override_config: /conf/overrides.yaml

    # -- The `per tenant` aka `tenant-specific` runtime overrides. This allows overriding values set in the configuration on a per-tenant basis. Note that *all* values must be given for each per-tenant configuration block. Refer to [Runtime overrides](https://grafana.com/docs/tempo/latest/configuration/#runtime-overrides) and [Tenant-Specific overrides](https://grafana.com/docs/tempo/latest/configuration/#tenant-specific-overrides) documentation for more details.
    per_tenant_overrides: {}
    # 'tenant-id':
    #   metrics_generator:
    #     processors:
    #       - service-graphs
    #       - span-metrics

  # Tempo server configuration.
  # Refers to: https://grafana.com/docs/tempo/latest/configuration/#server
  server:
    # -- HTTP server listen port
    http_listen_port: 3100

  # Readiness and Liveness Probe Configuration Options
  livenessProbe:
    httpGet:
      path: /ready
      port: 3100
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 3
    successThreshold: 1
  readinessProbe:
    httpGet:
      path: /ready
      port: 3100
    initialDelaySeconds: 20
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 3
    successThreshold: 1

  storage:
    trace:
      # Tempo storage backend.
      # Refers to: https://grafana.com/docs/tempo/latest/configuration/#storage
      ## Use s3 for example
      # backend: s3
      # store traces in s3
      # s3:
      #   bucket: <your s3 bucket>                        # store traces in this bucket
      #   endpoint: s3.dualstack.us-east-2.amazonaws.com  # api endpoint
      #   access_key: ...                                 # optional. access key when using static credentials.
      #   secret_key: ...                                 # optional. secret key when using static credentials.
      #   insecure: false                                 # optional. enable if endpoint is http
      backend: local
      local:
        path: /var/tempo/traces
      wal:
        path: /var/tempo/wal

  # This configuration will listen on all ports and protocols that Tempo is capable of.
  # The receivers all come from the OpenTelemetry collector. More configuration information can
  # be found here: https://github.com/open-telemetry/opentelemetry-collector/tree/master/receiver
  receivers:
    otlp:
      protocols:
        http:
          endpoint: "0.0.0.0:4318"

  securityContext: {}
  # allowPrivilegeEscalation: false
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true

  ## Additional container arguments
  extraArgs: {}

  # -- Environment variables to add
  extraEnv: []

  # -- Environment variables from secrets or configmaps to add to the ingester pods
  extraEnvFrom: []

  # -- Volume mounts to add
  extraVolumeMounts: []
  # - name: extra-volume
  #   mountPath: /mnt/volume
  #   readOnly: true
  #   existingClaim: volume-claim
# -- Tempo configuration file contents
# @default -- Dynamically generated tempo configmap
config: |
  memberlist:
    cluster_label: "{{ .Release.Name }}.{{ .Release.Namespace }}"
  multitenancy_enabled: {{ .Values.tempo.multitenancyEnabled }}
  usage_report:
    reporting_enabled: {{ .Values.tempo.reportingEnabled }}
  compactor:
    compaction:
      block_retention: {{ .Values.tempo.retention }}
  distributor:
    receivers:
      {{- toYaml .Values.tempo.receivers | nindent 8 }}
  ingester:
    {{- toYaml .Values.tempo.ingester | nindent 6 }}
  server:
    {{- toYaml .Values.tempo.server | nindent 6 }}
  storage:
    {{- toYaml .Values.tempo.storage | nindent 6 }}
  querier:
    {{- toYaml .Values.tempo.querier | nindent 6 }}
  query_frontend:
    {{- toYaml .Values.tempo.queryFrontend | nindent 6 }}
  overrides:
    {{- toYaml .Values.tempo.overrides | nindent 6 }}
  {{- if .Values.tempo.metricsGenerator.enabled }}
  metrics_generator:
    storage:
      path: "/tmp/tempo"
      remote_write:
        - url: {{ .Values.tempo.metricsGenerator.remoteWriteUrl }}
    traces_storage:
      path: "/tmp/traces"
  {{- end }}
persistence:
  enabled: false
  # -- Enable StatefulSetAutoDeletePVC feature
  enableStatefulSetAutoDeletePVC: false
  # storageClassName: local-path
  accessModes:
    - ReadWriteOnce
  size: 10Gi
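For example, to turn on the metrics generator, override the values below before installing. This is a sketch based on the commented defaults above; the Prometheus remote-write URL is the chart default, so point it at your own Prometheus:

tempo:
  metricsGenerator:
    enabled: true
    remoteWriteUrl: "http://prometheus.monitoring:9090/api/v1/write"
  overrides:
    defaults:
      metrics_generator:
        processors:
          - service-graphs
          - span-metrics

Install the chart with the values file: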
helm install tempo grafana/tempo -f tempo/values.yaml
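To check the rollout (the release name `tempo` and the standard Helm labels are assumptions based on the install command above), confirm the pod is ready and that the readiness endpoint answers on port 3100:

kubectl get pods -l app.kubernetes.io/name=tempo
kubectl port-forward svc/tempo 3100:3100
curl http://localhost:3100/ready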
Connect with Loki to allow checking the flow from trace to log:
Extract the trace ID from each log line and link it to Tempo.
You can follow the whole flow of a trace, and also click through to the logging page for each span.
You can view logs by trace ID and see the whole flow for that trace ID.
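One way to wire this up is Grafana datasource provisioning. The sketch below assumes Loki and Tempo are reachable at the in-cluster URLs shown and that log lines contain `traceID=<id>`; the regex, URLs, and uids are assumptions to adapt to your setup:

apiVersion: 1
datasources:
  - name: Tempo
    uid: tempo
    type: tempo
    url: http://tempo:3100
    jsonData:
      tracesToLogs:
        datasourceUid: loki            # jump from a span to its logs in Loki
        tags: ['app']
  - name: Loki
    uid: loki
    type: loki
    url: http://loki:3100
    jsonData:
      derivedFields:
        - name: TraceID
          matcherRegex: "traceID=(\\w+)"   # pull the trace id out of the log line
          url: "$${__value.raw}"           # query Tempo with the extracted id
          datasourceUid: tempo             # link the field to the Tempo datasource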
The ingester batches traces into blocks, creates bloom filters and indexes, and then flushes it all to the backend.
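These ingester knobs are exposed through `tempo.ingester` in the chart values; a minimal sketch using the commented defaults from the values file above:

tempo:
  ingester:
    trace_idle_period: 10s        # how long after the last span before a trace is considered complete
    max_block_duration: 30m       # cut a block after at most this duration
    complete_block_timeout: 1h    # how long to keep a completed block in the ingester before clearing it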