PLG Setup
Add the Grafana Helm repository:
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
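Optionally, confirm the Loki and Grafana charts are visible after the update:
helm search repo grafana/loki
helm search repo grafana/grafana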
Declare the Loki values.yaml
(File system - Single Binary)
deploymentMode: SingleBinary

loki:
  commonConfig:
    replication_factor: 1
  schemaConfig:
    configs:
      - from: "2024-04-01"
        store: tsdb
        object_store: filesystem
        schema: v13
        index:
          prefix: loki_index_
          period: 24h
  storage:
    type: filesystem
    filesystem:
      admin_api_directory: "/var/loki/admin"
      chunks_directory: "/var/loki/chunks"
      rules_directory: "/var/loki/rules"
  ingester:
    chunk_encoding: snappy
  querier:
    # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
    max_concurrent: 4
  pattern_ingester:
    enabled: true
  limits_config:
    allow_structured_metadata: true
    volume_enabled: true
  auth_enabled: false

# Zero out replica counts of other deployment modes
backend:
  replicas: 0
read:
  replicas: 0
write:
  replicas: 0
ingester:
  replicas: 0
querier:
  replicas: 0
queryFrontend:
  replicas: 0
queryScheduler:
  replicas: 0
distributor:
  replicas: 0
compactor:
  replicas: 0
indexGateway:
  replicas: 0
bloomCompactor:
  replicas: 0
bloomGateway:
  replicas: 0

singleBinary:
  replicas: 1
  persistence:
    enabled: true
    size: "20Gi"
    storageClass: "alicloud-disk-essd" # Specify your storage class here
    accessModes:
      - ReadWriteOnce
    annotations:
      volume.beta.kubernetes.io/storage-provisioner: diskplugin.csi.alibabacloud.com
(Object Storage - Distributed)
deploymentMode: Distributed

ingester:
  replicas: 3 # To ensure data durability with replication
  zoneAwareReplication:
    enabled: false
querier:
  replicas: 3 # Improve query performance via parallelism
  maxUnavailable: 2
queryFrontend:
  replicas: 2
  maxUnavailable: 1
queryScheduler:
  replicas: 2
distributor:
  replicas: 3
  maxUnavailable: 2
compactor:
  replicas: 1
indexGateway:
  replicas: 2
  maxUnavailable: 1

bloomPlanner:
  replicas: 0
bloomBuilder:
  replicas: 0
bloomGateway:
  replicas: 0
backend:
  replicas: 0
read:
  replicas: 0
write:
  replicas: 0
singleBinary:
  replicas: 0

# This exposes the Loki gateway so it can be written to and queried externally
gateway:
  service:
    type: LoadBalancer

loki:
  commonConfig:
    replication_factor: 1
  schemaConfig:
    configs:
      - from: "2024-04-01"
        store: tsdb
        object_store: alibabacloud
        schema: v13
        index:
          prefix: loki_index_
          period: 24h
  storage:
    type: alibabacloud
    alibabacloud:
      bucket: bot-builder-loki-storage-2
      endpoint: oss-cn-hongkong.aliyuncs.com
      access_key_id: <YOUR_OSS_ACCESS_KEY_ID>         # replace with your own OSS credentials;
      secret_access_key: <YOUR_OSS_SECRET_ACCESS_KEY> # never commit real keys
    bucketNames:
      chunks: bot-builder-loki-storage-2
  ingester:
    chunk_encoding: snappy
  querier:
    # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
    max_concurrent: 4
  pattern_ingester:
    enabled: true
  limits_config:
    allow_structured_metadata: true
    volume_enabled: true
  auth_enabled: false
Apply the settings and deploy to Kubernetes via Helm:
helm upgrade --install loki grafana/loki -f loki/values.yaml
Make sure all the pods and volumes are available.
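A quick check with kubectl (a minimal sketch, assuming the release name loki and the current namespace):
kubectl get pods -l app.kubernetes.io/instance=loki
kubectl get pvc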
Declare the Grafana values.yaml
adminUser: admin
adminPassword: admin

persistence:
  type: pvc
  enabled: true
  storageClassName: "alicloud-disk-essd"
  size: 20Gi

datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      - name: Loki
        uid: loki
        type: loki
        url: http://loki-2-gateway
        isDefault: false

grafana.ini:
  server:
    domain: ''
    root_url: "%(protocol)s://%(domain)s/grafana2/"
    serve_from_sub_path: true
Apply the settings and deploy:
helm upgrade --install grafana grafana/grafana -f grafana/values.yaml
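To reach the Grafana UI before any ingress is configured, you can port-forward the service (a sketch, assuming the release name grafana and the chart's default service port 80):
kubectl port-forward svc/grafana 3000:80
# then open http://localhost:3000 and log in with the admin user/password from values.yaml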
Set up the Loki data source (connect it to your deployed Loki gateway).
Test logging by posting a message to the Loki gateway; each entry in values is [timestamp in nanoseconds, log line], so make sure the timestamp is recent:
curl -XPOST -H "Content-Type: application/json" -H "X-Scope-OrgID: cg" http://loki-gateway/loki/api/v1/push -d '{
  "streams": [{
    "stream": { "app": "test" },
    "values": [[ "1746173727000000000", "This is a test log message" ]]
  }]
}'
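To confirm the entry was ingested, you can query it back through the same gateway (a minimal check, assuming the gateway is reachable at http://loki-gateway and the same cg tenant header):
curl -G -H "X-Scope-OrgID: cg" http://loki-gateway/loki/api/v1/query_range \
  --data-urlencode 'query={app="test"}' \
  --data-urlencode 'limit=10'
# expects the pushed timestamp to fall within the default one-hour query window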
Create config-map.yaml (Promtail sidecar)
apiVersion: v1
kind: ConfigMap
metadata:
  name: "promtail-test-configmap"
data:
  test: "test"
---
# declare the promtail settings
apiVersion: v1
kind: ConfigMap
metadata:
  name: promtail-test-configmap-promtail
data:
  promtail.yaml: |
    server:
      http_listen_port: 9080
      grpc_listen_port: 0
      log_level: "debug"

    positions:
      filename: /tmp/positions.yaml

    # send to Loki
    clients: # Specify target
      - url: ${LOKI_PUSH_URL}

    # scrape the log files
    scrape_configs:
      - job_name: promtail-sidecar-${DEPLOYMENT_NAME}
        pipeline_stages:
          - cri: {}
        static_configs:
          - targets:
              - localhost
            labels:
              app: ${DEPLOYMENT_NAME}
              job: ${NAMESPACE}/${DEPLOYMENT_NAME}
              pod: ${POD_NAME}
              node_name: ${NODE_NAME}
              namespace: ${NAMESPACE}
              __path__: /tmp/*.log # Any .log file in the emptyDir volume
Create deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: promtail-test
  labels:
    app.kubernetes.io/instance: promtail-test
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: promtail-test
    app.kubernetes.io/version: v1.0.0-a
    helm.sh/chart: promtail-test-1.0.0-a
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: promtail-test
      app.kubernetes.io/name: promtail-test
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: promtail-test
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: promtail-test
        app.kubernetes.io/version: v1.0.0-a
        helm.sh/chart: promtail-test-1.0.0-a
    spec:
      volumes:
        - name: config-volume
          configMap:
            name: promtail-test-configmap
            defaultMode: 420
        - name: promtail-log-file-volume
          emptyDir: {}
        - name: promtail-config-volume
          configMap:
            name: promtail-test-configmap-promtail
            defaultMode: 420
      containers:
        - name: promtail-test
          image: docker.io/hirise01/promtail-test:latest
          imagePullPolicy: Always
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          envFrom:
            - configMapRef:
                name: promtail-test-configmap
          resources:
            limits:
              cpu: 500m
              memory: 1Gi
            requests:
              cpu: 500m
              memory: 1Gi
          # mount the shared log directory
          volumeMounts:
            - name: promtail-log-file-volume
              mountPath: /tmp
        - name: promtail
          image: grafana/promtail:latest
          args:
            - '-config.file=/etc/promtail/promtail.yaml'
            - '-log.level=debug'
            - '-config.expand-env=true'
          resources: {}
          # mount the shared log directory and the promtail settings
          volumeMounts:
            - name: promtail-config-volume
              mountPath: /etc/promtail
            - name: promtail-log-file-volume
              mountPath: /tmp
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: DEPLOYMENT_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels['app.kubernetes.io/instance']
            - name: LOKI_PUSH_URL
              value: http://loki-gateway/loki/api/v1/push
Create service.yaml
apiVersion: v1
kind: Service
metadata:
  name: promtail-test
  labels:
    app.kubernetes.io/instance: promtail-test
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: promtail-test
    app.kubernetes.io/version: v1.0.40
    helm.sh/chart: promtail-test-0.1.0
  annotations:
    meta.helm.sh/release-name: promtail-test
    meta.helm.sh/release-namespace: ebot-poc
spec:
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 3000
  selector:
    app.kubernetes.io/instance: promtail-test
    app.kubernetes.io/name: promtail-test
Apply the application:
helm install promtail-test helm -f ./helm/values.yaml -n ebot-poc
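To verify the sidecar is tailing and shipping logs, check the pod status and the Promtail container logs (assuming the release was installed into the ebot-poc namespace as above):
kubectl get pods -n ebot-poc -l app.kubernetes.io/instance=promtail-test
kubectl logs deployment/promtail-test -c promtail -n ebot-poc --tail=50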
Create config-map.yaml (Alloy sidecar)
apiVersion: v1
kind: ConfigMap
metadata:
  name: "promtail-test-configmap"
data:
  test: "test"
---
# declare the Alloy settings
apiVersion: v1
kind: ConfigMap
metadata:
  name: promtail-test-configmap-alloy
data:
  config.alloy: |
    // for debugging via the Alloy UI
    livedebugging {
      enabled = true
    }

    // alternative: discover files and relabel before writing
    // local.file_match "local_files" {
    //   path_targets = [{"__path__" = "/tmp/*.log"}]
    //   sync_period  = "5s"
    // }
    // loki.source.file "log_scrape" {
    //   targets       = local.file_match.local_files.targets
    //   forward_to    = [loki.process.append_label.receiver]
    //   tail_from_end = true
    // }
    // loki.process "append_label" {
    //   stage.labels {
    //     values = {
    //       "app" = "promtail-test",
    //     }
    //   }
    //   forward_to = [loki.write.grafana_loki.receiver]
    // }

    // tail the log file
    loki.source.file "example" {
      targets = [
        {__path__ = "/tmp/promtail-test.log", "app" = "example"},
      ]
      forward_to = [loki.write.grafana_loki.receiver]
    }

    // push to Loki
    loki.write "grafana_loki" {
      endpoint {
        url = "http://loki-gateway/loki/api/v1/push"
      }
    }
Declare deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: promtail-test
  labels:
    app.kubernetes.io/instance: promtail-test
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: promtail-test
    app.kubernetes.io/version: v1.0.0-a
    helm.sh/chart: promtail-test-1.0.0-a
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: promtail-test
      app.kubernetes.io/name: promtail-test
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: promtail-test
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: promtail-test
        app.kubernetes.io/version: v1.0.0-a
        helm.sh/chart: promtail-test-1.0.0-a
    spec:
      volumes:
        - name: config-volume
          configMap:
            name: promtail-test-configmap
            defaultMode: 420
        - name: promtail-log-file-volume
          emptyDir: {}
        - name: promtail-config-volume
          configMap:
            name: promtail-test-configmap-alloy
            defaultMode: 420
      containers:
        - name: promtail-test
          image: docker.io/hirise01/promtail-test:latest
          imagePullPolicy: Always
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          envFrom:
            - configMapRef:
                name: promtail-test-configmap
          resources:
            limits:
              cpu: 500m
              memory: 1Gi
            requests:
              cpu: 500m
              memory: 1Gi
          volumeMounts:
            - name: promtail-log-file-volume
              mountPath: /tmp
        - name: alloy
          image: grafana/alloy:latest
          ports:
            - name: http
              containerPort: 12345
              protocol: TCP
          resources: {}
          volumeMounts:
            - name: promtail-config-volume
              mountPath: /etc/alloy
            - name: promtail-log-file-volume
              mountPath: /tmp
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: DEPLOYMENT_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels['app.kubernetes.io/instance']
            - name: LOKI_PUSH_URL
              value: http://loki-2-gateway/loki/api/v1/push
If you want to debug the Alloy flow, port-forward port 12345 and open localhost:12345/graph in a browser.
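A sketch of the port-forward, assuming the deployment keeps the promtail-test name and the ebot-poc namespace used above:
kubectl port-forward deployment/promtail-test 12345:12345 -n ebot-poc
# then open http://localhost:12345/graph to inspect the component graph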