telemetry

royalcat 2025-04-22 02:40:28 +04:00
parent 70d2620129
commit 3f4c93de57
10 changed files with 826 additions and 50 deletions

@@ -0,0 +1,340 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: release-name-opentelemetry-collector
namespace: opentelemetry-operator-system
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: standalone-collector
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: release-name-opentelemetry-collector
namespace: opentelemetry-operator-system
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: standalone-collector
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.deployment.name
- k8s.statefulset.name
- k8s.daemonset.name
- k8s.cronjob.name
- k8s.job.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
k8s_cluster:
collection_interval: 10s
k8sobjects:
objects:
- exclude_watch_type:
- DELETED
group: events.k8s.io
mode: watch
name: events
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
- k8sobjects
metrics:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
- prometheus
- k8s_cluster
traces:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888
---
# Source: opentelemetry-collector/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: release-name-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: standalone-collector
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["daemonsets", "deployments", "replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch", "list"]
---
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: release-name-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: standalone-collector
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: release-name-opentelemetry-collector
subjects:
- kind: ServiceAccount
name: release-name-opentelemetry-collector
namespace: opentelemetry-operator-system
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: release-name-opentelemetry-collector
namespace: opentelemetry-operator-system
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: standalone-collector
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
component: standalone-collector
internalTrafficPolicy: Cluster
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: release-name-opentelemetry-collector
namespace: opentelemetry-operator-system
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: standalone-collector
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f0ed171eee9745ba4172872bdeaf2fec411d70861867f59806b75ac4e8b05261
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
component: standalone-collector
spec:
serviceAccountName: release-name-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.122.1"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: release-name-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false
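
In-cluster workloads can export telemetry to this standalone collector through the ClusterIP Service rendered above. A minimal sketch of the pod wiring, assuming an application that reads the standard OpenTelemetry SDK environment variables (the application container itself is hypothetical; the Service DNS name and port 4317 come straight from the manifests):

env:
  - name: OTEL_EXPORTER_OTLP_ENDPOINT
    # gRPC OTLP receiver exposed by the Service above
    value: http://release-name-opentelemetry-collector.opentelemetry-operator-system.svc.cluster.local:4317
  - name: OTEL_EXPORTER_OTLP_PROTOCOL
    value: grpc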

@@ -0,0 +1,11 @@
mode: deployment
replicaCount: 1
image:
repository: "otel/opentelemetry-collector-k8s"
presets:
kubernetesAttributes:
enabled: true
clusterMetrics:
enabled: true
kubernetesEvents:
enabled: true
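
These are the chart values behind the standalone manifests above: deployment mode with a single replica, plus the kubernetesAttributes, clusterMetrics, and kubernetesEvents presets, which enable the k8sattributes processor, the k8s_cluster receiver, and the k8sobjects events receiver seen in the rendered config. Rendering them with helm template release-name open-telemetry/opentelemetry-collector --namespace opentelemetry-operator-system --values <values-file> (assuming the open-telemetry chart repo is added) should reproduce those manifests; the explicit release-name matches the rendered resource names.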

@ -1,32 +0,0 @@
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
name: simplest
namespace: opentelemetry-operator-system
spec:
config:
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
processors:
memory_limiter:
check_interval: 1s
limit_percentage: 75
spike_limit_percentage: 15
batch:
send_batch_size: 10000
timeout: 10s
exporters:
debug: {}
service:
pipelines:
traces:
receivers: [otlp]
processors: [memory_limiter, batch]
exporters: [debug]

@@ -0,0 +1,364 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: release-name-opentelemetry-collector
namespace: opentelemetry-operator-system
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent-collector
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: release-name-opentelemetry-collector-agent
namespace: opentelemetry-operator-system
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent-collector
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.deployment.name
- k8s.statefulset.name
- k8s.daemonset.name
- k8s.cronjob.name
- k8s.job.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
filter:
node_from_env_var: K8S_NODE_NAME
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
filelog:
exclude: []
include:
- /var/log/pods/*/*/*.log
include_file_name: false
include_file_path: true
operators:
- id: container-parser
max_log_size: 102400
type: container
retry_on_failure:
enabled: true
start_at: end
hostmetrics:
collection_interval: 10s
root_path: /hostfs
scrapers:
cpu: null
disk: null
filesystem:
exclude_fs_types:
fs_types:
- autofs
- binfmt_misc
- bpf
- cgroup2
- configfs
- debugfs
- devpts
- devtmpfs
- fusectl
- hugetlbfs
- iso9660
- mqueue
- nsfs
- overlay
- proc
- procfs
- pstore
- rpc_pipefs
- securityfs
- selinuxfs
- squashfs
- sysfs
- tracefs
match_type: strict
exclude_mount_points:
match_type: regexp
mount_points:
- /dev/*
- /proc/*
- /sys/*
- /run/k3s/containerd/*
- /var/lib/docker/*
- /var/lib/kubelet/*
- /snap/*
load: null
memory: null
network: null
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
kubeletstats:
auth_type: serviceAccount
collection_interval: 20s
endpoint: ${env:K8S_NODE_IP}:10250
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
- filelog
metrics:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
- prometheus
- hostmetrics
- kubeletstats
traces:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888
---
# Source: opentelemetry-collector/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: release-name-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent-collector
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes/stats"]
verbs: ["get", "watch", "list"]
---
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: release-name-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent-collector
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: release-name-opentelemetry-collector
subjects:
- kind: ServiceAccount
name: release-name-opentelemetry-collector
namespace: opentelemetry-operator-system
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: release-name-opentelemetry-collector-agent
namespace: opentelemetry-operator-system
labels:
helm.sh/chart: opentelemetry-collector-0.120.1
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "0.122.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: agent-collector
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 2e2674db160232bba09fa7781298de92185fb5e60b768222469f78d0525b8a69
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: release-name
component: agent-collector
spec:
serviceAccountName: release-name-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.122.1"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: K8S_NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: varlogpods
mountPath: /var/log/pods
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: hostfs
mountPath: /hostfs
readOnly: true
mountPropagation: HostToContainer
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: release-name-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: varlogpods
hostPath:
path: /var/log/pods
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: hostfs
hostPath:
path: /
hostNetwork: false
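
Because the agent DaemonSet publishes hostPorts, pods can send telemetry to the collector on their own node instead of going through a Service. A minimal sketch of the env wiring for such a pod (the OTEL_* variables are standard SDK settings; port 4317 matches the hostPort above):

env:
  - name: NODE_IP
    valueFrom:
      fieldRef:
        fieldPath: status.hostIP
  - name: OTEL_EXPORTER_OTLP_ENDPOINT
    # Kubernetes expands $(NODE_IP) because it is declared earlier in this env list
    value: http://$(NODE_IP):4317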

@@ -0,0 +1,13 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
presets:
kubernetesAttributes:
enabled: true
logsCollection:
enabled: true
includeCollectorLogs: true
kubeletMetrics:
enabled: true
hostMetrics:
enabled: true
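
Both rendered configs still terminate in the debug exporter. A hedged sketch of extra values that would route traces to a real backend instead, assuming a hypothetical OTLP/HTTP endpoint (the chart merges a top-level config block into the relay config it renders; list values such as a pipeline's exporters are replaced, not appended):

config:
  exporters:
    otlphttp:
      # hypothetical backend endpoint
      endpoint: https://otlp.example.internal:4318
  service:
    pipelines:
      traces:
        exporters: [otlphttp]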