Code development platform for open source projects from the European Union institutions :large_blue_circle: EU Login authentication by SMS has been phased out. To see alternatives please check here

Skip to content
Snippets Groups Projects
Commit 038d4d82 authored by Natalia Szakiel's avatar Natalia Szakiel
Browse files

Merge branch 'develop' into 'main'

Develop

See merge request !48
parents 68fe0778 11435aa1
No related branches found
No related tags found
4 merge requests!69Feature/merge develop into main,!67Feature/resolved conflicts,!57Feature/temp merge fix,!48Develop
Pipeline #229274 failed
Showing
with 1826 additions and 0 deletions
*.lock
*.tgz
\ No newline at end of file
.git
# Simpl ELK
Git project dedicated for ELK on Simpl.
Requirements:
- the kube-state-metrics service (not installed by default) must be running on the Kubernetes cluster to fetch metric data about pod status
https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-module-kubernetes.html#_state_and_event
Loading dashboards
- download dashboards.ndjson file from repo directory eck-monitoring/kibana/dashdoards/
- login to Kibana
- go to Stack Management -> Saved Objects
- click import and choose downloaded file
- next press import
Variables which must be changed/verified in values.yml during installation on another Kubernetes cluster:
- metricbeat.dropEvent - set correct namespace. ( system is mandatory )
# Chart identity — the ${PROJECT_RELEASE_VERSION} placeholder is substituted by CI at package time.
name: eck-monitoring
version: ${PROJECT_RELEASE_VERSION}
appVersion: "${PROJECT_RELEASE_VERSION}"
#version: 0.1.0
This diff is collapsed.
PUT kbn:/api/alerting/rule/a8a775e1-7220-405e-863e-e4ea9e61b039
{
"name": "Tags failures business",
"tags": [],
"schedule": {
"interval": "12m"
},
"params": {
"criteria": [
{
"comparator": ">",
"metrics": [
{
"name": "A",
"aggType": "count"
}
],
"threshold": [
0
],
"timeSize": 10,
"timeUnit": "m"
}
],
"alertOnNoData": true,
"alertOnGroupDisappear": true,
"searchConfiguration": {
"query": {
"query": "tags : \"_dateparsefailure\" or tags : \"_grokparsefailure\"",
"language": "kuery"
},
"index": "ee15d384-2d79-43e8-90a4-8012b5c7a46a"
}
},
"actions": [
{
"group": "custom_threshold.fired",
"id": "23a272da-9efb-473c-9486-1466a09c3ada",
"params": {
"documents": [
{
"@timestamp": "{{date}}",
"rule_id": "{{rule.id}}",
"rule_name": "{{rule.name}}",
"context_reason": "{{context.reason}}"
}
]
},
"frequency": {
"notify_when": "onActiveAlert",
"throttle": null,
"summary": false
},
"uuid": "86e129ad-1741-4d05-994a-f17d9d11ca76"
}
]
}
\ No newline at end of file
{{/*
kibana.dns — external DNS name for Kibana:
  kibana.<namespaceTag|release namespace>.<domainSuffix>
*/}}
{{- define "kibana.dns" -}}
kibana.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
{{/*
kibana.dns.fullPath — full Kibana URL: https://<kibana.dns>, plus port 25602
when standard HTTPS ports are not exposed, plus the ingress subpath when it
is set and is not "/".
*/}}
{{- define "kibana.dns.fullPath" -}}
https://{{ template "kibana.dns" . }}{{- if eq .Values.openStandardHttpsPorts false -}}:25602{{- end -}}{{- if and (.Values.kibana.ingressSubpath) (ne "/" .Values.kibana.ingressSubpath) }}{{- .Values.kibana.ingressSubpath}}{{- end }}
{{- end -}}
{{/*
elasticsearch.dns — external DNS name for the Elasticsearch HTTP API.
*/}}
{{- define "elasticsearch.dns" -}}
elasticsearch.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
{{/*
logstash.dns — external DNS name for the Logstash beats input:
  logstash.<pipelines group>.<namespaceTag|release namespace>.<domainSuffix>
*/}}
{{- define "logstash.dns" -}}
logstash.{{ .Values.logstash.beats.pipelines_group_name }}.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
{{/*
logstash.dns.array — comma-separated per-instance DNS names
("<prefix>0.<logstash.dns>,<prefix>1.<logstash.dns>,...") for
.Values.logstash.count instances. NOTE: the un-chomped assignment lines below
emit leading whitespace/newlines, so callers must pipe the result through
"trim" (the ingress annotation does).
*/}}
{{- define "logstash.dns.array" -}}
{{ $concatUrl := ( printf ".%s" (include "logstash.dns" .)) }}
{{ $urlPrefix := (default "l" .Values.logstash.urlPrefix) }}
{{ $maxRange := (.Values.logstash.count |int ) }}
{{- range $index := until $maxRange -}}
{{- $urlPrefix}}{{$index }}{{ $concatUrl }}{{if lt $index (sub $maxRange 1) }},{{end}}
{{- end -}}
{{- end -}}
{{/*
filebeat.dns — external DNS name for the demo-log Filebeat.
*/}}
{{- define "filebeat.dns" -}}
filebeat.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
{{/*
filebeat4agents.dns — external DNS name for the per-node (DaemonSet) Filebeat.
*/}}
{{- define "filebeat4agents.dns" -}}
filebeat4agents.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
\ No newline at end of file
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: {{ .Release.Name }}-elasticsearch
spec:
  version: {{ .Values.elasticVersion }}
  image: {{ .Values.elasticsearch.image }}:{{ default .Values.elasticVersion .Values.elasticsearch.imageTag }}
  # Ship the cluster's own logs to itself for stack monitoring.
  monitoring:
    logs:
      elasticsearchRefs:
        - name: {{ .Release.Name }}-elasticsearch
  auth:
    roles:
      - secretName: logstash-writer-role-secret
      - secretName: user-monitoring-role-secret
    fileRealm:
      - secretName: logstash-writer-secret
      - secretName: user-monitoring-secret
  nodeSets:
    {{- range .Values.elasticsearch.nodeSets }}
    - name: {{ .name }}
      count: {{ .count }}
      config:
        xpack.security.authc.token.enabled: true
        http.cors.enabled: true
        http.cors.allow-origin: "*"
        # Quoted: a single comma-separated string, as the ES setting expects.
        http.cors.allow-methods: "OPTIONS, HEAD, GET, POST, PUT, DELETE"
        http.cors.allow-headers: "kbn-version, Origin, X-Requested-With, Content-Type, Accept, Engaged-Auth-Token"
        xpack.monitoring.collection.enabled: true
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: {{ .diskSpace }}
            storageClassName: {{ .storageClassName }}
      podTemplate:
        metadata:
          labels:
            stack-namespace: {{ $.Release.Namespace }}
        spec:
          initContainers:
            # Kernel tuning required by Elasticsearch; must run as privileged root.
            - name: sysctl
              imagePullPolicy: Always
              command: ["/bin/bash", "-c"]
              args:
                - |
                  sysctl -w vm.max_map_count=2628576 &&
                  sysctl -w net.ipv4.tcp_retries2=5 &&
                  sysctl -w vm.swappiness=1
              securityContext:
                runAsUser: 0
                runAsGroup: 0
                runAsNonRoot: false
                privileged: true
          containers:
            - name: elasticsearch
              volumeMounts:
                - name: certs
                  mountPath: /usr/share/elasticsearch/config/certs
              imagePullPolicy: Always
              securityContext:
                runAsNonRoot: true
              {{- with .resources }}
              resources:
                {{- toYaml . | nindent 16 }}
              {{- end }}
              env:
                - name: ELASTICSEARCH_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: {{ $.Release.Name }}-elasticsearch-es-elastic-user
                      key: elastic
                {{- with $.Values.elasticsearch.env }}
                {{- toYaml . | nindent 16 }}
                {{- end }}
          volumes:
            - name: certs
              secret:
                secretName: {{ $.Release.Name }}-elasticsearch-http-cert-secret-internal
    {{- end }}
  http:
    tls:
      certificate:
        secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-elasticsearch-http
  annotations:
    # Elasticsearch itself serves HTTPS, so nginx must proxy with TLS.
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    acme.cert-manager.io/http01-edit-in-place: "true"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - {{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc
      secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
  rules:
    - host: {{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: {{ .Release.Name }}-elasticsearch-es-http
                port:
                  number: 9200
---
apiVersion: v1
kind: Secret
metadata:
  name: logstash-writer-secret
type: kubernetes.io/basic-auth
data:
  username: {{ "logstash-writer" | b64enc }}
  {{- if .Release.IsInstall }}
  # Fresh install: generate a random password once.
  password: {{ randAlphaNum 20 | b64enc }}
  {{- else }}
  # Upgrade: keep the existing password. Guarded so a missing secret
  # (e.g. manual deletion, or rendering without cluster access) falls back
  # to a new random password instead of an "index of nil" template error.
  {{- $existing := (lookup "v1" "Secret" .Release.Namespace "logstash-writer-secret") }}
  password: {{ (get (default dict $existing.data) "password") | default (randAlphaNum 20 | b64enc) }}
  {{- end }}
  roles: {{ "logstash-writer-role" | b64enc }}
---
kind: Secret
apiVersion: v1
metadata:
  name: logstash-writer-role-secret
stringData:
  # Elasticsearch roles file consumed by the ECK "auth.roles" secret mount.
  roles.yml: |-
    logstash-writer-role:
      cluster: ["manage_index_templates", "monitor", "manage_ilm"]
      indices:
        - names: [ '*' ]
          privileges: ["read","write","create","create_index","manage","manage_ilm"]
---
apiVersion: v1
kind: Secret
metadata:
  name: user-monitoring-secret
type: kubernetes.io/basic-auth
data:
  username: {{ "monitoring_user" | b64enc }}
  {{- if .Release.IsInstall }}
  # Fresh install: generate a random password once.
  password: {{ randAlphaNum 20 | b64enc }}
  {{- else }}
  # Upgrade: keep the existing password. Guarded so a missing secret
  # falls back to a new random password instead of an "index of nil" error.
  {{- $existing := (lookup "v1" "Secret" .Release.Namespace "user-monitoring-secret") }}
  password: {{ (get (default dict $existing.data) "password") | default (randAlphaNum 20 | b64enc) }}
  {{- end }}
  roles: {{ "user-monitoring-role" | b64enc }}
---
kind: Secret
apiVersion: v1
metadata:
  name: user-monitoring-role-secret
stringData:
  # Elasticsearch roles file consumed by the ECK "auth.roles" secret mount.
  roles.yml: |-
    user-monitoring-role:
      cluster:
        - monitor
        - manage_index_templates
        - manage_ingest_pipelines
        - manage_ilm
        - read_ilm
        - manage
        - cluster:admin/xpack/watcher/watch/put
        - cluster:admin/xpack/watcher/watch/delete
      indices:
        - names:
            - .monitoring-*
          privileges:
            - all
        - names:
            - .ds-*
          privileges:
            - all
        - names:
            - metricbeat-*
          privileges:
            - manage
            - read
            - create_doc
            - view_index_metadata
            - create_index
        - names:
            - filebeat-*
          privileges:
            - manage
            - read
            - create_doc
            - view_index_metadata
            - create_index
      applications: []
---
# Self-signed root issuer used only to bootstrap the internal CA below.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: selfsigned-ca
spec:
  selfSigned: {}
---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: Role
# metadata:
#   name: issuer-reader
# rules:
#   - apiGroups: ["cert-manager.io"]
#     resources: ["issuers"]
#     verbs: ["get", "list", "watch"]
---
# Internal CA certificate signed by the self-signed issuer.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: internal-ca
spec:
  isCA: true
  commonName: internal-ca
  secretName: internal-ca
  issuerRef:
    name: selfsigned-ca
    kind: Issuer
---
# CA issuer backed by the internal CA; signs all in-cluster certificates.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: internal-issuer
spec:
  ca:
    secretName: internal-ca
---
# Serving certificate for the Elasticsearch HTTP endpoint.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: elasticsearch-certificate
spec:
  secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
  duration: {{ .Values.elasticsearch.cert.duration }}
  renewBefore: {{ .Values.elasticsearch.cert.renewBefore }}
  commonName: {{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc
  dnsNames:
    - {{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}
    - {{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc
  issuerRef:
    name: internal-issuer
    kind: Issuer
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: filebeatagents
spec:
  image: {{ .Values.filebeat4agents.image }}:{{ default .Values.elasticVersion .Values.filebeat4agents.imageTag }}
  version: {{ .Values.elasticVersion }}
  type: filebeat
  daemonSet:
    podTemplate:
      spec:
        serviceAccountName: filebeat4agents-account
        automountServiceAccountToken: true
        terminationGracePeriodSeconds: 30
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: true # Allows to provide richer host metadata
        containers:
          - name: filebeat
            command: ['sh', '-c', 'exec /usr/share/filebeat/filebeat -e -c /usr/share/filebeat/filebeat.yml']
            securityContext:
              # Root needed to read host log paths mounted below.
              runAsUser: 0
              # If using Red Hat OpenShift uncomment this:
              #privileged: true
            volumeMounts:
              - name: varlogcontainers
                mountPath: /var/log/containers
              - name: varlogpods
                mountPath: /var/log/pods
              - name: varlibdockercontainers
                mountPath: /var/lib/docker/containers
              - mountPath: /usr/share/filebeat/filebeat.yml
                subPath: filebeat.yml
                name: config
              - mountPath: /usr/share/filebeat/es-certs # used for monitoring
                name: es-certs
              - mountPath: /usr/share/filebeat/certs
                name: filebeat-certs
            env:
              - name: NODE_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
              - name: ELASTIC_ELASTICSEARCH_ES_HOSTS
                # Was hard-coded to the "elastic" release name; templated for portability.
                value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
              - name: LOGSTASH_HOSTS
                value: 'logstash-{{ .Values.logstash.beats.pipelines_group_name }}-ls-api.{{ .Release.Namespace }}:5044'
              - name: MONITORING_USER
                valueFrom:
                  secretKeyRef:
                    name: user-monitoring-secret
                    key: username
              - name: MONITORING_PASSWORD
                valueFrom:
                  secretKeyRef:
                    name: user-monitoring-secret
                    key: password
        volumes:
          - name: varlogcontainers
            hostPath:
              path: /var/log/containers
          - name: varlogpods
            hostPath:
              path: /var/log/pods
          - name: varlibdockercontainers
            hostPath:
              path: /var/lib/docker/containers
          - name: config
            secret:
              secretName: filebeat4agents-config
              defaultMode: 0555
          - name: es-certs # used for monitoring
            secret:
              # Was hard-coded to "elastic-..."; must match the Certificate above.
              secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
          - name: filebeat-certs
            secret:
              secretName: filebeat4agents-certs-secret
---
apiVersion: v1
kind: Secret
metadata:
  name: filebeat4agents-config
  labels:
    app: filebeat
type: Opaque
data:
  # Concatenated input + rendered output sections, base64-encoded as a whole.
  filebeat.yml: |
    {{- printf "%s\n%s" .Values.filebeat4agents.input (tpl .Values.filebeat4agents.output $) | b64enc | nindent 4 }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: filebeat4agents-certificate
spec:
  secretName: filebeat4agents-certs-secret
  duration: {{ .Values.filebeat4agents.cert.duration }}
  renewBefore: {{ .Values.filebeat4agents.cert.renewBefore }}
  commonName: {{ template "filebeat4agents.dns" . }}
  dnsNames:
    - "{{ template "filebeat4agents.dns" . }}"
  issuerRef:
    name: internal-issuer
    kind: Issuer
    group: cert-manager.io
---
# Read access to pod/node metadata for Filebeat's add_kubernetes_metadata.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat4agents-role
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["apps"]
    resources:
      - replicasets
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["batch"]
    resources:
      - jobs
    verbs:
      - get
      - list
      - watch
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat4agents-account
  namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat4agents-rolebinding
subjects:
  - kind: ServiceAccount
    name: filebeat4agents-account
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: filebeat4agents-role
  apiGroup: rbac.authorization.k8s.io
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: filebeat
spec:
  image: {{ .Values.filebeat.image }}:{{ default .Values.elasticVersion .Values.filebeat.imageTag }}
  version: {{ .Values.elasticVersion }}
  type: filebeat
  elasticsearchRef:
    name: {{ .Release.Name }}-elasticsearch
  deployment:
    replicas: {{ .Values.filebeat.count }}
    podTemplate:
      spec:
        securityContext:
          runAsUser: 0
          fsGroup: 1000
        initContainers:
          # NOTE(review): the branch is hard-coded; consider exposing it as a value.
          - name: git-clone
            image: alpine/git
            args:
              - clone
              - --single-branch
              - --branch
              - feature/removed-roles # or the branch where your logs are
              - https://code.europa.eu/simpl/simpl-open/development/monitoring/eck-monitoring.git
              - /mnt/repo
            volumeMounts:
              - name: repo
                mountPath: /mnt/repo
        containers:
          - name: filebeat
            # Starts the demo log generator in the background, then runs filebeat.
            command: ['sh', '-c', 'exec /usr/share/filebeat/logs/example.sh & exec /usr/share/filebeat/filebeat -e -c /usr/share/filebeat/filebeat.yml']
            volumeMounts:
              - mountPath: /usr/share/filebeat/filebeat.yml
                subPath: filebeat.yml
                name: config
              - mountPath: /usr/share/filebeat/certs
                name: filebeat-certs
              - mountPath: /usr/share/filebeat/es-certs # used for monitoring
                name: es-certs
              - mountPath: /mnt/repo
                name: repo
              - mountPath: /usr/share/filebeat/logs/example.sh
                subPath: example.sh
                name: example-script
            env:
              - name: ELASTIC_ELASTICSEARCH_ES_HOSTS
                # Was hard-coded to the "elastic" release name; templated for portability.
                value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
              - name: LOGSTASH_HOSTS
                value: 'logstash-{{ .Values.logstash.beats.pipelines_group_name }}-ls-api.{{ .Release.Namespace }}:5044'
              - name: MONITORING_USER
                valueFrom:
                  secretKeyRef:
                    name: user-monitoring-secret
                    key: username
              - name: MONITORING_PASSWORD
                valueFrom:
                  secretKeyRef:
                    name: user-monitoring-secret
                    key: password
        volumes:
          - name: config
            secret:
              secretName: filebeat-config
              defaultMode: 0555
          - name: filebeat-certs
            secret:
              secretName: filebeat-certs-secret
          - name: es-certs # used for monitoring
            secret:
              # Was hard-coded to "elastic-..."; must match the Certificate above.
              secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
          - name: repo
            emptyDir: {}
          - name: example-script
            configMap:
              name: filebeat-example-script
              defaultMode: 0777
---
apiVersion: v1
kind: Secret
metadata:
  name: filebeat-config
  labels:
    app: filebeat
type: Opaque
data:
  # Concatenated input + rendered output sections, base64-encoded as a whole.
  filebeat.yml: |
    {{- printf "%s\n%s" .Values.filebeat.input (tpl .Values.filebeat.output $) | b64enc | nindent 4 }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: {{ .Release.Namespace }}
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
#   name: filebeat-issuer
# roleRef:
#   apiGroup: rbac.authorization.k8s.io
#   kind: Role
#   name: issuer-reader
# subjects:
#   - kind: ServiceAccount
#     name: filebeat
#     namespace: {{ .Release.Namespace }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: filebeat-certificate
spec:
  secretName: filebeat-certs-secret
  duration: {{ .Values.filebeat.cert.duration }}
  renewBefore: {{ .Values.filebeat.cert.renewBefore }}
  commonName: {{ template "filebeat.dns" . }}
  dnsNames:
    - "{{ template "filebeat.dns" . }}"
  issuerRef:
    name: internal-issuer
    kind: Issuer
    group: cert-manager.io
---
# Demo log generator mounted into the filebeat pod as example.sh.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-example-script
data:
  example.sh: |
    #!/bin/bash
    # Function to generate a random string of given length
    generate_random_string() {
      local LENGTH=$1
      tr -dc A-Za-z0-9 </dev/urandom | head -c $LENGTH
    }
    # Initialize the count variable
    count=0
    # Path to the log file
    LOG_FILE="/usr/share/filebeat/logs/example.log"
    # Clear the log file
    > "$LOG_FILE"
    # Number of messages to generate
    TOTAL_MESSAGES="{{ .Values.filebeat.totalMessages }}"
    # Messages per minute
    MESSAGES_PER_MINUTE="{{ .Values.filebeat.messagesPerMinute }}"
    # Generate log entries
    while [ "$count" -lt "$TOTAL_MESSAGES" ] || [ "$TOTAL_MESSAGES" -lt 0 ]
    do
      if [ "$MESSAGES_PER_MINUTE" -gt 0 ]; then
        sleep_time=$((60 / MESSAGES_PER_MINUTE)) # Integer division
        sleep $sleep_time
      fi
      ORIGINS=("originA" "originB" "originC" "originD" "originE")
      ORIGIN=${ORIGINS[$RANDOM % ${#ORIGINS[@]}]}
      DESTINATIONS=("destinationA" "destinationB" "destinationC" "destinationD" "destinationE")
      DESTINATION=${DESTINATIONS[$RANDOM % ${#DESTINATIONS[@]}]}
      BUSINESS_OPERATIONS=("operationA" "operationB" "operationC" "operationD" "operationE")
      BUSINESS_OPERATION=${BUSINESS_OPERATIONS[$RANDOM % ${#BUSINESS_OPERATIONS[@]}]}
      MESSAGE_TYPES=("request" "request ACK" "response" "response ACK")
      MESSAGE_TYPE=${MESSAGE_TYPES[$RANDOM % ${#MESSAGE_TYPES[@]}]}
      CORRELATION_ID=$(generate_random_string 20)
      TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
      echo "$TIMESTAMP|$ORIGIN|$DESTINATION|$BUSINESS_OPERATION|$MESSAGE_TYPE|$CORRELATION_ID" >> "$LOG_FILE"
      count=$((count + 1))
    done
    echo "Generated $count log entries and saved to $LOG_FILE"
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: heartbeat
  namespace: {{ .Release.Namespace }}
spec:
  type: heartbeat
  version: {{ .Values.elasticVersion }}
  elasticsearchRef:
    name: {{ .Release.Name }}-elasticsearch
  config:
    heartbeat.monitors:
      # Hosts were hard-coded to the "elastic" release in the "observability"
      # namespace (and a fixed external Kibana host); templated for portability.
      - type: tcp
        schedule: '@every 5s'
        hosts: ["{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200"]
      - type: tcp
        schedule: '@every 5s'
        hosts: ["{{ template "kibana.dns" . }}:443"]
      - type: icmp
        id: ping-myhost
        name: My Host Ping
        hosts: ["{{ .Release.Name }}-kibana-kb-http.{{ .Release.Namespace }}.svc"]
        schedule: '*/5 * * * * * *'
  deployment:
    replicas: 1
    podTemplate:
      spec:
        securityContext:
          runAsUser: 0
{{- if gt (.Values.kibana.count | int) 0 }}
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: {{ .Release.Name }}-kibana
spec:
  version: {{ .Values.elasticVersion }}
  count: {{ .Values.kibana.count }}
  image: {{ .Values.kibana.image }}:{{ default .Values.elasticVersion .Values.kibana.imageTag }}
  monitoring:
    logs:
      elasticsearchRefs:
        - name: {{ .Release.Name }}-elasticsearch
  config:
    server.ssl.enabled: true
    elasticsearch.requestTimeout: 120000
    #elasticsearch.ssl.verificationMode: certificate
    #elasticsearch.ssl.certificateAuthorities: ["/usr/share/kibana/config/certs/ca.crt"]
    server.publicBaseUrl: "{{ template "kibana.dns.fullPath" . }}"
    {{- if and (.Values.kibana.ingressSubpath) (ne "/" .Values.kibana.ingressSubpath) }}
    server.basePath: {{ .Values.kibana.ingressSubpath }}
    server.rewriteBasePath: true
    {{- end }}
    xpack.reporting.enabled: true
    xpack.reporting.kibanaServer.protocol: "https"
    {{- with .Values.kibana.config }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  elasticsearchRef:
    # Fixed swapped "default" arguments: the override value comes last, so
    # kibana.elasticsearchReleaseName (when set) now actually takes effect.
    name: {{ default .Release.Name .Values.kibana.elasticsearchReleaseName }}-elasticsearch
  podTemplate:
    metadata:
      labels:
        stack-namespace: {{ .Release.Namespace }}
    spec:
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
      containers:
        - name: kibana
          imagePullPolicy: Always
          {{- with .Values.kibana.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          #volumeMounts:
          #- name: es-certs
          #  mountPath: /usr/share/kibana/config/certs
          #- name: lets-encrypt-ca
          #  mountPath: /usr/share/kibana/config/certs-ca
          readinessProbe:
            httpGet:
              scheme: HTTPS
              # NOTE(review): renders an empty path when ingressSubpath is unset;
              # confirm falling back to the default probe path "/" is intended.
              path: {{- with .Values.kibana.ingressSubpath }} {{ . }} {{- end }}
              port: 5601
      #volumes:
      #- name: es-certs
      #  secret:
      #    secretName: elastic-elasticsearch-es-http-certs-internal
      #- name: lets-encrypt-ca
      #  secret:
      #    secretName: lets-encrypt-ca
  http:
    tls:
      certificate:
        secretName: {{ .Release.Name }}-kibana-cert-secret
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-kibana-dashboard
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    acme.cert-manager.io/http01-edit-in-place: "true"
    # NOTE(review): cluster-issuer name is hard-coded; consider exposing it as a value.
    cert-manager.io/cluster-issuer: dev-staging
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - {{ template "kibana.dns" . }}
      secretName: {{ .Release.Name }}-kibana-cert-secret
  rules:
    - host: {{ template "kibana.dns" . }}
      http:
        paths:
          - path: {{ default "/" .Values.kibana.ingressSubpath }}
            pathType: Prefix
            backend:
              service:
                name: {{ .Release.Name }}-kibana-kb-http
                port:
                  number: 5601
apiVersion: v1
kind: Secret
metadata:
name: lets-encrypt-ca
data:
ca.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZCakNDQXU2Z0F3SUJBZ0lSQUlwOVBoUFdMekR2STRhOUtRZHJOUGd3RFFZSktvWklodmNOQVFFTEJRQXcKVHpFTE1Ba0dBMVVFQmhNQ1ZWTXhLVEFuQmdOVkJBb1RJRWx1ZEdWeWJtVjBJRk5sWTNWeWFYUjVJRkpsYzJWaApjbU5vSUVkeWIzVndNUlV3RXdZRFZRUURFd3hKVTFKSElGSnZiM1FnV0RFd0hoY05NalF3TXpFek1EQXdNREF3CldoY05NamN3TXpFeU1qTTFPVFU1V2pBek1Rc3dDUVlEVlFRR0V3SlZVekVXTUJRR0ExVUVDaE1OVEdWMEozTWcKUlc1amNubHdkREVNTUFvR0ExVUVBeE1EVWpFeE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQgpDZ0tDQVFFQXVvZThYQnNBT2N2S0NzM1VaeEQ1QVR5bFRxVmh5eWJLVXZzVkFiZTVLUFVvSHUwbnN5UVlPV2NKCkRBanM0RHF3TzNjT3ZmUGxPVlJCREU2dVFkYVpkTjVSMis5Ny8xaTlxTGNUOXQ0eDFmSnl5WEpxQzROMGxaeEcKQUdRVW1mT3gyU0xaemFpU3Fod21lai8rNzFnRmV3aVZnZHR4RDQ3NzR6RUp1d20rVUUxZmo1RjJQVnFkbm9QeQo2Y1JtcytFR1prTklHSUJsb0RjWW1wdUVNcGV4c3IzRStCVUFuU2VJKytKakY1WnNteWRuUzhUYktGNXB3bm53ClNWemdKRkRoeEx5aEJheDdRRzBBdE1KQlA2ZFl1Qy9GWEp1bHV3bWU4Zjdyc0lVNS9hZ0s3MFhFZU90bEtzTFAKWHp6ZTQxeE5HL2NMSnl1cUMwSjNVMDk1YWgySDJRSURBUUFCbzRINE1JSDFNQTRHQTFVZER3RUIvd1FFQXdJQgpoakFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQWdZSUt3WUJCUVVIQXdFd0VnWURWUjBUQVFIL0JBZ3dCZ0VCCi93SUJBREFkQmdOVkhRNEVGZ1FVeGM5R3BPcjB3OEI2YkpYRUxiQmVraThtNDdrd0h3WURWUjBqQkJnd0ZvQVUKZWJSWjVudTI1ZVFCYzRBSWlNZ2FXUGJwbTI0d01nWUlLd1lCQlFVSEFRRUVKakFrTUNJR0NDc0dBUVVGQnpBQwpoaFpvZEhSd09pOHZlREV1YVM1c1pXNWpjaTV2Y21jdk1CTUdBMVVkSUFRTU1Bb3dDQVlHWjRFTUFRSUJNQ2NHCkExVWRId1FnTUI0d0hLQWFvQmlHRm1oMGRIQTZMeTk0TVM1akxteGxibU55TG05eVp5OHdEUVlKS29aSWh2Y04KQVFFTEJRQURnZ0lCQUU3aWlWMEtBeHlRT05EMUgvbHhYUGpEajdJM2lIcHZzQ1VmN2I2MzJJWUdqdWtKaE0xeQp2NEh6L01yUFUwanR2ZlpwUXRTbEVUNDF5Qk95a2gwRlgrb3UxTmo0U2NPdDlabVduTzhtMk9HMEpBdElJRTM4CjAxUzBxY1loeU9FMkcvOTNaQ2tYdWZCTDcxM3F6WG5RdjVDL3ZpT3lrTnBLcVVneGRLbEVDK0hpOWkyRGNhUjEKZTlLVXdRVVpSaHk1ai9QRWRFZ2xLZzNsOWR0RDR0dVRtN2tadEI4djMyb09qekhUWXcrN0tkemRaaXcvc0J0bgpVZmhCUE9STnVheTRwSnhtWS9XcmhTTWR6Rk8ycTNHdTNNVUJjZG8yN2dvWUtqTDlDVEY4ai9aejU1eWN0VW9WCmFuZUNXcy9halVYK0h5cGtCVEErYzhMR0RMbldPMk5LcTBZRC9wbkFSa0FuWUdQZlVEb0hSOWdWU3AvcVJ4K1oKV2doaURMWnNNd2hOMXpqdFNDMHVCV2l1Z0YzdlROellJRUZ
mYVBHN1dzM2pEckFNTVllYlE5NUpRK0hJQkQvUgpQQnVIUlRCcHFLbHlEbmtTSERIWVBpTlgzYWRQb1BBY2dkRjNIMi9XMHJtb3N3TVdnVGxMbjFXdTBtcmtzNy9xCnBkV2ZTNlBKMWp0eTgwcjJWS3NNL0RqM1lJRGZialhLZGFGVTVDKzhiaGZKR3FVM3RhS2F1dXowd0hWR1QzZW8KNkZsV2tXWXRidDRwZ2RhbWx3VmVaRVcrTE03cVpFSkVzTU5QcmZDMDNBUEttWnNKZ3BXQ0RXT0tadmtaY3ZqVgp1WWtRNG9tWUNUWDVvaHkra25NamRPbWRIOWM3U3BxRVdCREM4NmZpTmV4K08wWE9NRVpTYThEQQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlGYXpDQ0ExT2dBd0lCQWdJUkFJSVF6N0RTUU9OWlJHUGd1Mk9DaXdBd0RRWUpLb1pJaHZjTkFRRUxCUUF3ClR6RUxNQWtHQTFVRUJoTUNWVk14S1RBbkJnTlZCQW9USUVsdWRHVnlibVYwSUZObFkzVnlhWFI1SUZKbGMyVmgKY21Ob0lFZHliM1Z3TVJVd0V3WURWUVFERXd4SlUxSkhJRkp2YjNRZ1dERXdIaGNOTVRVd05qQTBNVEV3TkRNNApXaGNOTXpVd05qQTBNVEV3TkRNNFdqQlBNUXN3Q1FZRFZRUUdFd0pWVXpFcE1DY0dBMVVFQ2hNZ1NXNTBaWEp1ClpYUWdVMlZqZFhKcGRIa2dVbVZ6WldGeVkyZ2dSM0p2ZFhBeEZUQVRCZ05WQkFNVERFbFRVa2NnVW05dmRDQlkKTVRDQ0FpSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnSVBBRENDQWdvQ2dnSUJBSzNvSkhQMEZEZnptNTRyVnlnYwpoNzdjdDk4NGtJeHVQT1pYb0hqM2RjS2kvdlZxYnZZQVR5amIzbWlHYkVTVHRyRmovUlFTYTc4ZjB1b3hteUYrCjBUTTh1a2oxM1huZnM3ai9FdkVobWt2QmlvWnhhVXBtWm15UGZqeHd2NjBwSWdiejVNRG1nSzdpUzQrM21YNlUKQTUvVFI1ZDhtVWdqVStnNHJrOEtiNE11MFVsWGpJQjB0dG92MERpTmV3TndJUnQxOGpBOCtvK3UzZHBqcStzVwpUOEtPRVV0K3p3dm8vN1YzTHZTeWUwcmdUQklsREhDTkF5bWc0Vk1rN0JQWjdobS9FTE5LakQrSm8yRlIzcXlICkI1VDBZM0hzTHVKdlc1aUI0WWxjTkhsc2R1ODdrR0o1NXR1a21pOG14ZEFRNFE3ZTJSQ09GdnUzOTZqM3grVUMKQjVpUE5naVY1K0kzbGcwMmRaNzdEbkt4SFp1OEEvbEpCZGlCM1FXMEt0WkI2YXdCZHBVS0Q5amYxYjBTSHpVdgpLQmRzMHBqQnFBbGtkMjVITjdyT3JGbGVhSjEvY3RhSnhRWkJLVDVaUHQwbTlTVEpFYWRhbzB4QUgwYWhtYlduCk9sRnVoanVlZlhLbkVnVjRXZTArVVhnVkN3T1BqZEF2QmJJK2Uwb2NTM01GRXZ6RzZ1QlFFM3hEazNTenluVG4Kamg4QkNOQXcxRnR4TnJRSHVzRXdNRnhJdDRJN21LWjlZSXFpb3ltQ3pMcTlnd1Fib29NRFFhSFdCZkVid3JidwpxSHlHTzBhb1NDcUkzSGFhZHI4ZmFxVTlHWS9yT1BOazNzZ3JEUW9vLy9mYjRoVkMxQ0xRSjEzaGVmNFk1M0NJCnJVN20yWXM2eHQwblVXNy92R1QxTTBOUEFnTUJBQUdqUWpCQU1BNEdBMVVkRHdFQi93UUVBd0lCQmpBUEJnTlYKSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCUjV0Rm5tZTdibDVBRnpnQWlJeUJwWTl1bWJiakF
OQmdrcQpoa2lHOXcwQkFRc0ZBQU9DQWdFQVZSOVlxYnl5cUZEUURMSFlHbWtnSnlrSXJHRjFYSXB1K0lMbGFTL1Y5bFpMCnViaHpFRm5USVpkKzUweHgrN0xTWUswNXFBdnFGeUZXaGZGUURsbnJ6dUJaNmJySkZlK0duWStFZ1BiazZaR1EKM0JlYllodEY4R2FWMG54dnd1bzc3eC9QeTlhdUovR3BzTWl1L1gxK212b2lCT3YvMlgvcWtTc2lzUmNPai9LSwpORnRZMlB3QnlWUzV1Q2JNaW9nemlVd3RoRHlDMys2V1Z3VzZMTHYzeExmSFRqdUN2akhJSW5Oemt0SENnS1E1Ck9SQXpJNEpNUEorR3NsV1lIYjRwaG93aW01N2lhenRYT29Kd1Rkd0p4NG5MQ2dkTmJPaGRqc252enF2SHU3VXIKVGtYV1N0QW16T1Z5eWdocXBaWGpGYUgzcE8zSkxGK2wrLytzS0FJdXZ0ZDd1K054ZTVBVzB3ZGVSbE44TndkQwpqTlBFbHB6Vm1iVXE0SlVhZ0VpdVREa0h6c3hIcEZLVks3cTQrNjNTTTFOOTVSMU5iZFdoc2NkQ2IrWkFKelZjCm95aTNCNDNualRPUTV5T2YrMUNjZVd4RzFiUVZzNVp1ZnBzTWxqcTRVaTAvMWx2aCt3akNoUDRrcUtPSjJxeHEKNFJncXNhaERZVnZUSDl3N2pYYnlMZWlOZGQ4WE0ydzlVL3Q3eTBGZi85eWkwR0U0NFphNHJGMkxOOWQxMVRQQQptUkd1blVIQmNuV0V2Z0pCUWw5bkpFaVUwWnNudmdjL3ViaFBnWFJSNFhxMzdaMGo0cjdnMVNnRUV6d3hBNTdkCmVteVB4Z2NZeG4vZVI0NC9LSjRFQnMrbFZEUjN2ZXlKbStrWFE5OWIyMS8ramg1WG9zMUFuWDVpSXRyZUdDYz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
{{- end }}
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-beats
  labels:
    app: logstash
spec:
  image: {{ .Values.logstash.image }}:{{ default .Values.elasticVersion .Values.logstash.imageTag }}
  version: {{ .Values.elasticVersion }}
  count: {{ .Values.logstash.count }}
  elasticsearchRefs:
    - name: {{ .Release.Name }}-elasticsearch
      clusterName: {{ .Release.Name }}-elasticsearch
  monitoring:
    logs:
      elasticsearchRefs:
        - name: {{ .Release.Name }}-elasticsearch
  volumeClaimTemplates:
    - metadata:
        name: logstash-data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: {{ .Values.logstash.diskSpace }}
        storageClassName: {{ .Values.logstash.storageClassName }}
  services:
    # One Service per Logstash pod so each instance is individually addressable.
    {{- range $index := until (.Values.logstash.count | int) }}
    - name: beats-{{ $index }}
      service:
        spec:
          ports:
            - port: 5044
              name: {{ $.Values.logstash.beats.pipelines_group_name }}
              protocol: TCP
          selector:
            statefulset.kubernetes.io/pod-name: logstash-beats-ls-{{ $index }}
    {{- end }}
  config:
    xpack.monitoring.enabled: true
    xpack.monitoring.elasticsearch.hosts: ["${ELASTIC_ELASTICSEARCH_ES_HOSTS}"]
    xpack.monitoring.elasticsearch.username: "${MONITORING_USER}"
    xpack.monitoring.elasticsearch.password: "${MONITORING_PASSWORD}"
    xpack.monitoring.elasticsearch.ssl.certificate_authority: /usr/share/logstash/config/certs/ca.crt
  podTemplate:
    metadata:
      labels:
        stack-namespace: {{ .Release.Namespace }}
    spec:
      containers:
        - name: logstash
          {{- with .Values.logstash.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            {{- range .Values.logstash.beats.pipelines }}
            - name: pipeline-config-{{ .name }}
              mountPath: /app/elastic/logstash/config/pipelines/{{ .name }}.config
              subPath: {{ .name }}.config
            {{- end }}
            - name: es-certs
              mountPath: /usr/share/logstash/config/certs
            - mountPath: /usr/share/logstash/certs-logstash
              name: certs-logstash
          env:
            - name: LOGSTASH_USER
              valueFrom:
                secretKeyRef:
                  name: logstash-writer-secret
                  key: username
            - name: LOGSTASH_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: logstash-writer-secret
                  key: password
            - name: MONITORING_USER
              valueFrom:
                secretKeyRef:
                  name: user-monitoring-secret
                  key: username
            - name: MONITORING_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: user-monitoring-secret
                  key: password
            - name: ELASTIC_ELASTICSEARCH_ES_HOSTS
              # Was hard-coded to the "elastic" release name; templated for portability.
              value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
            - name: ELASTICSEARCH_SSL_CERTIFICATE_VERIFICATION
              value: "true"
            - name: ELASTICSEARCH_SSL_CA_PATH
              value: "/usr/share/logstash/config/certs/ca.crt"
      volumes:
        {{- range .Values.logstash.beats.pipelines }}
        - name: pipeline-config-{{ .name }}
          configMap:
            name: logstash-{{ $.Values.logstash.beats.pipelines_group_name }}-{{ .name }}-config
            defaultMode: 511
        {{- end }}
        - name: es-certs
          secret:
            secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
        - name: certs-logstash
          secret:
            secretName: logstash-secret-{{ .Values.logstash.beats.pipelines_group_name }}
  pipelinesRef:
    secretName: logstash-{{ .Values.logstash.beats.pipelines_group_name }}-pipelines-yml
---
apiVersion: v1
kind: Secret
metadata:
  name: logstash-{{ .Values.logstash.beats.pipelines_group_name }}-pipelines-yml
data:
  # Fixed pipe order: encode first, then indent (the previous
  # "nindent | b64enc" baked indentation into the decoded content).
  pipelines.yml: |
    {{- tpl .Values.logstash.pipelines_yml_config $ | b64enc | nindent 4 }}
---
# One ConfigMap per configured beats pipeline.
{{- range .Values.logstash.beats.pipelines }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-{{ $.Values.logstash.beats.pipelines_group_name }}-{{ .name }}-config
data:
  {{ .name }}.config: |
    {{- tpl .input $ | nindent 4 }}
    {{- tpl .filter $ | nindent 4 }}
    {{- tpl .output $ | nindent 4 }}
---
{{- end }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: logstash-api-{{ .Values.logstash.beats.pipelines_group_name }}
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: 50m
    external-dns.alpha.kubernetes.io/hostname: "{{ template "logstash.dns" . }},{{- include "logstash.dns.array" . | trim }}"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - {{ template "logstash.dns" . }}
      secretName: logstash-secret-{{ .Values.logstash.beats.pipelines_group_name }}
  rules:
    - host: {{ template "logstash.dns" . }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: logstash-{{ .Values.logstash.beats.pipelines_group_name }}-ls-api
                port:
                  number: 9600
---
{{ $concatUrl := (include "logstash.dns" .) }}
{{ $prefix := (default "l" .Values.logstash.urlPrefix) }}
{{- range $index_i := until (.Values.logstash.count | int) }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  # Fixed values path: the group name lives under .logstash.beats (as used
  # in the data value below), not directly under .logstash.
  name: tcp-services-{{ $.Values.logstash.beats.pipelines_group_name }}-{{ $index_i }}
data:
  # ConfigMap data keys must be strings, so the port number is quoted.
  # Target namespace was hard-coded to "observability"; now templated.
  "5044": "{{ $.Release.Namespace }}/logstash-{{ $.Values.logstash.beats.pipelines_group_name }}-ls-{{ $index_i }}:5044"
{{- end }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: logstash-{{ .Values.logstash.beats.pipelines_group_name }}
spec:
  duration: {{ .Values.logstash.cert.duration }}
  renewBefore: {{ .Values.logstash.cert.renewBefore }}
  commonName: {{ template "logstash.dns" . }}
  secretName: logstash-secret-{{ .Values.logstash.beats.pipelines_group_name }}
  dnsNames:
    - "{{ template "logstash.dns" . }}"
    # Per-instance names ($prefix/$concatUrl are set earlier in this file).
    {{- range $index_i := until (.Values.logstash.count | int) }}
    - "{{ $prefix }}{{ $index_i }}.{{ $concatUrl }}"
    {{- end }}
    - "logstash.{{ .Release.Namespace }}"
    - "logstash-{{ .Values.logstash.beats.pipelines_group_name }}-ls-api.{{ .Release.Namespace }}"
  issuerRef:
    name: internal-issuer
    kind: Issuer
  privateKey:
    encoding: "PKCS8"
---
\ No newline at end of file
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-syslog
  labels:
    app: logstash
spec:
  image: {{ .Values.logstash.image }}:{{ default .Values.elasticVersion .Values.logstash.imageTag }}
  version: {{ .Values.elasticVersion }}
  count: {{ .Values.logstash.count }}
  elasticsearchRefs:
    - name: {{ .Release.Name }}-elasticsearch
      clusterName: {{ .Release.Name }}-elasticsearch
  monitoring:
    logs:
      elasticsearchRefs:
        - name: {{ .Release.Name }}-elasticsearch
  volumeClaimTemplates:
    - metadata:
        name: logstash-data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: {{ .Values.logstash.diskSpace }}
        storageClassName: {{ .Values.logstash.storageClassName }}
  services:
    # One Service per pod so each syslog instance is individually addressable.
    {{- range $index := until (.Values.logstash.count | int) }}
    - name: syslog-{{ $index }}
      service:
        spec:
          ports:
            - port: 514
              name: {{ $.Values.logstash.syslog.pipelines_group_name }}
              protocol: TCP
          selector:
            statefulset.kubernetes.io/pod-name: logstash-syslog-ls-{{ $index }}
    {{- end }}
  config:
    {{- with .Values.logstash.config }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  podTemplate:
    metadata:
      labels:
        stack-namespace: {{ .Release.Namespace }}
    spec:
      containers:
        - name: logstash
          {{- with .Values.logstash.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            {{- range .Values.logstash.syslog.pipelines }}
            - name: pipeline-config-{{ .name }}
              mountPath: /app/elastic/logstash/config/pipelines/{{ .name }}.config
              subPath: {{ .name }}.config
            {{- end }}
          env:
            - name: LOGSTASH_USER
              valueFrom:
                secretKeyRef:
                  name: logstash-writer-secret
                  key: username
            - name: LOGSTASH_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: logstash-writer-secret
                  key: password
            - name: ELASTIC_ELASTICSEARCH_ES_HOSTS
              # Was hard-coded to the "elastic" release name; templated for portability.
              value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
      volumes:
        {{- range .Values.logstash.syslog.pipelines }}
        - name: pipeline-config-{{ .name }}
          configMap:
            name: logstash-{{ $.Values.logstash.syslog.pipelines_group_name }}-{{ .name }}-config
            defaultMode: 511
        {{- end }}
  pipelinesRef:
    secretName: logstash-{{ .Values.logstash.syslog.pipelines_group_name }}-pipelines-yml
---
# Secret holding pipelines.yml for the syslog Logstash group (referenced by
# pipelinesRef in the Logstash CR).
apiVersion: v1
kind: Secret
metadata:
  name: logstash-{{ .Values.logstash.syslog.pipelines_group_name }}-pipelines-yml
data:
  # Secret data values must be the base64 of the raw content, emitted on the
  # key line itself. The previous form (`pipelines.yml: |` followed by a
  # column-0 template line with `nindent 6 | b64enc`) terminated the block
  # scalar and base64-encoded the injected indentation into the payload.
  pipelines.yml: {{ tpl .Values.logstash.pipelines_yml_config $ | b64enc }}
---
{{- range .Values.logstash.syslog.pipelines }}
# One ConfigMap per syslog pipeline, holding the rendered Logstash pipeline
# definition (input/filter/output sections templated from values.yaml); it is
# mounted into the Logstash pods as <name>.config.
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-{{ $.Values.logstash.syslog.pipelines_group_name }}-{{ .name }}-config
data:
  {{ .name }}.config: |
{{ tpl .input $ | nindent 6 }}
{{ tpl .filter $ | nindent 6 }}
{{ tpl .output $ | nindent 6 }}
---
{{- end }}
# Metricbeat as an ECK Beat CR deployed as a DaemonSet: collects host/system
# metrics, kubelet metrics, and kube-state-metrics pod state, shipping to the
# release's Elasticsearch via elasticsearchRef.
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: metricbeat
spec:
  type: metricbeat
  version: {{ .Values.elasticVersion }}
  elasticsearchRef:
    name: {{ .Release.Name }}-elasticsearch
  config:
    metricbeat:
      # hints-based autodiscover: pods can opt into metric collection via
      # co.elastic.metrics/* annotations
      autodiscover:
        providers:
          - hints:
              default_config: {}
              # NOTE(review): string "true" — beats coerces it, but a plain
              # boolean true is the canonical form
              enabled: "true"
            node: ${NODE_NAME}
            type: kubernetes
      modules:
        # host-level metrics every 10s, with top-5 process lists
        - module: system
          period: 10s
          metricsets:
            - cpu
            - load
            - memory
            - diskio
            - network
            - process
            - process_summary
          process:
            include_top_n:
              by_cpu: 5
              by_memory: 5
          processes:
            - .*
        # filesystem metrics every minute, skipping virtual/host-internal mounts
        - module: system
          period: 1m
          metricsets:
            - filesystem
            - fsstat
          processors:
            - drop_event:
                when:
                  regexp:
                    system:
                      filesystem:
                        mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib)($|/)
        # kubelet metrics scraped from the local node's read API (10250);
        # TLS verification disabled because kubelet serves a self-signed cert
        - module: kubernetes
          period: 10s
          node: ${NODE_NAME}
          hosts:
            - https://${NODE_NAME}:10250
          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
          ssl:
            verification_mode: none
          metricsets:
            - node
            - system
            - pod
            - container
            - volume
        # pod state from kube-state-metrics (see metricbeat.kubeStateHost);
        # requires kube-state-metrics to be installed in the cluster
        - add_metadata: true
          enabled: true
          hosts:
            - {{ .Values.metricbeat.kubeStateHost }}
          metricsets:
            - state_pod
          module: kubernetes
          ssl:
            verification_mode: none
    processors:
      - add_cloud_metadata: {}
      - add_host_metadata: {}
      # optional namespace/service filter from values.yaml (metricbeat.dropEvent)
      {{- with .Values.metricbeat.dropEvent }}
      - drop_event:
        {{- toYaml . | nindent 8 }}
      {{- end }}
  daemonSet:
    podTemplate:
      spec:
        serviceAccountName: metricbeat
        automountServiceAccountToken: true # some older Beat versions are depending on this settings presence in k8s context
        containers:
          - args:
              - -e
              - -c
              - /etc/beat.yml
              - -system.hostfs=/hostfs
            name: metricbeat
            {{- with .Values.metricbeat.resources }}
            resources:
              {{- toYaml . | nindent 12 }}
            {{- end }}
            volumeMounts:
              - mountPath: /hostfs/sys/fs/cgroup
                name: cgroup
              # NOTE(review): docker.sock only exists on Docker-runtime nodes;
              # on containerd clusters this hostPath is empty — confirm it is
              # still needed
              - mountPath: /var/run/docker.sock
                name: dockersock
              - mountPath: /hostfs/proc
                name: proc
            env:
              - name: NODE_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: true # Allows to provide richer host metadata
        # root required to read host /proc, /sys/fs/cgroup and the socket
        securityContext:
          runAsUser: 0
        terminationGracePeriodSeconds: 30
        volumes:
          - hostPath:
              path: /sys/fs/cgroup
            name: cgroup
          - hostPath:
              path: /var/run/docker.sock
            name: dockersock
          - hostPath:
              path: /proc
            name: proc
---
# Cluster-wide read access required by metricbeat's kubernetes module and
# autodiscover provider.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metricbeat
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
      - namespaces
      - events
      - pods
    verbs:
      - get
      - list
      - watch
  # replicasets under the legacy "extensions" group (older clusters)
  - apiGroups:
      - "extensions"
    resources:
      - replicasets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apps
    resources:
      - statefulsets
      - deployments
      - replicasets
    verbs:
      - get
      - list
      - watch
  # kubelet stats endpoint
  - apiGroups:
      - ""
    resources:
      - nodes/stats
    verbs:
      - get
  # non-resource /metrics endpoint
  - nonResourceURLs:
      - /metrics
    verbs:
      - get
---
# Identity for the metricbeat DaemonSet pods; bound to the metricbeat
# ClusterRole by the ClusterRoleBinding in this chart.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metricbeat
  namespace: {{.Release.Namespace}}
---
# Grants the metricbeat ServiceAccount the cluster-wide read permissions
# defined in the metricbeat ClusterRole.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metricbeat
subjects:
  - kind: ServiceAccount
    name: metricbeat
    namespace: {{.Release.Namespace}}
roleRef:
  kind: ClusterRole
  name: metricbeat
  apiGroup: rbac.authorization.k8s.io
---
# Container image coordinates; the ${...} placeholders are substituted by the
# CI pipeline before the chart is packaged.
image:
  repository: "${CI_REGISTRY_IMAGE}"
  pullPolicy: IfNotPresent
  tag: "${PROJECT_RELEASE_VERSION}"
\ No newline at end of file
# version of all elastic applications
elasticVersion: 8.15.1
# Name of aws environment
envTag: "dev"
# This suffix will be used to create a subdomain of the following template:
# kibana.NAMESPACE_TAG.DOMAIN_SUFFIX
domainSuffix: "simpl-europe.eu"
# This value is used to create a unique dns name for each deployment. By default it equals the namespace.
namespaceTag: "dev"
# Spread pods evenly between subnets
createTopologySpreadConstraints: true
# Force pods to be scheduled evenly between separate AZs; if all nodes in an AZ are full, the pod won't be scheduled.
# Turning this off will still create pods in separate AZs, but the k8s scheduler won't be able to respect
# topologySpreadConstraints and may schedule pods in the same AZ.
# (the key keeps its historical spelling)
forceDeployInSeperateAZ: true
# Open port 443 for each component on the Traefik level.
openStandardHttpsPorts: true
# imagePullSecrets defines the secrets with credentials to a docker registry, for example nexus.
imagePullSecrets: []
elasticsearch:
  image: docker.elastic.co/elasticsearch/elasticsearch
  # CA file name for cross-cluster replication remotes — TODO confirm usage
  ccr_remote_ca_file: ccr-remote-ca.pem
  # Elasticsearch's image tag, by default it equals to elasticVersion
  imageTag: ""
  # JVM heap settings (-Xms / -Xmx)
  jvm_options_xms: 1g
  jvm_options_xmx: 1g
  cert:
    duration: 2160h0m0s # 90d
    renewBefore: 360h0m0s # 15d
  # Environment variables to set in the elasticsearch pod
  # Usage from cli:
  # --set "elasticsearch.env[0].name=VARIABLE_NAME" --set "elasticsearch.env[0].value=VARIABLE_VALUE"
  env: []
  # here you can specify several node groups (e.g. master, data groups)
  nodeSets:
  - name: "node"
    # number of elasticsearch nodes in each group
    count: 3
    # persistent volume claim size
    diskSpace: 60Gi
    # name of StorageClass that will be used to create VolumeClaims. (StorageClass must exist)
    storageClassName: csi-cinder-high-speed
    # here you can specify elasticsearch config e.g
    # config:
    #   node.roles: ["master"]
    config: {}
    resources:
      requests:
        memory: 4Gi
      limits:
        memory: 4Gi
        cpu: "1"
kibana:
  # set >0 to deploy kibana, 0 otherwise
  count: 1
  image: docker.elastic.co/kibana/kibana
  # Kibana's image tag, by default it equals to elasticVersion
  imageTag: ""
  # name of helm release where elasticsearch is installed. If you install kibana together with elasticsearch, leave it empty.
  elasticsearchReleaseName: ""
  # TLS certificate lifetime / rotation for Kibana
  cert:
    duration: 2160h0m0s # 90d
    renewBefore: 360h0m0s # 15d
  # Additional kibana's config according to this link: https://www.elastic.co/guide/en/kibana/current/settings.html
  config:
    xpack.reporting.roles.enabled: false
  resources:
    requests:
      memory: 1Gi
    limits:
      memory: 1Gi
  # Environment variables to set in the kibana pod
  # Usage from cli:
  # --set "kibana.env[0].name=VARIABLE_NAME" --set "kibana.env[0].value=VARIABLE_VALUE"
  env: []
logstash:
  # number of Logstash replicas per pipeline group
  count: 2
  image: docker.elastic.co/logstash/logstash
  # extra entries merged into the Logstash CR's `config` section
  config: {}
  # persistent volume claim size for logstash-data
  diskSpace: 3Gi
  # name of StorageClass that will be used to create VolumeClaims. (StorageClass must exist)
  storageClassName: csi-cinder-high-speed
  # Logstash's image tag, by default it equals to elasticVersion
  imageTag: ""
  env:
    # NOTE(review): 256m heap vs 4Gi requests/limits leaves most of the pod
    # memory unused — confirm the intended sizing.
    LS_JAVA_OPTS: "-Xmx256m -Xms256m"
  resources:
    requests:
      memory: 4Gi
    limits:
      memory: 4Gi
  cert:
    duration: 2160h0m0s # 90d
    renewBefore: 360h0m0s # 15d
  # pipelines.yml content rendered into the pipelines Secret; loads every
  # *.config file mounted from the pipeline ConfigMaps
  pipelines_yml_config: |-
    - pipeline.id: main
      path.config: "/app/elastic/logstash/config/pipelines/*.config"
      pipeline.workers: 1
      pipeline.batch.size: 125
  # NOTE(review): workers/batch below appear to duplicate pipeline.workers /
  # pipeline.batch.size above — confirm which setting is authoritative.
  workers: 1
  batch:
    size: 125
  # Beats listener group: receives Filebeat traffic on 5044 over mutual TLS
  # and routes events to Elasticsearch data streams by [fields][logtype].
  beats:
    pipelines_group_name: "beats"
    pipelines:
    - name: "beats-pipeline"
      # Beats input; client certificates are required (force_peer).
      input: |-
        input {
          beats {
            port => 5044
            ssl => true
            ssl_certificate_authorities => ["/usr/share/logstash/certs-logstash/ca.crt"]
            ssl_certificate => "/usr/share/logstash/certs-logstash/tls.crt"
            ssl_key => "/usr/share/logstash/certs-logstash/tls.key"
            ssl_verify_mode => "force_peer"
          }
        }
      # Per-logtype grok/JSON parsing; the event date comes from [ts] when
      # present, otherwise from the parsed [timestamp] field.
      filter: |-
        filter {
          if [fields][logtype] == "logs-sample-onboarding" {
            grok {
              match => {
                "message" => [
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{PATH:path}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}\[%{DATA:request_id}\]%{SPACE}HTTP%{SPACE}%{WORD:http_method}%{SPACE}"%{DATA:uri}"',
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}',
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{DATA:logger}%{SPACE}:%{SPACE}\[%{DATA:request_id}\]%{SPACE}%{GREEDYDATA:message}'
                ]
              }
              overwrite => [ "message" ]
            }
          }
          if [fields][logtype] == "logs-sample-sdtooling" {
            grok {
              pattern_definitions => { "JAVA" => "[0-9A-Za-z\[\]\.\$]*" }
              match => {
                "message" => [
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}Method:%{SPACE}%{DATA:method}%{SPACE}\-%{SPACE}%{GREEDYDATA:message}',
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}'
                ]
              }
              overwrite => [ "message" ]
            }
          }
          if [fields][logtype] == "logs-sample-catalogue" {
            grok {
              pattern_definitions => { "JAVA" => "[0-9A-Za-z\[\]\.\$]*" }
              match => {
                "message" => [
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread_name}\]%{SPACE}\[%{DATA:exec_thread}\]%{SPACE}\[%{DATA:request_id}\]%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}',
                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread_name}\]%{SPACE}\[%{DATA:exec_thread}\]%{SPACE}%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}'
                ]
              }
              overwrite => [ "message" ]
            }
          }
          if [fields][logtype] == "logs-sample-signer" {
            json {
              source => "message"
            }
          }
          if [fields][logtype] == "logs-sample-business" {
            grok {
              match => { "message" => '%{TIMESTAMP_ISO8601:timestamp}\|%{WORD:origin}\|%{WORD:destination}\|%{WORD:business_operation}\|%{DATA:message_type}\|%{WORD:correlation_id}' }
            }
          } if [fields][logtype] == "logs-sample-wrapper" {
            if [message] !~ "^\{" {
              drop { }
            }
            json {
              source => "message"
            }
          }
          if [ts] {
            date {
              match => [ "ts", "ISO8601" ]
            }
          } else {
            date {
              match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss" ]
            }
          }
        }
      # Routes by logtype to data streams: business, wrapper, agents,
      # everything else -> technical.
      # NOTE(review): the wrapper branch writes to data_stream_dataset
      # "business", same as the business branch — confirm this is intentional.
      output: |-
        output {
          if [fields][logtype] == "logs-sample-business" {
            elasticsearch {
              hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
              user => "${LOGSTASH_USER}"
              password => "${LOGSTASH_PASSWORD}"
              ssl_enabled => "true"
              ssl_verification_mode => "full"
              ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
              data_stream => "true"
              data_stream_type => "logs"
              data_stream_dataset => "business"
            }
          }
          else if [fields][logtype] == "logs-sample-wrapper" {
            elasticsearch {
              hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
              user => "${LOGSTASH_USER}"
              password => "${LOGSTASH_PASSWORD}"
              ssl_enabled => "true"
              ssl_verification_mode => "full"
              ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
              data_stream => "true"
              data_stream_type => "logs"
              data_stream_dataset => "business"
            }
          }
          else if [fields][logtype] == "agents" {
            elasticsearch {
              hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
              user => "${LOGSTASH_USER}"
              password => "${LOGSTASH_PASSWORD}"
              ssl_enabled => "true"
              ssl_verification_mode => "full"
              ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
              data_stream => "true"
              data_stream_type => "logs"
              data_stream_dataset => "agents"
            }
          }
          else {
            elasticsearch {
              hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
              user => "${LOGSTASH_USER}"
              password => "${LOGSTASH_PASSWORD}"
              ssl_enabled => "true"
              ssl_verification_mode => "full"
              ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
              data_stream => "true"
              data_stream_type => "logs"
              data_stream_dataset => "technical"
            }
          }
          #stdout {
          #  codec => rubydebug
          #}
        }
  # Syslog listener group: plain TCP syslog on 514, no filtering.
  syslog:
    pipelines_group_name: "syslog"
    pipelines:
    - name: "syslog-pipeline"
      input: |-
        input {
          syslog {
            port => 514
          }
        }
      filter: |-
        filter {
        }
      # NOTE(review): the index sprintf uses beats @metadata, which syslog
      # events do not carry — the index name would keep the literal
      # "%{[@metadata][beat]}" text; confirm the intended index. The
      # stdout/rubydebug output is also very verbose for production.
      output: |-
        output {
          elasticsearch {
            hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
            index => "%{[@metadata][beat]}-%{[@metadata][version]}"
            user => "${LOGSTASH_USER}"
            password => "${LOGSTASH_PASSWORD}"
            ssl_enabled => "true"
            ssl_verification_mode => "full"
            ssl_certificate_authorities => "${ELASTIC_ELASTICSEARCH_ES_SSL_CERTIFICATE_AUTHORITY}"
          }
          stdout {
            codec => rubydebug
          }
        }
filebeat:
  image: docker.elastic.co/beats/filebeat
  config: {}
  # set >0 to deploy the sample-log Filebeat, 0 disables it
  count: 0
  # Filebeat's image tag, by default it equals to elasticVersion
  imageTag: ""
  # Total number of the sample messages to generate. Provide negative number to generate infinitely
  totalMessages: 604800
  # Number of messages per minute. Provide negative number to generate messages without time limit.
  messagesPerMinute: 30
  cert:
    duration: 2160h0m0s # 90d
    renewBefore: 360h0m0s # 15d
  # Filebeat configuration file - input: one filestream per sample-log type;
  # the logtype field drives pipeline routing in Logstash
  input: |
    filebeat.inputs:
    - type: filestream
      paths:
        - /mnt/repo/log_samples/onboarding/*.txt
      fields:
        logtype: logs-sample-onboarding
      parsers:
        - multiline:
            type: pattern
            pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
            negate: true
            match: after
    - type: filestream
      paths:
        - /mnt/repo/log_samples/catalogue/signer/signer.txt
      fields:
        logtype: logs-sample-signer
    - type: filestream
      paths:
        - /mnt/repo/log_samples/catalogue/sdtooling/sdtooling.txt
      fields:
        logtype: logs-sample-sdtooling
      parsers:
        - multiline:
            type: pattern
            pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
            negate: true
            match: after
    - type: filestream
      paths:
        - /mnt/repo/log_samples/catalogue/*.txt
      fields:
        logtype: logs-sample-catalogue
      parsers:
        - multiline:
            type: pattern
            pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
            negate: true
            match: after
    - type: filestream
      paths:
        - /usr/share/filebeat/logs/example.log
      fields:
        logtype: logs-sample-business
    - type: filestream
      paths:
        - /mnt/repo/log_samples/wrapper/*.log
      fields:
        logtype: logs-sample-wrapper
    filebeat.config.modules:
      path: ${path.config}/modules.d/*.yml
      reload.enabled: false
    # processors:
    #  - add_kubernetes_metadata:
    #      in_cluster: true
    # logging.level: debug
  name: "test"
  # Filebeat configuration file - output: mutual TLS towards Logstash, plus
  # self-monitoring shipped directly to Elasticsearch
  output: |
    output.logstash:
      hosts: ["${LOGSTASH_HOSTS}"]
      ssl.enabled: true
      ssl.certificate_authorities: ["/usr/share/filebeat/es-certs/ca.crt"]
      ssl.verification_mode: full
      ssl.certificate: "/usr/share/filebeat/certs/tls.crt"
      ssl.key: "/usr/share/filebeat/certs/tls.key"
    monitoring.enabled: "true"
    monitoring.elasticsearch:
      hosts: ["${ELASTIC_ELASTICSEARCH_ES_HOSTS}"]
      ssl.certificate_authorities: ["/usr/share/filebeat/es-certs/ca.crt"]
      username: "${MONITORING_USER}"
      password: "${MONITORING_PASSWORD}"
filebeat4agents:
  image: docker.elastic.co/beats/filebeat
  config: {}
  # Logstash beats endpoint this Filebeat ships to
  destinationLogstashHost: logstash-beats-ls-api.observability:5044
  # Filebeat's image tag, by default it equals to elasticVersion
  imageTag: ""
  # Total number of the sample messages to generate. Provide negative number to generate infinitely
  totalMessages: 604800
  # Number of messages per minute. Provide negative number to generate messages without time limit.
  messagesPerMinute: 30
  cert:
    duration: 2160h0m0s # 90d
    renewBefore: 360h0m0s # 15d
  # Filebeat configuration file - input: autodiscover container logs from the
  # ingress-nginx and kube-system namespaces and tag them logtype=agents
  input: |
    filebeat.autodiscover:
      providers:
        - type: kubernetes
          templates:
            - condition:
                or:
                  - equals:
                      kubernetes.namespace: "ingress-nginx"
                  - equals:
                      kubernetes.namespace: "kube-system"
              config:
                - type: container
                  paths:
                    - /var/log/containers/*-${data.kubernetes.container.id}.log
    processors:
      - add_cloud_metadata: {}
      - add_host_metadata: {}
      - add_fields:
          fields:
            logtype: "agents"
  # Filebeat configuration file - output: mutual TLS towards Logstash
  output: |
    output.logstash:
      hosts: ["${LOGSTASH_HOSTS}"]
      ssl.enabled: true
      ssl.certificate_authorities: ["/usr/share/filebeat/es-certs/ca.crt"]
      ssl.verification_mode: full
      ssl.certificate: "/usr/share/filebeat/certs/tls.crt"
      ssl.key: "/usr/share/filebeat/certs/tls.key"
    # monitoring.enabled: "true"
    # monitoring.elasticsearch:
    #   hosts: ["${ELASTIC_ELASTICSEARCH_ES_HOSTS}"]
    #   ssl.certificate_authorities: ["/usr/share/filebeat/es-certs/ca.crt"]
    #   username: "${MONITORING_USER}"
    #   password: "${MONITORING_PASSWORD}"
metricbeat:
  resources:
    requests:
      memory: 500Mi
      cpu: 300m
    limits:
      memory: 500Mi
      cpu: 300m
  # Host serving kube-state-metrics; used by the state_pod metricset
  kubeStateHost: kube-state-metrics.kube-state-metrics.svc.cluster.local:8080
  # Drop every event that does not come from one of the namespaces/services
  # below (rendered as a metricbeat drop_event processor); verify the
  # namespace list when installing on another cluster
  dropEvent:
    when:
      not:
        or:
          - equals:
              kubernetes.namespace: observability
          - equals:
              kubernetes.namespace: argo-cd
          - equals:
              service.type: system
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment