Code development platform for open source projects from the European Union institutions :large_blue_circle: EU Login authentication by SMS has been phased out. To see alternatives please check here

Skip to content
Snippets Groups Projects
Commit 13c234f0 authored by Natalia Szakiel's avatar Natalia Szakiel
Browse files

Merge branch 'feature/update-codeowners' into 'main'

Feature/update codeowners

See merge request !61
parents 038d4d82 622bafa4
Branches
Tags 2.0.0
3 merge requests!69Feature/merge develop into main,!67Feature/resolved conflicts,!61Feature/update codeowners
Pipeline #229797 passed
Showing
with 766 additions and 305 deletions
*.lock
*.tgz
*.pem
\ No newline at end of file
* @simpl/simpl-open/development/monitoring
\ No newline at end of file
* @simpl/simpl-open/development/monitoring @n00bagqb
Source diff could not be displayed: it is too large. Options to address this: view the blob.
#!/bin/bash
# Wait up to 10 minutes (120 attempts * 5 second delay) for Elasticsearch/Kibana
# to become ready, then import the Kibana dashboards via the saved-objects API.
# Requires: ELASTIC_PASSWORD in the environment; dashboards.ndjson mounted under
# /mnt/dashboards/charts/kibana/dashboards/.
COUNT=120
# Temporary file holding the last API response, inspected for the success marker.
LOG_FILE="/mnt/dashboards/load_dashboard.tmp"
for (( i = 1; i <= COUNT; i++ )); do
  echo "Attempt no. $i to load dashboards."
  curl -k -Ss -u "elastic:${ELASTIC_PASSWORD}" -X POST \
    "https://127.0.0.1:5601/api/saved_objects/_import?createNewCopies=false" \
    -H 'kbn-xsrf: true' \
    --form file=@/mnt/dashboards/charts/kibana/dashboards/dashboards.ndjson > "$LOG_FILE"
  # A successful import starts with {"successCount":N,"success":true,...} so the
  # 2nd and 4th double-quote-delimited fields are "successCount" and "success".
  if [[ $(awk -F'"' '{print $2 $4}' "$LOG_FILE") == "successCountsuccess" ]]; then
    echo "Dashboards have been loaded successfully."
    break
  fi
  sleep 5
done
#!/bin/bash
# Load ILM policies and index templates into Elasticsearch, retrying each object
# every 5 seconds until the API acknowledges it.
# Requires: ELASTIC_PASSWORD and RELEASE_NAME in the environment; the JSON
# payloads mounted at the paths referenced below.
# Temporary file holding the last API response, inspected for acknowledgement.
LOG_FILE="/mnt/ilm/load_ilm.tmp"

# --- Load ILM policies ---
echo "Starting loading objects..."
for x in business-ilm technical-ilm; do
  i=1
  echo "Starting loading ILM: $x"
  while :; do
    echo "Attempt no. $i to load ILM"
    curl -k -Ss -u "elastic:${ELASTIC_PASSWORD}" -X PUT \
      "https://${RELEASE_NAME}-elasticsearch-es-http:9200/_ilm/policy/$x" \
      -H 'Content-Type: application/json' -H 'kbn-xsrf: true' \
      -d "@/usr/share/logstash/ilm/logstash-$x.json" > "$LOG_FILE"
    echo "Response:"
    cat "$LOG_FILE"
    echo -e "\n--------"
    # A successful PUT returns {"acknowledged":true}, so the first
    # double-quote-delimited field of the response is "acknowledged".
    if [[ $(awk -F'"' '{print $2}' "$LOG_FILE") == "acknowledged" ]]; then
      echo "ILM $x has been loaded successfully."
      break
    fi
    i=$((i + 1))
    sleep 5
  done
done

# --- Load index templates ---
for x in business-template technical-template; do
  i=1
  echo "Starting template $x"
  while :; do
    echo "Attempt no. $i to load template"
    curl -k -Ss -u "elastic:${ELASTIC_PASSWORD}" -X PUT \
      "https://${RELEASE_NAME}-elasticsearch-es-http:9200/_index_template/$x" \
      -H 'Content-Type: application/json' -H 'kbn-xsrf: true' \
      -d "@/mnt/ilm/charts/kibana/templates/$x-log.json" > "$LOG_FILE"
    echo "Response:"
    cat "$LOG_FILE"
    echo -e "\n--------"
    if [[ $(awk -F'"' '{print $2}' "$LOG_FILE") == "acknowledged" ]]; then
      echo "Template $x has been loaded successfully."
      break
    fi
    i=$((i + 1))
    sleep 5
  done
done
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "business-ilm"
}
}
},
"mappings": {
"properties": {
"@timestamp": {
"type": "date"
},
"timestamp": {
"type": "date"
},
"origin": {
"type": "keyword"
},
"destination": {
"type": "keyword"
},
"business_operation": {
"type": "keyword"
},
"message_type": {
"type": "keyword"
},
"correlation_id": {
"type": "keyword"
}
}
}
},
"index_patterns": [
"business-logs"
],
"data_stream": {
"hidden": false,
"allow_custom_routing": false
},
"composed_of": [],
"priority": 200
}
\ No newline at end of file
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "technical-ilm"
}
}
},
"mappings": {
"properties": {
"@timestamp": {
"type": "date"
},
"timestamp": {
"type": "date"
},
"logger": {
"type": "keyword",
"ignore_above": 1024
},
"loglevel": {
"type": "keyword",
"ignore_above": 1024
},
"message": {
"type": "match_only_text"
},
"method": {
"type": "keyword",
"ignore_above": 1024
},
"msg": {
"type": "keyword",
"ignore_above": 1024
},
"namespace": {
"type": "keyword",
"ignore_above": 1024
},
"operation": {
"type": "keyword",
"ignore_above": 1024
},
"path": {
"type": "keyword",
"ignore_above": 1024
},
"pid": {
"type": "keyword",
"ignore_above": 1024
},
"request_id": {
"type": "keyword",
"ignore_above": 1024
},
"tags": {
"type": "keyword",
"ignore_above": 1024
},
"thread": {
"type": "keyword",
"ignore_above": 1024
},
"thread_name": {
"type": "keyword",
"ignore_above": 1024
},
"ts": {
"type": "keyword",
"ignore_above": 1024
},
"uri": {
"type": "keyword",
"ignore_above": 1024
},
"version": {
"type": "keyword",
"ignore_above": 1024
}
}
}
},
"index_patterns": [
"technical-logs"
],
"data_stream": {
"hidden": false,
"allow_custom_routing": false
},
"composed_of": [],
"priority": 200
}
\ No newline at end of file
......@@ -2,13 +2,13 @@
Kibana dns
*/}}
{{- define "kibana.dns" -}}
kibana.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
kibana.{{ .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
{{/*
Kibana full URL with base path
*/}}
{{- define "kibana.dns.fullPath" -}}
https://{{ template "kibana.dns" . }}{{- if eq .Values.openStandardHttpsPorts false -}}:25602{{- end -}}{{- if and (.Values.kibana.ingressSubpath) (ne "/" .Values.kibana.ingressSubpath) }}{{- .Values.kibana.ingressSubpath}}{{- end }}
https://{{ template "kibana.dns" . }}
{{- end -}}
......@@ -17,14 +17,14 @@ https://{{ template "kibana.dns" . }}{{- if eq .Values.openStandardHttpsPorts fa
Elasticsearch api dns
*/}}
{{- define "elasticsearch.dns" -}}
elasticsearch.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
elasticsearch.{{ .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
{{/*
Logstash input dns
*/}}
{{- define "logstash.dns" -}}
logstash.{{ .Values.logstash.beats.pipelines_group_name }}.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
logstash.{{ .Values.namespaceTag }}.{{ .Values.logstash.beats.pipelines_group_name }}.{{ .Values.domainSuffix }}
{{- end -}}
{{/*
......@@ -44,7 +44,7 @@ Logstash input dns for many ingressRouteTCPs
Filebeat input dns
*/}}
{{- define "filebeat.dns" -}}
filebeat.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
filebeat.{{ .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
......@@ -52,5 +52,5 @@ filebeat.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domain
Filebeat for agents input dns
*/}}
{{- define "filebeat4agents.dns" -}}
filebeat4agents.{{ default .Release.Namespace .Values.namespaceTag }}.{{ .Values.domainSuffix }}
filebeat4agents.{{ .Values.namespaceTag }}.{{ .Values.domainSuffix }}
{{- end -}}
......@@ -17,9 +17,8 @@ spec:
- secretName: logstash-writer-secret
- secretName: user-monitoring-secret
nodeSets:
{{- range .Values.elasticsearch.nodeSets }}
- name: {{ .name }}
count: {{ .count}}
- name: {{ .Values.elasticsearch.name }}
count: {{ .Values.elasticsearch.count }}
config:
xpack.security.authc.token.enabled: true
http.cors.enabled : true
......@@ -35,12 +34,12 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: {{ .diskSpace }}
storageClassName: {{ .storageClassName }}
storage: {{ .Values.elasticsearch.diskSpace }}
storageClassName: {{ .Values.elasticsearch.storageClassName }}
podTemplate:
metadata:
labels:
stack-namespace: {{$.Release.Namespace}}
stack-namespace: {{ .Release.Namespace }}
spec:
initContainers:
- name: sysctl
......@@ -64,7 +63,7 @@ spec:
imagePullPolicy: Always
securityContext:
runAsNonRoot: true
{{- with .resources }}
{{- with .Values.elasticsearch.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
......@@ -72,16 +71,15 @@ spec:
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: {{ $.Release.Name -}}-elasticsearch-es-elastic-user
name: {{ .Release.Name -}}-elasticsearch-es-elastic-user
key: elastic
{{- with $.Values.elasticsearch.env }}
{{- with .Values.elasticsearch.env }}
{{- toYaml . | nindent 10 }}
{{- end }}
volumes:
- name: certs
secret:
secretName: {{ $.Release.Name }}-elasticsearch-http-cert-secret-internal
{{- end }}
secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
http:
tls:
certificate:
......@@ -202,15 +200,6 @@ metadata:
spec:
selfSigned: {}
---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: Role
# metadata:
# name: issuer-reader
# rules:
# - apiGroups: ["cert-manager.io"]
# resources: ["issuers"]
# verbs: ["get", "list", "watch"]
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
......
......@@ -41,7 +41,7 @@ spec:
fieldRef:
fieldPath: spec.nodeName
- name: ELASTIC_ELASTICSEARCH_ES_HOSTS
value: 'https://elastic-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
- name: LOGSTASH_HOSTS
value: 'logstash-{{ .Values.logstash.beats.pipelines_group_name }}-ls-api.{{ .Release.Namespace }}:5044'
- name: MONITORING_USER
......@@ -54,6 +54,8 @@ spec:
secretKeyRef:
name: user-monitoring-secret
key: password
- name: MONITORED_NAMESPACE
value: '{{ .Release.Namespace }}'
volumes:
- name: varlogcontainers
......@@ -71,7 +73,7 @@ spec:
defaultMode: 0555
- name: es-certs # used for monitoring
secret:
secretName: elastic-elasticsearch-http-cert-secret-internal
secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
- name: filebeat-certs
secret:
secretName: filebeat4agents-certs-secret
......@@ -106,7 +108,7 @@ spec:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: filebeat4agents-role
name: filebeat4agents-role-{{ .Release.Namespace }}
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
......@@ -141,13 +143,45 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: filebeat4agents-rolebinding
name: filebeat4agents-rolebinding-{{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: filebeat4agents-account
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: filebeat4agents-role
name: filebeat4agents-role-{{ .Release.Namespace }}
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-ilm-configmap
data:
filebeat-bussines-ilm.json: |
{
"policy": {
"phases": {
"hot": {
"actions": {
"rollover": {
"max_age": "{{ .Values.heartbeat.ilm.hot.max_age }}",
"max_primary_shard_size": "{{ .Values.heartbeat.ilm.hot.max_primary_shard_size }}"
},
"set_priority": {
"priority": 100
}
},
"min_age": "0ms"
},
"delete": {
"min_age": "{{ .Values.heartbeat.ilm.delete.min_age }}",
"actions": {
"delete": {}
}
}
}
}
}
---
......@@ -15,19 +15,6 @@ spec:
securityContext:
runAsUser: 0
fsGroup: 1000
initContainers:
- name: git-clone
image: alpine/git
args:
- clone
- --single-branch
- --branch
- feature/removed-roles # or the branch where your logs are
- https://code.europa.eu/simpl/simpl-open/development/monitoring/eck-monitoring.git
- /mnt/repo
volumeMounts:
- name: repo
mountPath: /mnt/repo
containers:
- name: filebeat
command: ['sh', '-c', 'exec /usr/share/filebeat/logs/example.sh & exec /usr/share/filebeat/filebeat -e -c /usr/share/filebeat/filebeat.yml']
......@@ -39,14 +26,12 @@ spec:
name: filebeat-certs
- mountPath: /usr/share/filebeat/es-certs # used for monitoring
name: es-certs
- mountPath: /mnt/repo
name: repo
- mountPath: /usr/share/filebeat/logs/example.sh
subPath: example.sh
name: example-script
env:
- name: ELASTIC_ELASTICSEARCH_ES_HOSTS
value: 'https://elastic-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
- name: LOGSTASH_HOSTS
value: 'logstash-{{ .Values.logstash.beats.pipelines_group_name }}-ls-api.{{ .Release.Namespace }}:5044'
- name: MONITORING_USER
......@@ -69,9 +54,7 @@ spec:
secretName: filebeat-certs-secret
- name: es-certs # used for monitoring
secret:
secretName: elastic-elasticsearch-http-cert-secret-internal
- name: repo
emptyDir: {}
secretName: {{ .Release.Name }}-elasticsearch-http-cert-secret-internal
- name: example-script
configMap:
name: filebeat-example-script
......@@ -174,7 +157,7 @@ data:
TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
TIMESTAMP=$(date +"%Y-%m-%dT%H:%M:%S")
echo "$TIMESTAMP|$ORIGIN|$DESTINATION|$BUSINESS_OPERATION|$MESSAGE_TYPE|$CORRELATION_ID" >> "$LOG_FILE"
count=$((count + 1))
done
......
......@@ -9,21 +9,58 @@ spec:
elasticsearchRef:
name: {{ .Release.Name }}-elasticsearch
config:
heartbeat.monitors:
- type: tcp
schedule: '@every 5s'
hosts: ["elastic-elasticsearch-es-http.observability.svc:9200"]
- type: tcp
schedule: '@every 5s'
hosts: ["kibana.dev.simpl-europe.eu:443"]
- type: icmp
id: ping-myhost
name: My Host Ping
hosts: ["elastic-kibana-kb-http.observability.svc"]
schedule: '*/5 * * * * * *'
{{- with .Values.heartbeat.services }}
{{- toYaml . | nindent 4 }}
{{- end }}
setup.ilm.enabled: true
setup.ilm.policy_name: heartbeat-ilm
setup.ilm.policy_file: "/usr/share/heartbeat/ilm/heartbeat-ilm.json"
deployment:
replicas: 1
podTemplate:
spec:
securityContext:
runAsUser: 0
containers:
- name: heartbeat
volumeMounts:
- mountPath: /usr/share/heartbeat/ilm/heartbeat-ilm.json
name: heartbeat-ilm-vol
subPath: heartbeat-ilm.json
volumes:
- name: heartbeat-ilm-vol
configMap:
name: heartbeat-ilm-configmap
defaultMode: 511
---
apiVersion: v1
kind: ConfigMap
metadata:
name: heartbeat-ilm-configmap
data:
heartbeat-ilm.json: |
{
"policy": {
"phases": {
"hot": {
"actions": {
"rollover": {
"max_age": "{{ .Values.heartbeat.ilm.hot.max_age }}",
"max_primary_shard_size": "{{ .Values.heartbeat.ilm.hot.max_primary_shard_size }}"
},
"set_priority": {
"priority": 100
}
},
"min_age": "0ms"
},
"delete": {
"min_age": "{{ .Values.heartbeat.ilm.delete.min_age }}",
"actions": {
"delete": {}
}
}
}
}
}
---
\ No newline at end of file
......@@ -14,13 +14,7 @@ spec:
config:
server.ssl.enabled: true
elasticsearch.requestTimeout: 120000
#elasticsearch.ssl.verificationMode: certificate
#elasticsearch.ssl.certificateAuthorities: ["/usr/share/kibana/config/certs/ca.crt"]
server.publicBaseUrl: "{{ template "kibana.dns.fullPath" . }}"
{{- if and (.Values.kibana.ingressSubpath) (ne "/" .Values.kibana.ingressSubpath) }}
server.basePath: {{ .Values.kibana.ingressSubpath }}
server.rewriteBasePath: true
{{- end }}
xpack.reporting.enabled: true
xpack.reporting.kibanaServer.protocol: "https"
{{- with .Values.kibana.config }}
......@@ -36,6 +30,19 @@ spec:
securityContext:
runAsUser: 1000
fsGroup: 1000
initContainers:
- name: git-clone
image: alpine/git
args:
- clone
- --single-branch
- --branch
- {{ .Values.kibana.dashboardsBranch }}
- https://code.europa.eu/simpl/simpl-open/development/monitoring/eck-monitoring.git
- /mnt/dashboards
volumeMounts:
- name: repo
mountPath: /mnt/dashboards
containers:
- name: kibana
imagePullPolicy: Always
......@@ -43,23 +50,27 @@ spec:
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
#volumeMounts:
#- name: es-certs
# mountPath: /usr/share/kibana/config/certs
#- name: lets-encrypt-ca
# mountPath: /usr/share/kibana/config/certs-ca
readinessProbe:
httpGet:
scheme: HTTPS
path: {{- with .Values.kibana.ingressSubpath }} {{ . }} {{- end }}
path: "/"
port: 5601
#volumes:
#- name: es-certs
# secret:
# secretName: elastic-elasticsearch-es-http-certs-internal
#- name: lets-encrypt-ca
# secret:
# secretName: lets-encrypt-ca
env:
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-elasticsearch-es-elastic-user
key: elastic
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c", "cd /mnt/dashboards/charts/kibana/dashboards/; chmod +x ./load_dashboards.sh; ./load_dashboards.sh > /mnt/dashboards/load_dashboard.log"]
volumeMounts:
- name: repo
mountPath: /mnt/dashboards
volumes:
- name: repo
emptyDir: {}
http:
tls:
certificate:
......@@ -72,18 +83,18 @@ metadata:
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
acme.cert-manager.io/http01-edit-in-place: "true"
cert-manager.io/cluster-issuer: dev-staging
cert-manager.io/cluster-issuer: {{ .Values.clusterIssuer }}
spec:
ingressClassName: nginx
tls:
- hosts:
- {{ template "kibana.dns" . }}
secretName: {{ .Release.Name }}-kibana-cert-secret
secretName: {{ .Release.Name }}-kibana-ssl
rules:
- host: {{ template "kibana.dns" . }}
http:
paths:
- path: {{ default "/" .Values.kibana.ingressSubpath }}
- path: "/"
pathType: Prefix
backend:
service:
......
......@@ -7,7 +7,7 @@ metadata:
spec:
image: {{ .Values.logstash.image }}:{{ default .Values.elasticVersion .Values.logstash.imageTag }}
version: {{ .Values.elasticVersion }}
count: {{ .Values.logstash.count }}
count: {{ .Values.logstash.count_beats }}
elasticsearchRefs:
- name: {{ .Release.Name }}-elasticsearch
clusterName: {{ .Release.Name }}-elasticsearch
......@@ -49,6 +49,41 @@ spec:
labels:
stack-namespace: {{ .Release.Namespace }}
spec:
securityContext:
runAsUser: 1000
fsGroup: 1000
initContainers:
- name: git-clone
image: alpine/git
args:
- clone
- --single-branch
- --branch
- {{ .Values.kibana.dashboardsBranch }}
- https://code.europa.eu/simpl/simpl-open/development/monitoring/eck-monitoring.git
- /mnt/ilm/
volumeMounts:
- name: repo
mountPath: /mnt/ilm/
- name: load-objects
command: ["/bin/sh", "-c", "cd /mnt/ilm/charts/kibana/scripts; chmod +x ./load_objects.sh; ./load_objects.sh 2>&1 "]
volumeMounts:
- name: repo
mountPath: /mnt/ilm/
- name: logstash-business-ilm-vol
mountPath: /usr/share/logstash/ilm/logstash-business-ilm.json
subPath: logstash-business-ilm.json
- name: logstash-technical-ilm-vol
mountPath: /usr/share/logstash/ilm/logstash-technical-ilm.json
subPath: logstash-technical-ilm.json
env:
- name: RELEASE_NAME
value: {{ .Release.Name }}
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-elasticsearch-es-elastic-user
key: elastic
containers:
- name: logstash
{{- with .Values.logstash.resources }}
......@@ -63,9 +98,19 @@ spec:
{{- end }}
- name: es-certs
mountPath: /usr/share/logstash/config/certs
- mountPath: /usr/share/logstash/certs-logstash
name: certs-logstash
- name: certs-logstash
mountPath: /usr/share/logstash/certs-logstash
- name: repo
mountPath: /mnt/ilm/
- name: logstash-business-ilm-vol
mountPath: /usr/share/logstash/ilm/logstash-business-ilm.json
subPath: logstash-business-ilm.json
- name: logstash-technical-ilm-vol
mountPath: /usr/share/logstash/ilm/logstash-technical-ilm.json
subPath: logstash-technical-ilm.json
env:
- name: LS_JAVA_OPTS
value: {{ .Values.logstash.env.ls_java_opts }}
- name: LOGSTASH_USER
valueFrom:
secretKeyRef:
......@@ -87,7 +132,7 @@ spec:
name: user-monitoring-secret
key: password
- name: ELASTIC_ELASTICSEARCH_ES_HOSTS
value: 'https://elastic-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
- name: ELASTICSEARCH_SSL_CERTIFICATE_VERIFICATION
value: "true"
- name: ELASTICSEARCH_SSL_CA_PATH
......@@ -105,6 +150,16 @@ spec:
- name: certs-logstash
secret:
secretName: logstash-secret-{{ .Values.logstash.beats.pipelines_group_name }}
- name: repo
emptyDir: {}
- name: logstash-business-ilm-vol
configMap:
name: logstash-business-ilm-configmap
defaultMode: 511
- name: logstash-technical-ilm-vol
configMap:
name: logstash-technical-ilm-configmap
defaultMode: 511
pipelinesRef:
secretName: logstash-{{ .Values.logstash.beats.pipelines_group_name }}-pipelines-yml
---
......@@ -188,3 +243,65 @@ spec:
privateKey:
encoding: "PKCS8"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: logstash-business-ilm-configmap
data:
logstash-business-ilm.json: |
{
"policy": {
"phases": {
"hot": {
"actions": {
"rollover": {
"max_age": "{{ .Values.logstash.ilm.business.hot.max_age }}",
"max_primary_shard_size": "{{ .Values.logstash.ilm.business.hot.max_primary_shard_size }}"
},
"set_priority": {
"priority": 100
}
},
"min_age": "0ms"
},
"delete": {
"min_age": "{{ .Values.logstash.ilm.business.delete.min_age }}",
"actions": {
"delete": {}
}
}
}
}
}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: logstash-technical-ilm-configmap
data:
logstash-technical-ilm.json: |
{
"policy": {
"phases": {
"hot": {
"actions": {
"rollover": {
"max_age": "{{ .Values.logstash.ilm.technical.hot.max_age }}",
"max_primary_shard_size": "{{ .Values.logstash.ilm.technical.hot.max_primary_shard_size }}"
},
"set_priority": {
"priority": 100
}
},
"min_age": "0ms"
},
"delete": {
"min_age": "{{ .Values.logstash.ilm.technical.delete.min_age }}",
"actions": {
"delete": {}
}
}
}
}
}
---
\ No newline at end of file
......@@ -7,7 +7,7 @@ metadata:
spec:
image: {{ .Values.logstash.image }}:{{ default .Values.elasticVersion .Values.logstash.imageTag }}
version: {{ .Values.elasticVersion }}
count: {{ .Values.logstash.count }}
count: {{ .Values.logstash.count_syslog }}
elasticsearchRefs:
- name: {{ .Release.Name }}-elasticsearch
clusterName: {{ .Release.Name }}-elasticsearch
......@@ -71,7 +71,7 @@ spec:
name: logstash-writer-secret
key: password
- name: ELASTIC_ELASTICSEARCH_ES_HOSTS
value: 'https://elastic-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
value: 'https://{{ .Release.Name }}-elasticsearch-es-http.{{ .Release.Namespace }}.svc:9200'
volumes:
{{- range .Values.logstash.syslog.pipelines }}
- name: pipeline-config-{{- .name }}
......
......@@ -68,13 +68,20 @@ spec:
module: kubernetes
ssl:
verification_mode: none
setup.ilm.enabled: true
setup.ilm.policy_name: metricbeat-ilm
setup.ilm.policy_file: "/usr/share/metricbeat/ilm/metricbeat-ilm.json"
processors:
- add_cloud_metadata: {}
- add_host_metadata: {}
{{- with .Values.metricbeat.dropEvent }}
- drop_event:
{{- toYaml . | nindent 8 }}
{{- end }}
when:
not:
or:
- equals:
kubernetes.namespace: {{ .Release.Namespace }}
- equals:
service.type: system
daemonSet:
podTemplate:
spec:
......@@ -98,6 +105,9 @@ spec:
name: dockersock
- mountPath: /hostfs/proc
name: proc
- mountPath: /usr/share/metricbeat/ilm/metricbeat-ilm.json
name: metricbeat-ilm-vol
subPath: metricbeat-ilm.json
env:
- name: NODE_NAME
valueFrom:
......@@ -118,11 +128,15 @@ spec:
- hostPath:
path: /proc
name: proc
- name: metricbeat-ilm-vol
configMap:
name: metricbeat-ilm-configmap
defaultMode: 511
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metricbeat
name: metricbeat-{{ .Release.Namespace }}
rules:
- apiGroups:
- ""
......@@ -173,13 +187,44 @@ metadata:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metricbeat
name: metricbeat-{{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: metricbeat
namespace: {{.Release.Namespace}}
roleRef:
kind: ClusterRole
name: metricbeat
name: metricbeat-{{ .Release.Namespace }}
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: metricbeat-ilm-configmap
data:
metricbeat-ilm.json: |
{
"policy": {
"phases": {
"hot": {
"actions": {
"rollover": {
"max_age": "{{ .Values.metricbeat.ilm.hot.max_age }}",
"max_primary_shard_size": "{{ .Values.metricbeat.ilm.hot.max_primary_shard_size }}"
},
"set_priority": {
"priority": 100
}
},
"min_age": "0ms"
},
"delete": {
"min_age": "{{ .Values.metricbeat.ilm.delete.min_age }}",
"actions": {
"delete": {}
}
}
}
}
}
---
# version of all elastic applications
elasticVersion: 8.15.1
# Name of aws environment
envTag: "dev"
namespaceTag: "test-namespace"
# This suffix will be used to create subdomain of following template:
# kibana.NAMESPACE_TAG.DOMAIN_SUFFIX
domainSuffix: "simpl-europe.eu"
# This value is used to create unique dns for each deployment. By default it equals to namespace.
namespaceTag: "dev"
# Spread pods evenly between subnets
createTopologySpreadConstraints: true
# Force pods to be scheduled evenly between separete AZ, this means if all nodes in AZ will be full pod won't be scheduled.
# Turning this off will still create pods in seperate AZ, but k8s scheduler won't be able to respect topologySpreadConstrains, will schedule pods in the same AZ.
forceDeployInSeperateAZ: true
# Open port 443 for each component on the Traefik level.
openStandardHttpsPorts: true
# imagePullSecrets defines the secrets with credentials to docker registry, for example nexus.
imagePullSecrets: []
# kibana.NAMESPACE.NAMESPACE_TAG.DOMAIN_SUFFIX
domainSuffix: "dev.simpl-europe.eu"
#ClusterIssuer to generate Kibana SSL front certificate
clusterIssuer: "dev-prod-dns01"
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch
# Elasticsearch's image tag, by default it equals to elasticVersion
ccr_remote_ca_file: ccr-remote-ca.pem
imageTag: ""
jvm_options_xms: 1g
jvm_options_xmx: 1g
cert:
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
#Environment variables to set in kibana pod
#Usage from cli:
# --set "elasticsearch.env[0].name=VARIABLE_NAME" --set "elasticsearch.env[0].value=VARIABLE_VALUE"
env: []
# here you can specify several node groups (e.g. master, data groups)
nodeSets:
- name: "node"
# number of elasticsearch nodes in each group
name: node
count: 3
# persistent volume claim size
diskSpace: 60Gi
# name of StorageClass that will be used to create VolumeClaims. (StorageClass must exist)
storageClassName: csi-cinder-high-speed
# here you can specify elasticsearch config e.g
# config:
# node.roles: ["master"]
config: {}
resources:
requests:
memory: 4Gi
......@@ -71,14 +33,14 @@ elasticsearch:
cpu: "1"
kibana:
# set >0 to deploy kibana, 0 otherwise
count: 1
image: docker.elastic.co/kibana/kibana
#Branch name to donwload dashboards
dashboardsBranch: "develop"
# Kibana's image tag, by default it equals to elasticVersion
imageTag: ""
# name of helm release where elasticsearch is installed. If you install kibana together with elasticsearch, leave it empty.
elasticsearchReleaseName: ""
# Kibana server will listen on that subpath
cert:
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
......@@ -90,23 +52,34 @@ kibana:
memory: 1Gi
limits:
memory: 1Gi
#Environment variables to set in kibana pod
#Usage from cli:
# --set "kibana.env[0].name=VARIABLE_NAME" --set "kibana.env[0].value=VARIABLE_VALUE"
env: []
logstash:
count: 2
ilm:
business:
hot:
max_age: 30d
max_primary_shard_size: 1gb
delete:
min_age: 30d
technical:
hot:
max_age: 30d
max_primary_shard_size: 1gb
delete:
min_age: 30d
count_beats: 1
count_syslog: 0
image: docker.elastic.co/logstash/logstash
config: {}
diskSpace: 3Gi
# name of StorageClass that will be used to create VolumeClaims. (StorageClass must exist)
storageClassName: csi-cinder-high-speed
imageTag: ""
env:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
ls_java_opts: "-Xms3g -Xmx3g"
resources:
requests:
memory: 4Gi
......@@ -120,10 +93,6 @@ logstash:
path.config: "/app/elastic/logstash/config/pipelines/*.config"
pipeline.workers: 1
pipeline.batch.size: 125
workers: 1
batch:
size: 125
beats:
pipelines_group_name: "beats"
pipelines:
......@@ -141,71 +110,109 @@ logstash:
}
filter: |-
filter {
if [fields][logtype] == "logs-sample-onboarding" {
if [kubernetes][container][name] == "ejbca-community-helm" {
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{PATH:path}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
}
}
if [kubernetes][container][name] == "keycloak" {
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
}
}
if [kubernetes][container][name] == "onboarding" {
grok {
pattern_definitions => { "JAVA" => "[0-9A-Za-z\[\]\.\$]*" }
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{PATH:path}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}\[%{DATA:request_id}\]%{SPACE}HTTP%{SPACE}%{WORD:http_method}%{SPACE}"%{DATA:uri}"',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{DATA:logger}%{SPACE}:%{SPACE}\[%{DATA:request_id}\]%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
}
}
if [fields][logtype] == "logs-sample-sdtooling" {
if [kubernetes][container][name] == "postgresql" {
grok {
pattern_definitions => { "JAVA" => "[0-9A-Za-z\[\]\.\$]*" }
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}Method:%{SPACE}%{DATA:method}%{SPACE}\-%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}'
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:timezone}%{SPACE}\[%{NUMBER:pid}\]%{SPACE}%{WORD:log_level}:%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
}
}
if [fields][logtype] == "logs-sample-catalogue" {
if [kubernetes][container][name] == "vault" or [kubernetes][container][name] == "vault-agent-init" or [kubernetes][container][name] == "sidecar-injector" {
grok {
pattern_definitions => { "JAVA" => "[0-9A-Za-z\[\]\.\$]*" }
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread_name}\]%{SPACE}\[%{DATA:exec_thread}\]%{SPACE}\[%{DATA:request_id}\]%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}',
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread_name}\]%{SPACE}\[%{DATA:exec_thread}\]%{SPACE}%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}'
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}\[%{LOGLEVEL:loglevel}\]%{SPACE}%{DATA:handler}:%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
}
}
if [fields][logtype] == "logs-sample-signer" {
json {
source => "message"
if [kubernetes][container][name] == "simpl-cloud-gateway" or [kubernetes][container][name] == "users-roles" {
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
}
if [fields][logtype] == "logs-sample-business" {
}
if [kubernetes][container][name] == "neo4j" {
grok {
match => { "message" => '%{TIMESTAMP_ISO8601:timestamp}\|%{WORD:origin}\|%{WORD:destination}\|%{WORD:business_operation}\|%{DATA:message_type}\|%{WORD:correlation_id}' }
match => {
"message" => [
'%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
}
} if [fields][logtype] == "logs-sample-wrapper" {
if [message] !~ "^\{" {
drop { }
}
json {
source => "message"
if [kubernetes][container][name] == "redis" {
grok {
match => {
"message" => [
'%{NUMBER:process_id}:%{WORD:process_type}%{SPACE}%{MONTHDAY:day}%{SPACE}%{MONTH:month}%{SPACE}%{YEAR:year}%{SPACE}%{TIME:time}\.%{INT:milliseconds}%{SPACE}\*%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
add_field => {
"timestamp" => "%{day} %{month} %{year} %{time}.%{milliseconds}"
}
if [ts] {
date {
match => [ "ts", "ISO8601" ]
}
} else {
date {
match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss" ]
}
if [fields][logtype] == "logs-sample-business" {
grok {
match => { "message" => '%{TIMESTAMP_ISO8601:timestamp}\|%{WORD:origin}\|%{WORD:destination}\|%{WORD:business_operation}\|%{DATA:message_type}\|%{WORD:correlation_id}' }
}
}
date {
match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS" ]
}
}
output: |-
output {
......@@ -217,9 +224,9 @@ logstash:
ssl_enabled => "true"
ssl_verification_mode => "full"
ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
data_stream => "true"
data_stream_type => "logs"
data_stream_dataset => "business"
index => "business-logs"
template_name => "business-template"
action => "create"
}
}
else if [fields][logtype] == "logs-sample-wrapper" {
......@@ -230,22 +237,12 @@ logstash:
ssl_enabled => "true"
ssl_verification_mode => "full"
ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
data_stream => "true"
data_stream_type => "logs"
data_stream_dataset => "business"
}
}
else if [fields][logtype] == "agents" {
elasticsearch {
hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
user => "${LOGSTASH_USER}"
password => "${LOGSTASH_PASSWORD}"
ssl_enabled => "true"
ssl_verification_mode => "full"
ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
data_stream => "true"
data_stream_type => "logs"
data_stream_dataset => "agents"
#data_stream => "true"
#data_stream_type => "logs"
#data_stream_dataset => "business"
index => "business-logs"
template_name => "business-template"
action => "create"
}
}
else {
......@@ -256,9 +253,12 @@ logstash:
ssl_enabled => "true"
ssl_verification_mode => "full"
ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
data_stream => "true"
data_stream_type => "logs"
data_stream_dataset => "technical"
#data_stream => "true"
#data_stream_type => "logs"
#data_stream_dataset => "technical"
index => "technical-logs"
template_name => "technical-template"
action => "create"
}
}
#stdout {
......@@ -297,13 +297,11 @@ logstash:
filebeat:
image: docker.elastic.co/beats/filebeat
config: {}
count: 0
count: 1
# name of StorageClass that will be used to create VolumeClaims. (StorageClass must exist)
imageTag: ""
# Total number of the sample messages to generate. Provide negative number to generate infinitely
totalMessages: 604800
# Number of messages per minute. Provide negative number to generate messages without time limit.
messagesPerMinute: 30
cert:
......@@ -363,11 +361,6 @@ filebeat:
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
# processors:
# - add_kubernetes_metadata:
# in_cluster: true
# logging.level: debug
name: "test"
output: |
output.logstash:
hosts: ["${LOGSTASH_HOSTS}"]
......@@ -384,19 +377,9 @@ filebeat:
password: "${MONITORING_PASSWORD}"
filebeat4agents:
image: docker.elastic.co/beats/filebeat
config: {}
destinationLogstashHost: logstash-beats-ls-api.observability:5044
# name of StorageClass that will be used to create VolumeClaims. (StorageClass must exist)
imageTag: ""
# Total number of the sample messages to generate. Provide negative number to generate infinitely
totalMessages: 604800
# Number of messages per minute. Provide negative number to generate messages without time limit.
messagesPerMinute: 30
cert:
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
......@@ -409,19 +392,30 @@ filebeat4agents:
- condition:
or:
- equals:
kubernetes.namespace: "ingress-nginx"
- equals:
kubernetes.namespace: "kube-system"
kubernetes.namespace: "${MONITORED_NAMESPACE}"
config:
- type: container
paths:
- /var/log/containers/*-${data.kubernetes.container.id}.log
multiline:
type: pattern
pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
negate: true
match: after
- condition:
equals:
kubernetes.container.name: "redis"
config:
- type: container
paths:
- /var/log/containers/*-${data.kubernetes.container.id}.log
multiline:
pattern: '^\d+:\w+\s+\d{2}\s+\w{3}\s+\d{4}'
negate: true
match: after
processors:
- add_cloud_metadata: {}
- add_host_metadata: {}
- add_fields:
fields:
logtype: "agents"
output: |
output.logstash:
hosts: ["${LOGSTASH_HOSTS}"]
......@@ -430,33 +424,45 @@ filebeat4agents:
ssl.verification_mode: full
ssl.certificate: "/usr/share/filebeat/certs/tls.crt"
ssl.key: "/usr/share/filebeat/certs/tls.key"
# monitoring.enabled: "true"
# monitoring.elasticsearch:
# hosts: ["${ELASTIC_ELASTICSEARCH_ES_HOSTS}"]
# ssl.certificate_authorities: ["/usr/share/filebeat/es-certs/ca.crt"]
# username: "${MONITORING_USER}"
# password: "${MONITORING_PASSWORD}"
metricbeat:
ilm:
hot:
max_age: 30d
max_primary_shard_size: 1gb
delete:
min_age: 30d
resources:
requests:
memory: 500Mi
cpu: 300m
limits:
memory: 500Mi
cpu: 300m
#Hostname to receive status_pod metrics
kubeStateHost: kube-state-metrics.kube-state-metrics.svc.cluster.local:8080
#Filter for get metric data from specific services,namespaces
dropEvent:
when:
not:
or:
- equals:
kubernetes.namespace: observability
- equals:
kubernetes.namespace: argo-cd
- equals:
service.type: system
heartbeat:
ilm:
hot:
max_age: 30d
max_primary_shard_size: 100mb
delete:
min_age: 30d
services:
heartbeat.monitors:
- type: tcp
name: Elasticsearch Service
id: elasticsearch:9200
schedule: '@every 5s'
hosts: ["elastic-elasticsearch-es-http.observability.svc:9200"]
- type: tcp
name: Kibana GUI
id: kibana:443
schedule: '@every 5s'
hosts: ["kibana.dev.simpl-europe.eu:443"]
- type: icmp
id: kibana/icmp
name: Kibana ICMP
hosts: ["elastic-kibana-kb-http.observability.svc"]
schedule: '*/5 * * * * * *'
\ No newline at end of file
elasticsearch:
count: 2
resources:
requests:
memory: 4Gi
limits:
memory: 4Gi
cpu: "1"
kibana:
count: 1
filebeat:
count: 0
logstash:
count_beats: 1
count_syslog: 0
PROJECT_VERSION_NUMBER="0.1.0"
\ No newline at end of file
PROJECT_VERSION_NUMBER="0.1.2"
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment