Commit d23f21de authored by Natalia Szakiel

Merge branch 'develop' into 'main'

Develop

See merge request !86
Parents: 0161068f c820df33
Pipeline #261872 passed
@@ -2,6 +2,12 @@
All notable changes to this project will be documented in this file.
## [0.1.10] - 2025-01-20
### Changed
- Moved the values.yaml file from eck-monitoring/charts/values/dev/observability/values.yaml to eck-monitoring/charts/values.yaml
## [0.1.9] - 2025-01-17
### Added
...
name: eck-monitoring
version: ${PROJECT_RELEASE_VERSION}
appVersion: "${PROJECT_RELEASE_VERSION}"
#version: 0.1.5
#version: 0.1.10
@@ -2,3 +2,498 @@ image:
repository: "${CI_REGISTRY_IMAGE}"
pullPolicy: IfNotPresent
tag: "${PROJECT_RELEASE_VERSION}"
# version of all elastic applications
elasticVersion: 8.15.1
namespaceTag: "test-namespace"
mainNamespace: observability
# This suffix is used to build a subdomain of the following form:
# kibana.NAMESPACE.NAMESPACE_TAG.DOMAIN_SUFFIX
domainSuffix: "dev.simpl-europe.eu"
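# Illustrative example: for a namespace "observability" with the values above,
# Kibana would be served at kibana.observability.test-namespace.dev.simpl-europe.eu.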
# ClusterIssuer used to issue the Kibana front-end TLS certificate
clusterIssuer: "dev-prod-dns01"
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch
# Elasticsearch image tag; defaults to elasticVersion
imageTag: ""
cert:
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
# Usage from the CLI:
# --set "elasticsearch.env[0].name=VARIABLE_NAME" --set "elasticsearch.env[0].value=VARIABLE_VALUE"
env: []
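# Equivalent in-file form (illustrative variable and value, not a chart default):
# env:
#   - name: ES_JAVA_OPTS
#     value: "-Xms2g -Xmx2g"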
name: node
count: 3
diskSpace: 60Gi
storageClassName: csi-cinder-high-speed
resources:
requests:
memory: "0"
cpu: "0"
limits:
memory: 4Gi
cpu: "1"
kibana:
count: 1
image: docker.elastic.co/kibana/kibana
# Branch name to download dashboards from
dashboardsBranch: "main"
# Kibana image tag; defaults to elasticVersion
imageTag: ""
# Name of the Helm release where Elasticsearch is installed. Leave empty if Kibana is installed together with Elasticsearch.
elasticsearchReleaseName: ""
cert:
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
# Additional Kibana settings; see https://www.elastic.co/guide/en/kibana/current/settings.html
config:
xpack.reporting.roles.enabled: false
resources:
requests:
memory: "0"
cpu: "0"
limits:
memory: 1Gi
cpu: 300m
# Environment variables to set in the Kibana pod
# Usage from the CLI:
# --set "kibana.env[0].name=VARIABLE_NAME" --set "kibana.env[0].value=VARIABLE_VALUE"
env: []
logstash:
ilm:
business:
hot:
max_age: 30d
max_primary_shard_size: 1gb
delete:
min_age: 365d
technical:
hot:
max_age: 30d
max_primary_shard_size: 1gb
delete:
min_age: 365d
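# The hot phase rolls an index over when either max_age or max_primary_shard_size
# is reached; the delete phase removes it min_age after rollover.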
count_beats: 1
count_syslog: 0
image: docker.elastic.co/logstash/logstash
diskSpace: 3Gi
# Name of the StorageClass used to create VolumeClaims (the StorageClass must exist)
storageClassName: csi-cinder-high-speed
imageTag: ""
env:
ls_java_opts: "-Xms3g -Xmx3g"
resources:
requests:
memory: "0"
cpu: "0"
limits:
memory: 4Gi
cpu: 300m
cert:
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
pipelines_yml_config: |-
- pipeline.id: main
path.config: "/app/elastic/logstash/config/pipelines/*.config"
pipeline.workers: 1
pipeline.batch.size: 125
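# Note: pipeline.workers: 1 preserves event ordering within the pipeline;
# increasing workers (and batch size) trades ordering for throughput.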
beats:
pipelines_group_name: "beats"
pipelines:
- name: "beats-pipeline"
input: |-
input {
beats {
port => 5044
ssl_enabled => true
ssl_certificate_authorities => ["/usr/share/logstash/certs-logstash/ca.crt"]
ssl_certificate => "/usr/share/logstash/certs-logstash/tls.crt"
ssl_key => "/usr/share/logstash/certs-logstash/tls.key"
ssl_client_authentication => "required"
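# "required" enforces mutual TLS: each Beat must present a client certificate
# signed by the CA above (Filebeat supplies one via ssl.certificate/ssl.key below).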
}
}
filter: |-
filter {
## Drop the Elastic Stack's own logs
if [kubernetes][container][name] == "filebeat" or [kubernetes][container][name] == "metricbeat" or [kubernetes][container][name] == "logstash" or [kubernetes][container][name] == "heartbeat" or [kubernetes][container][name] == "kibana" or [kubernetes][container][name] == "elasticsearch" {
drop { }
}
# Technical logs
if [kubernetes][container][name] == "sd-creation-wizard-api" or [kubernetes][container][name] == "signer" or [kubernetes][container][name] == "sd-creation-wizard-api-validation" {
json {
source => "message"
skip_on_invalid_json => true
add_field => { "log_type" => "technical" }
}
}
# Business logs
if [kubernetes][container][name] == "simpl-cloud-gateway" or [kubernetes][container][name] == "tls-gateway" {
json {
source => "message"
skip_on_invalid_json => true
}
if [level] == "BUSINESS" {
mutate { add_field => { "log_type" => "business" } }
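# If the parsed JSON carries a nested "message" object, the json filter above
# leaves [message] as a Hash; flag that so its fields can be promoted below.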
ruby {
code => '
if event.get("[message]").is_a?(Hash)
event.set("is_json_message", true)
else
event.set("is_json_message", false)
end
'
}
if [is_json_message] {
if [message][msg] { mutate { add_field => { "msg" => "%{[message][msg]}" } } }
if [message][messageType] { mutate { add_field => { "messageType" => "%{[message][messageType]}" } } }
if [message][businessOperations] { mutate { add_field => { "businessOperations" => "%{[message][businessOperations]}" } } }
if [message][origin] { mutate { add_field => { "origin" => "%{[message][origin]}" } } }
if [message][httpStatus] { mutate { add_field => { "httpStatus" => "%{[message][httpStatus]}" } } }
if [message][destination] { mutate { add_field => { "destination" => "%{[message][destination]}" } } }
if [message][correlationId] { mutate { add_field => { "correlationId" => "%{[message][correlationId]}" } } }
if [message][user] { mutate { add_field => { "user" => "%{[message][user]}" } } }
mutate { remove_field => [ "[message]" ] }
}
}
else {
mutate { add_field => { "log_type" => "technical" } }
}
}
# Onboarding technical logs
if [kubernetes][container][name] == "users-roles" or [kubernetes][container][name] == "identity-provider" or [kubernetes][container][name] == "onboarding" or [kubernetes][container][name] == "security-attributes-provider" or [kubernetes][container][name] == "xsfc-advsearch-be" or [kubernetes][container][name] == "contract-consumption-be-api" {
json {
source => "message"
skip_on_invalid_json => true
add_field => { "log_type" => "technical" }
}
ruby {
code => '
if event.get("[message]").is_a?(Hash)
event.set("is_json_message", true)
else
event.set("is_json_message", false)
end
'
}
if [is_json_message] {
if [message][httpStatus] { mutate { add_field => { "httpStatus" => "%{[message][httpStatus]}" } } }
if [message][msg] { mutate { add_field => { "msg" => "%{[message][msg]}" } } }
if [message][httpRequestSize] { mutate { add_field => { "httpRequestSize" => "%{[message][httpRequestSize]}" } } }
if [message][user] { mutate { add_field => { "user" => "%{[message][user]}" } } }
if [message][httpExecutionTime] { mutate { add_field => { "httpExecutionTime" => "%{[message][httpExecutionTime]}" } } }
mutate { remove_field => [ "[message]" ] }
}
}
# Non-JSON logs
if [kubernetes][container][name] == "redis" {
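# Matches the default Redis log prefix, e.g. (illustrative line):
# 1:M 20 Jan 2025 12:34:56.789 * Ready to accept connections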
grok {
match => {
"message" => [
'%{NUMBER:process_id}:%{WORD:process_type}%{SPACE}%{MONTHDAY:day}%{SPACE}%{MONTH:month}%{SPACE}%{YEAR:year}%{SPACE}%{TIME:time}\.%{INT:milliseconds}%{SPACE}\*%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
add_field => {
"timestamp" => "%{day} %{month} %{year} %{time}.%{milliseconds}"
"log_type" => "technical"
}
}
}
if [kubernetes][container][name] == "keycloak" {
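# Matches the default Keycloak (Quarkus) console format, e.g. (illustrative line):
# 2025-01-20 12:34:56,789 INFO [org.keycloak.services] (main) Keycloak started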
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:ts}%{SPACE}%{WORD:level}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
add_field => { "log_type" => "technical" }
}
mutate {
gsub => [
"ts", ",", "."
]
}
date {
match => [ "ts", "yyyy-MM-dd HH:mm:ss,SSS", "yyyy-MM-dd HH:mm:ss.SSS"]
target => "@timestamp"
}
}
if [kubernetes][container][name] == "postgresql" {
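# Matches the default PostgreSQL stderr log format, e.g. (illustrative line):
# 2025-01-20 12:34:56.789 UTC [1] LOG:  database system is ready to accept connections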
grok {
match => {
"message" => [
'%{TIMESTAMP_ISO8601:ts}%{SPACE}%{WORD:timezone}%{SPACE}\[%{NUMBER:pid}\]%{SPACE}%{WORD:level}:%{SPACE}%{GREEDYDATA:message}'
]
}
overwrite => [ "message" ]
add_field => { "log_type" => "technical" }
}
date {
match => [ "ts", "yyyy-MM-dd HH:mm:ss.SSS"]
target => "@timestamp"
}
}
if [kubernetes][container][name] == "simpl-edc-container" {
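# EDC console output is ANSI-coloured; strip the colour codes first, then parse
# lines such as (hypothetical example):
# INFO 2025-01-20T12:34:56 [TransferProcessManager] TransferProcess 550e8400-e29b-41d4-a716-446655440000 is now in state REQUESTED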
mutate {
gsub => ["message", "\x1B\[[0-9;]*[mK]", ""]
}
grok {
match => {
"message" => [
"%{LOGLEVEL:level} %{TIMESTAMP_ISO8601:timestamp} \[%{DATA:component}\] %{DATA:entity_type} %{UUID:entity_id} is now in state %{DATA:state}",
"%{LOGLEVEL:level} %{TIMESTAMP_ISO8601:timestamp} %{GREEDYDATA:message}",
"%{LOGLEVEL:level} %{TIMESTAMP_ISO8601:timestamp} Error %{GREEDYDATA:message}"
]
}
overwrite => [ "message" ]
add_field => { "log_type" => "technical" }
}
date {
match => ["timestamp", "ISO8601"]
target => "@timestamp"
}
}
date {
match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS", "yyyy-MM-dd HH:mm:ss,SSS"]
}
}
output: |-
output {
if [log_type] == "business" {
elasticsearch {
hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
user => "${LOGSTASH_USER}"
password => "${LOGSTASH_PASSWORD}"
ssl_enabled => "true"
ssl_verification_mode => "full"
ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
index => "business-logs"
template_name => "business-template"
action => "create"
}
}
else if [log_type] == "technical" {
elasticsearch {
hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
user => "${LOGSTASH_USER}"
password => "${LOGSTASH_PASSWORD}"
ssl_enabled => "true"
ssl_verification_mode => "full"
ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
index => "technical-logs"
template_name => "technical-template"
action => "create"
}
}
#stdout {
# codec => json
#}
}
syslog:
pipelines_group_name: "syslog"
pipelines:
- name: "syslog-pipeline"
input: |-
input {
syslog {
port => 514
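# Binding a port below 1024 inside the container may require elevated privileges
# (e.g. NET_BIND_SERVICE); a Service can map external 514 to a higher port instead.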
}
}
filter: |-
filter {
}
output: |-
output {
elasticsearch {
hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
index => "%{[@metadata][beat]}-%{[@metadata][version]}"
user => "${LOGSTASH_USER}"
password => "${LOGSTASH_PASSWORD}"
ssl_enabled => "true"
ssl_verification_mode => "full"
ssl_certificate_authorities => "${ELASTIC_ELASTICSEARCH_ES_SSL_CERTIFICATE_AUTHORITY}"
}
stdout {
codec => rubydebug
}
}
filebeat:
image: docker.elastic.co/beats/filebeat
imageTag: ""
resources:
requests:
memory: "0"
cpu: "0"
limits:
memory: 1Gi
cpu: 100m
cert:
duration: 2160h0m0s # 90d
renewBefore: 360h0m0s # 15d
# Filebeat configuration file - input
input: |
filebeat.autodiscover:
providers:
- type: kubernetes
# Filter logs only from the monitored namespace
namespace: "${MONITORED_NAMESPACE}"
templates:
# Condition for redis container in the monitored namespace
- condition:
equals:
kubernetes.container.name: "redis"
config:
- type: container
paths:
- /var/log/containers/*-${data.kubernetes.container.id}.log
multiline:
pattern: '^\d+:\w+\s+\d{2}\s+\w{3}\s+\d{4}'
negate: true
match: after
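# Lines that do not start with the "pid:role dd Mon yyyy" prefix are treated as
# continuations and appended to the preceding event.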
# Condition for JSON-structured logs
- condition:
or:
# Business logs
- equals:
kubernetes.container.name: "simpl-cloud-gateway"
- equals:
kubernetes.container.name: "tls-gateway"
# Onboarding technical logs containers
- equals:
kubernetes.container.name: "identity-provider"
- equals:
kubernetes.container.name: "onboarding"
- equals:
kubernetes.container.name: "security-attributes-provider"
- equals:
kubernetes.container.name: "users-roles"
# Logs not specified yet
- equals:
kubernetes.container.name: "signer"
- equals:
kubernetes.container.name: "sd-creation-wizard-api"
- equals:
kubernetes.container.name: "sd-creation-wizard-api-validation"
- equals:
kubernetes.container.name: "xsfc-advsearch-be"
- equals:
kubernetes.container.name: "contract-consumption-be-api"
config:
- type: container
paths:
- /var/log/containers/*-${data.kubernetes.container.id}.log
- condition:
or:
# External apps logs
- equals:
kubernetes.container.name: "keycloak"
- equals:
kubernetes.container.name: "postgresql"
config:
- type: container
paths:
- /var/log/containers/*-${data.kubernetes.container.id}.log
multiline:
type: pattern
pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
negate: true
match: after
- condition:
or:
# EDC logs
- equals:
kubernetes.container.name: "simpl-edc-container"
config:
- type: container
paths:
- /var/log/containers/*-${data.kubernetes.container.id}.log
multiline:
type: pattern
pattern: '^\x1B\[[0-9;]*[mK]*(DEBUG|INFO|WARNING|ERROR|SEVERE)'
negate: true
match: after
processors:
# Add cloud and host metadata
- add_cloud_metadata: {}
- add_host_metadata: {}
output: |
output.logstash:
hosts: ["${LOGSTASH_HOSTS}"]
ssl.enabled: true
ssl.certificate_authorities: ["/usr/share/filebeat/certs/ca.crt"]
ssl.verification_mode: full
ssl.certificate: "/usr/share/filebeat/certs/tls.crt"
ssl.key: "/usr/share/filebeat/certs/tls.key"
metricbeat:
ilm:
hot:
max_age: 30d
max_primary_shard_size: 50gb
delete:
min_age: 365d
resources:
requests:
memory: "0"
cpu: "0"
limits:
memory: 500Mi
cpu: 300m
# Hostname of the kube-state-metrics endpoint used for pod status metrics
kubeStateHost: kube-state-metrics.kube-state-metrics.svc.cluster.local:8080
heartbeat:
ilm:
hot:
max_age: 30d
max_primary_shard_size: 50gb
delete:
min_age: 365d
services:
heartbeat.monitors:
- type: tcp
name: Elasticsearch Service
id: elasticsearch:9200
schedule: '@every 5s'
hosts: ["elastic-elasticsearch-es-http.observability.svc:9200"]
- type: tcp
name: Kibana GUI
id: kibana:443
schedule: '@every 5s'
hosts: ["kibana.dev.simpl-europe.eu:443"]
- type: icmp
id: kibana/icmp
name: Kibana ICMP
hosts: ["elastic-kibana-kb-http.observability.svc"]
schedule: '*/5 * * * * * *'
\ No newline at end of file
PROJECT_VERSION_NUMBER="0.1.9"
PROJECT_VERSION_NUMBER="0.1.10"