From be34c53ed50392fa3342e17cfb33504e166805ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pascal=20Pfl=C3=BCger?= <pascal.pflueger@t-systems.com>
Date: Mon, 24 Jun 2024 10:39:46 +0200
Subject: [PATCH 1/4] add CODEOWNERS

---
 .gitlab/CODEOWNERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab/CODEOWNERS b/.gitlab/CODEOWNERS
index 8cc70c3..4b9b26a 100644
--- a/.gitlab/CODEOWNERS
+++ b/.gitlab/CODEOWNERS
@@ -1 +1 @@
-* @simpl/simpl-open/development/monitoring @n00bagqb
+* @simpl/simpl-open/development/monitoring @n00bagqb
\ No newline at end of file
-- 
GitLab


From 53e32566817dccf2ad499923dfcd8663065d1598 Mon Sep 17 00:00:00 2001
From: Albert Brzozowski <albert.brzozowski.external@atos.net>
Date: Thu, 21 Nov 2024 11:54:44 +0100
Subject: [PATCH 2/4] Feature/logstash adjust

---
 .gitlab/CODEOWNERS                          |   2 +-
 charts/Chart.yaml                           |   2 +-
 charts/templates/kibana.yaml                |   2 +-
 charts/values/dev/observability/values.yaml | 173 ++++++++++----------
 pipeline.variables.sh                       |   2 +-
 5 files changed, 87 insertions(+), 94 deletions(-)

diff --git a/.gitlab/CODEOWNERS b/.gitlab/CODEOWNERS
index 8cc70c3..4b9b26a 100644
--- a/.gitlab/CODEOWNERS
+++ b/.gitlab/CODEOWNERS
@@ -1 +1 @@
-* @simpl/simpl-open/development/monitoring @n00bagqb
+* @simpl/simpl-open/development/monitoring @n00bagqb
\ No newline at end of file
diff --git a/charts/Chart.yaml b/charts/Chart.yaml
index f21aa4e..dc19d20 100644
--- a/charts/Chart.yaml
+++ b/charts/Chart.yaml
@@ -1,6 +1,6 @@
 name: eck-monitoring
 version: ${PROJECT_RELEASE_VERSION}
 appVersion: "${PROJECT_RELEASE_VERSION}"
-#version: 0.1.0
+#version: 0.1.3
 
 
diff --git a/charts/templates/kibana.yaml b/charts/templates/kibana.yaml
index 1de015d..77a6bf0 100644
--- a/charts/templates/kibana.yaml
+++ b/charts/templates/kibana.yaml
@@ -74,7 +74,7 @@ spec:
   http:
     tls:
       certificate:
-        secretName: {{ .Release.Name }}-kibana-cert-secret
+        secretName: {{ .Release.Name }}-kibana-ssl
 ---
 apiVersion: networking.k8s.io/v1
 kind: Ingress
diff --git a/charts/values/dev/observability/values.yaml b/charts/values/dev/observability/values.yaml
index 1114e68..df345e9 100644
--- a/charts/values/dev/observability/values.yaml
+++ b/charts/values/dev/observability/values.yaml
@@ -28,6 +28,7 @@ elasticsearch:
   resources:
     requests:
       memory: 4Gi
+      cpu: 300m
     limits:
       memory: 4Gi
       cpu: "1"
@@ -36,7 +37,7 @@ kibana:
   count: 1
   image: docker.elastic.co/kibana/kibana
   #Branch name to donwload dashboards
-  dashboardsBranch: "develop"
+  dashboardsBranch: "main"
   # Kibana's image tag, by default it equals to elasticVersion
   imageTag: ""
   # name of helm release where elasticsearch is installed. If you install kibana together with elasticsearch, leave it empty.
@@ -110,17 +111,48 @@ logstash:
         }
       filter: |-
         filter {
-          if [kubernetes][container][name] == "ejbca-community-helm" {
-            grok {
-              match => { 
-                "message" => [
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}', 
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{PATH:path}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}'
-                  ] 
-              }
-              overwrite => [ "message" ]
+          ## removing ELK logs
+          if [kubernetes][container][name] == "filebeat" or [kubernetes][container][name] == "metricbeat" or [kubernetes][container][name] == "logstash" or [kubernetes][container][name] == "heartbeat"  or [kubernetes][container][name] == "kibana" or [kubernetes][container][name] == "elasticsearch" {
+            drop { }
+          }    
+          
+          if [kubernetes][container][name] == "sd-creation-wizard-api" or [kubernetes][container][name] == "signer" 	or [kubernetes][container][name] == "sd-creation-wizard-api-validation" or [kubernetes][container][name] == "simpl-cloud-gateway"  {
+            json {
+                    source => "message"
+                    skip_on_invalid_json => true
+                }
+          }
+              
+          if [kubernetes][container][name] == "users-roles" {
+
+            json {
+                    source => "message"
+                    skip_on_invalid_json => true
+                }
+
+
+            ruby {
+                code => '
+                    if event.get("[message]").is_a?(Hash)
+                        event.set("is_json_message", true)
+                    else
+                        event.set("is_json_message", false)
+                    end
+                '
             }
+              
+            if [is_json_message] {
+              if [message][httpStatus] { mutate { add_field => { "httpStatus" => "%{[message][httpStatus]}" } } }
+              if [message][msg] { mutate { add_field => { "msg" => "%{[message][msg]}" } } }
+              if [message][httpRequestSize] { mutate { add_field => { "httpRequestSize" => "%{[message][httpRequestSize]}" } } }
+              if [message][user] { mutate { add_field => { "user" => "%{[message][user]}" } } }
+              if [message][httpExecutionTime] { mutate { add_field => { "httpExecutionTime" => "%{[message][httpExecutionTime]}" } } }
+              
+              mutate { remove_field => [ "[message]" ] }
+              
+            } 
           }
+
           if [kubernetes][container][name] == "keycloak" {
             grok {
               match => { 
@@ -131,21 +163,7 @@ logstash:
               overwrite => [ "message" ]
             }
           }
-          if [kubernetes][container][name] == "onboarding" {
-            grok {
-              pattern_definitions => { "JAVA" => "[0-9A-Za-z\[\]\.\$]*" }
-              match => { 
-                "message" => [
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}', 
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{PATH:path}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}',
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}\[%{DATA:request_id}\]%{SPACE}HTTP%{SPACE}%{WORD:http_method}%{SPACE}"%{DATA:uri}"',
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVA:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}',
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{DATA:logger}%{SPACE}:%{SPACE}\[%{DATA:request_id}\]%{SPACE}%{GREEDYDATA:message}'
-                ] 
-              }
-              overwrite => [ "message" ]
-            }
-          }
+
           if [kubernetes][container][name] == "postgresql" {
             grok {
               match => { 
@@ -156,67 +174,17 @@ logstash:
               overwrite => [ "message" ]
             }
           }
-          if [kubernetes][container][name] == "vault" or [kubernetes][container][name] == "vault-agent-init" or [kubernetes][container][name] == "sidecar-injector" {
-            grok {
-              match => { 
-                "message" => [
-                    '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}\[%{LOGLEVEL:loglevel}\]%{SPACE}%{DATA:handler}:%{SPACE}%{GREEDYDATA:message}' 
-                   
-
-                ]
-              }
-              overwrite => [ "message" ]
-            }
-          }
-          if [kubernetes][container][name] == "simpl-cloud-gateway" or [kubernetes][container][name] == "users-roles" {
-            grok {
-              match => { 
-                "message" => [
-                    '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{NUMBER:pid}%{SPACE}---%{SPACE}\[%{DATA:thread}\]%{SPACE}%{JAVACLASS:logger}%{SPACE}:%{SPACE}%{GREEDYDATA:message}' 
-                ]
-              }
-              overwrite => [ "message" ]
-            }
-          }
-          if [kubernetes][container][name] == "neo4j" {
-            grok {
-              match => { 
-                "message" => [
-                    '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{LOGLEVEL:loglevel}%{SPACE}%{GREEDYDATA:message}' 
-                ]
-              }
-              overwrite => [ "message" ]
-            }
-          }
-          if [kubernetes][container][name] == "redis" {  
-            grok {
-              match => { 
-                "message" => [
-                     '%{NUMBER:process_id}:%{WORD:process_type}%{SPACE}%{MONTHDAY:day}%{SPACE}%{MONTH:month}%{SPACE}%{YEAR:year}%{SPACE}%{TIME:time}\.%{INT:milliseconds}%{SPACE}\*%{SPACE}%{GREEDYDATA:message}'
-                  ]
-                }
-              overwrite => [ "message" ]
-              add_field => {
-                "timestamp" => "%{day} %{month} %{year} %{time}.%{milliseconds}"
-              }
-            }
             
-            
-          }
-
-           if [fields][logtype] == "logs-sample-business" {
-            grok {
-              match => { "message" => '%{TIMESTAMP_ISO8601:timestamp}\|%{WORD:origin}\|%{WORD:destination}\|%{WORD:business_operation}\|%{DATA:message_type}\|%{WORD:correlation_id}' }
-            }
-          }  
-          
-            date {
-              match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS" ]
-            } 
+          date {
+            match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS"]
+          } 
+          date {
+            match => [ "ts", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS"]
+          } 
         }
       output: |-
         output {
-          if [fields][logtype] == "logs-sample-business" {
+          if [kubernetes][container][name] == "simpl-cloud-gateway" {
             elasticsearch {
             hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
             user => "${LOGSTASH_USER}"
@@ -388,34 +356,59 @@ filebeat4agents:
     filebeat.autodiscover:
       providers:
         - type: kubernetes
+          # Filter logs only from the monitored namespace
+          namespace: "${MONITORED_NAMESPACE}"
           templates:
+            # Condition for redis container in the monitored namespace
             - condition:
-                or: 
-                  - equals:
-                      kubernetes.namespace: "${MONITORED_NAMESPACE}"
+                equals:
+                  kubernetes.container.name: "redis"
               config:
                 - type: container
                   paths:
                     - /var/log/containers/*-${data.kubernetes.container.id}.log
                   multiline:
-                    type: pattern
-                    pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
+                    pattern: '^\d+:\w+\s+\d{2}\s+\w{3}\s+\d{4}'
                     negate: true
                     match: after
+            # Condition for json structured logs
             - condition:
-                equals:
-                  kubernetes.container.name: "redis"  
+                or:
+                  - equals:
+                      kubernetes.container.name: "users-roles"
+                  - equals:
+                      kubernetes.container.name: "signer"
+                  - equals:
+                      kubernetes.container.name: "sd-creation-wizard-api"
+                  - equals:
+                      kubernetes.container.name: "sd-creation-wizard-api-validation"
+                  - equals:
+                      kubernetes.container.name: "simpl-cloud-gateway"
+              config:
+                - type: container
+                  paths:
+                    - /var/log/containers/*-${data.kubernetes.container.id}.log
+            # Condition for plain text logs
+            - condition:
+                or:
+                  - equals:
+                      kubernetes.container.name: "keycloak"
+                  - equals:
+                      kubernetes.container.name: "postgresql"
               config:
                 - type: container
                   paths:
                     - /var/log/containers/*-${data.kubernetes.container.id}.log
                   multiline:
-                    pattern: '^\d+:\w+\s+\d{2}\s+\w{3}\s+\d{4}'  
+                    type: pattern
+                    pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
                     negate: true
                     match: after
     processors:
+      # Add cloud and host metadata
       - add_cloud_metadata: {}
       - add_host_metadata: {}
+
   output: |
     output.logstash:
       hosts: ["${LOGSTASH_HOSTS}"]
diff --git a/pipeline.variables.sh b/pipeline.variables.sh
index 3564d00..322e00a 100644
--- a/pipeline.variables.sh
+++ b/pipeline.variables.sh
@@ -1 +1 @@
-PROJECT_VERSION_NUMBER="0.1.2"
\ No newline at end of file
+PROJECT_VERSION_NUMBER="0.1.3"
\ No newline at end of file
-- 
GitLab


From 270d3bddd6f12b65a19530378062c2dd1d38086b Mon Sep 17 00:00:00 2001
From: Albert Brzozowski <albert.brzozowski.external@atos.net>
Date: Fri, 29 Nov 2024 15:13:08 +0100
Subject: [PATCH 3/4] Feature/logstash adjust

---
 README.md                                   | 157 +++++++++++++++++++-
 charts/Chart.yaml                           |   2 +-
 charts/templates/filebeat-agents.yaml       |   4 +
 charts/templates/heartbeat.yaml             |   1 +
 charts/templates/logstash_beats.yaml        |   2 +-
 charts/templates/metricbeat.yaml            |   1 +
 charts/values/dev/observability/values.yaml | 135 ++++++++---------
 pipeline.variables.sh                       |   2 +-
 8 files changed, 222 insertions(+), 82 deletions(-)

diff --git a/README.md b/README.md
index f378c41..90be894 100644
--- a/README.md
+++ b/README.md
@@ -14,6 +14,159 @@ Loading dashboards
     -   next press import
 
 
-Variables which must be changed/verified from values.yml during instalation on other kubernetes cluster:
-    -   metricbeat.dropEvent - set correct namespace. ( system is mandatory )
+=========================================================================================================
+=                                                                                                       =
+=                                   Performance parameters                                              =
+=                                                                                                       =
+=========================================================================================================
+
+===================================== Logstash performance parameters ==================================
+
+The file values/dev/observability/values.yaml contains the following Logstash performance parameters:
+
+-   logstash.env.ls_java_opts: "-Xms3g -Xmx3g"
+    Set heap memory for logstash process inside container.
+    Logstash statefulsets restart is required. 
+
+-   logstash.resources.requests.memory: 4Gi
+    logstash.resources.limits.memory: 4Gi
+    Set memory allocation (request/limit) for logstash pod.
+
+-   logstash.resources.requests.cpu:  300m
+    logstash.resources.limits.cpu: 300m
+    Set CPU allocation (request/limit) for logstash pod.
+
+-   pipelines_yml_config.pipeline.workers: 1
+    Set number of workers for logstash pipeline.
+    Logstash statefulsets restart is required. 
+
+-   pipelines_yml_config.pipeline.pipeline.batch.size: 125
+    Set batch_size for logstash pipeline.
+    Logstash statefulsets restart is required. 
+
+===================================== Elasticsearch performance parameters ==================================
+
+-   elasticsearch.diskSpace: 60Gi
+    Set disk size to store indices in elasticsearch pods
+
+-   elasticsearch.count: 3
+    Number of elasticsearch pods in stack
+
+-   elasticsearch.resources.requests.memory: 4Gi
+    elasticsearch.resources.limits.memory: 4Gi
+    Set memory allocation (request/limit) for elasticsearch pod.
+
+-   elasticsearch.resources.requests.cpu:  300m
+    elasticsearch.resources.limits.cpu: 300m
+    Set CPU allocation (request/limit) for elasticsearch pod.
+
+
+===================================== Kibana performance parameters ==================================
+
+-   kibana.resources.requests.memory: 1Gi
+    kibana.resources.limits.memory: 1Gi
+    Set memory allocation (request/limit) for kibana pod.
+
+-   kibana.resources.requests.cpu:  300m
+    kibana.resources.limits.cpu: 300m
+    Set CPU allocation (request/limit) for kibana pod.
+
+-   kibana.count: 1
+    Number of kibana pods in stack
+
+===================================== Filebeat performance parameters ==================================
+
+-   filebeat4agents.resources.requests.memory: 1Gi
+    filebeat4agents.resources.limits.memory: 1Gi
+    Set memory allocation (request/limit) for filebeat pod.
+
+-   filebeat4agents.resources.requests.cpu:  100m
+    filebeat4agents.resources.limits.cpu: 100m
+    Set CPU allocation (request/limit) for filebeat pod.
+
+=========================================================================================================
+=                                                                                                       =
+=                                               ILM                                                     =
+=                                                                                                       =
+=========================================================================================================
+
+On each monitoring ELK stack there are 5 ILM policies:
+- business-ilm - responsible for index rotation with business logs
+- technical-ilm - responsible for index rotation with technical logs
+- metricbeat-ilm - responsible for index rotation with metrics collected from agents
+- filebeat - responsible for index rotation with ELK stack logs
+- heartbeat-ilm - responsible for index rotation with services heartbeats
+
+
+Apply changes on heartbeat-ilm:
+    1) Modify values.yml file and set new values:
+    ....
+    heartbeat:
+    ilm:
+        hot:
+        max_age: 30d                      <- set max age for documents in hot phase 
+        max_primary_shard_size: 50gb      <- set max size for primary shard in hot phase 
+        delete:
+        min_age: 365d                     <- set age after which index will be deleted
+    services:
+        heartbeat.monitors:
+    ....
+    2) Restart heartbeat by command:
+    
+    kubectl rollout restart deployment heartbeat-beat-heartbeat
+
+
+Apply changes on metricbeat-ilm:
+    1) Modify values.yml file and set new values:
+    ....
+    metricbeat:
+    ilm:
+        hot:
+        max_age: 30d                        <- set max age for documents in hot phase 
+        max_primary_shard_size: 50gb        <- set max size for primary shard in hot phase 
+        delete:
+        min_age: 365d                       <- set age after which index will be deleted
+    resources:
+        requests:
+        memory: 500Mi
+        limits:
+    ....
+    2) Restart metricbeat by command:
+    
+     kubectl rollout restart daemonset metricbeat-beat-metricbeat
+
+Apply changes on filebeat:
+    1) Login to Kibana and go to: Stack Management -> Index Lifecycle Policies.
+    2) Click on filebeat policy
+    3) Modify the "Hot phase" advanced settings by disabling "Use recommended defaults", and/or modify the Delete phase if needed.
+    4) Press "Save policy".
+
+
+Apply changes on business-ilm and technical-ilm:
+    1) Modify values.yml file and set new values:
+    ....
+    logstash:
+    ilm:
+        business:
+        hot:
+            max_age: 30d                    <- set max age for business documents in hot phase 
+            max_primary_shard_size: 1gb     <- set max size for business primary shard in hot phase 
+        delete:
+            min_age: 30d                    <- set age after which business index will be deleted
+        technical:
+        hot:
+            max_age: 30d                    <- set max age for technical documents in hot phase 
+            max_primary_shard_size: 1gb     <- set max size for technical primary shard in hot phase
+        delete:
+            min_age: 30d                    <- set age after which technical index will be deleted
+    count_beats: 1
+    count_syslog: 0
+    ....
+    2) Restart logstash statefulsets by command:
+    
+    kubectl rollout restart sts logstash-beats-ls
+
+
+
+
 
diff --git a/charts/Chart.yaml b/charts/Chart.yaml
index dc19d20..06235c2 100644
--- a/charts/Chart.yaml
+++ b/charts/Chart.yaml
@@ -1,6 +1,6 @@
 name: eck-monitoring
 version: ${PROJECT_RELEASE_VERSION}
 appVersion: "${PROJECT_RELEASE_VERSION}"
-#version: 0.1.3
+#version: 0.1.4
 
 
diff --git a/charts/templates/filebeat-agents.yaml b/charts/templates/filebeat-agents.yaml
index d706d34..1c17dfa 100644
--- a/charts/templates/filebeat-agents.yaml
+++ b/charts/templates/filebeat-agents.yaml
@@ -16,6 +16,10 @@ spec:
         hostNetwork: true # Allows to provide richer host metadata
         containers:
         - name: filebeat
+          {{- with .Values.filebeat4agents.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           command: ['sh', '-c', 'exec /usr/share/filebeat/filebeat -e -c /usr/share/filebeat/filebeat.yml']
           securityContext:
             runAsUser: 0
diff --git a/charts/templates/heartbeat.yaml b/charts/templates/heartbeat.yaml
index 7740a69..696b9e9 100644
--- a/charts/templates/heartbeat.yaml
+++ b/charts/templates/heartbeat.yaml
@@ -14,6 +14,7 @@ spec:
     {{- end }}  
     setup.ilm.enabled: true
     setup.ilm.policy_name: heartbeat-ilm
+    setup.ilm.overwrite: true
     setup.ilm.policy_file: "/usr/share/heartbeat/ilm/heartbeat-ilm.json"
   deployment:
     replicas: 1
diff --git a/charts/templates/logstash_beats.yaml b/charts/templates/logstash_beats.yaml
index 855c66f..48f0db3 100644
--- a/charts/templates/logstash_beats.yaml
+++ b/charts/templates/logstash_beats.yaml
@@ -75,7 +75,7 @@ spec:
             subPath: logstash-business-ilm.json
           - name:  logstash-technical-ilm-vol
             mountPath: /usr/share/logstash/ilm/logstash-technical-ilm.json
-            subPath: logstash-technical-ilm.json            
+            subPath: logstash-technical-ilm.json
         env:
           - name: RELEASE_NAME
             value: {{ .Release.Name }}
diff --git a/charts/templates/metricbeat.yaml b/charts/templates/metricbeat.yaml
index 5ad5746..1e6e6c3 100644
--- a/charts/templates/metricbeat.yaml
+++ b/charts/templates/metricbeat.yaml
@@ -70,6 +70,7 @@ spec:
           verification_mode: none
     setup.ilm.enabled: true
     setup.ilm.policy_name: metricbeat-ilm
+    setup.ilm.overwrite: true
     setup.ilm.policy_file: "/usr/share/metricbeat/ilm/metricbeat-ilm.json"
     processors:
     - add_cloud_metadata: {}
diff --git a/charts/values/dev/observability/values.yaml b/charts/values/dev/observability/values.yaml
index df345e9..ec195b5 100644
--- a/charts/values/dev/observability/values.yaml
+++ b/charts/values/dev/observability/values.yaml
@@ -51,8 +51,10 @@ kibana:
   resources:
     requests:
       memory: 1Gi
+      cpu: 300m
     limits:
       memory: 1Gi
+      cpu: 300m
   #Environment variables to set in kibana pod 
   #Usage from cli: 
   # --set "kibana.env[0].name=VARIABLE_NAME" --set "kibana.env[0].value=VARIABLE_VALUE" 
@@ -65,13 +67,13 @@ logstash:
         max_age: 30d
         max_primary_shard_size: 1gb
       delete:
-        min_age: 30d
+        min_age: 365d
     technical:
       hot:
         max_age: 30d
         max_primary_shard_size: 1gb
       delete:
-        min_age: 30d
+        min_age: 365d
   count_beats: 1
   count_syslog: 0
   image: docker.elastic.co/logstash/logstash
@@ -84,8 +86,10 @@ logstash:
   resources:
     requests:
       memory: 4Gi
+      cpu: 300m
     limits:
       memory: 4Gi
+      cpu: 300m
   cert:
     duration: 2160h0m0s # 90d
     renewBefore: 360h0m0s # 15d
@@ -116,21 +120,38 @@ logstash:
             drop { }
           }    
           
-          if [kubernetes][container][name] == "sd-creation-wizard-api" or [kubernetes][container][name] == "signer" 	or [kubernetes][container][name] == "sd-creation-wizard-api-validation" or [kubernetes][container][name] == "simpl-cloud-gateway"  {
+          if [kubernetes][container][name] == "sd-creation-wizard-api" or [kubernetes][container][name] == "signer" 	or [kubernetes][container][name] == "sd-creation-wizard-api-validation"  {
             json {
                     source => "message"
                     skip_on_invalid_json => true
+                    add_field => { "log_type" => "NA" }
+
                 }
           }
-              
-          if [kubernetes][container][name] == "users-roles" {
+          # Business logs
+          if [kubernetes][container][name] == "simpl-cloud-gateway" or [kubernetes][container][name] == "tls-gateway" {
+            json {
+                    source => "message"
+                    skip_on_invalid_json => true
+                    add_field => { "log_type" => "business" }
 
+                }
+          }
+          # Onboarding technical logs
+          if [kubernetes][container][name] == "identity-provider" or [kubernetes][container][name] == "onboarding" or [kubernetes][container][name] == "security-attributes-provider" {
             json {
                     source => "message"
                     skip_on_invalid_json => true
+                    add_field => { "log_type" => "technical" }
+                }
+          }
+          # Onboarding technical logs - more complex parsing because of the specific log structure
+          if [kubernetes][container][name] == "users-roles" {
+            json {
+                    source => "message"
+                    skip_on_invalid_json => true
+                    add_field => { "log_type" => "technical" }
                 }
-
-
             ruby {
                 code => '
                     if event.get("[message]").is_a?(Hash)
@@ -140,7 +161,6 @@ logstash:
                     end
                 '
             }
-              
             if [is_json_message] {
               if [message][httpStatus] { mutate { add_field => { "httpStatus" => "%{[message][httpStatus]}" } } }
               if [message][msg] { mutate { add_field => { "msg" => "%{[message][msg]}" } } }
@@ -153,38 +173,17 @@ logstash:
             } 
           }
 
-          if [kubernetes][container][name] == "keycloak" {
-            grok {
-              match => { 
-                "message" => [
-                  '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}\[%{JAVACLASS:logger}\]%{SPACE}\(%{DATA:thread}\)%{SPACE}%{GREEDYDATA:message}'
-                  ]
-              }
-              overwrite => [ "message" ]
-            }
-          }
-
-          if [kubernetes][container][name] == "postgresql" {
-            grok {
-              match => { 
-                "message" => [
-                    '%{TIMESTAMP_ISO8601:timestamp}%{SPACE}%{WORD:timezone}%{SPACE}\[%{NUMBER:pid}\]%{SPACE}%{WORD:log_level}:%{SPACE}%{GREEDYDATA:message}' 
-                  ]
-              }
-              overwrite => [ "message" ]
-            }
-          }
             
           date {
-            match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS"]
+            match => [ "timestamp", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS", "yyyy-MM-dd HH:mm:ss,SSS"]
           } 
           date {
-            match => [ "ts", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS"]
+            match => [ "ts", "yyyy-MM-dd HH:mm:ss.SSS", "ISO8601", "yyyy-MM-dd HH:mm:ss", "dd MMM yyyy HH:mm:ss.SSS", "yyyy-MM-dd HH:mm:ss,SSS"]
           } 
         }
       output: |-
         output {
-          if [kubernetes][container][name] == "simpl-cloud-gateway" {
+          if [log_type] == "business" {
             elasticsearch {
             hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
             user => "${LOGSTASH_USER}"
@@ -197,7 +196,7 @@ logstash:
             action => "create"
             }
           }
-          else if [fields][logtype] == "logs-sample-wrapper" {
+          else if [log_type] == "technical" {
             elasticsearch {
             hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
             user => "${LOGSTASH_USER}"
@@ -205,30 +204,11 @@ logstash:
             ssl_enabled => "true"
             ssl_verification_mode => "full"
             ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
-            #data_stream => "true"
-            #data_stream_type => "logs"
-            #data_stream_dataset => "business"
-            index => "business-logs"
-            template_name => "business-template"
+            index => "technical-logs"
+            template_name => "technical-template"
             action => "create"            
             }
           }
-          else {
-            elasticsearch {
-              hosts => [ "${ELASTIC_ELASTICSEARCH_ES_HOSTS}" ]
-              user => "${LOGSTASH_USER}"
-              password => "${LOGSTASH_PASSWORD}"
-              ssl_enabled => "true"
-              ssl_verification_mode => "full"
-              ssl_certificate_authorities => "/usr/share/logstash/config/certs/ca.crt"
-              #data_stream => "true"
-              #data_stream_type => "logs"
-              #data_stream_dataset => "technical"
-              index => "technical-logs"
-              template_name => "technical-template"
-              action => "create"
-            }
-          }
           #stdout { 
           #  codec => rubydebug
           #}
@@ -348,6 +328,13 @@ filebeat:
 filebeat4agents:
   image: docker.elastic.co/beats/filebeat
   imageTag: ""
+  resources:
+    requests:
+      memory: 1Gi
+      cpu: 100m
+    limits:
+      memory: 1Gi
+      cpu: 100m    
   cert:
     duration: 2160h0m0s # 90d
     renewBefore: 360h0m0s # 15d
@@ -374,36 +361,31 @@ filebeat4agents:
             # Condition for json structured logs
             - condition:
                 or:
+                  # Business logs
+                  - equals:
+                      kubernetes.container.name: "simpl-cloud-gateway"
+                  - equals:
+                      kubernetes.container.name: "tls-gateway"
+                  # Onboarding technical logs containers
+                  - equals:
+                      kubernetes.container.name: "identity-provider"
+                  - equals:
+                      kubernetes.container.name: "onboarding"
+                  - equals:
+                      kubernetes.container.name: "security-attributes-provider"
                   - equals:
                       kubernetes.container.name: "users-roles"
+                  # Logs not specified yet
                   - equals:
                       kubernetes.container.name: "signer"
                   - equals:
                       kubernetes.container.name: "sd-creation-wizard-api"
                   - equals:
                       kubernetes.container.name: "sd-creation-wizard-api-validation"
-                  - equals:
-                      kubernetes.container.name: "simpl-cloud-gateway"
-              config:
-                - type: container
-                  paths:
-                    - /var/log/containers/*-${data.kubernetes.container.id}.log
-            # Condition for plain text logs
-            - condition:
-                or:
-                  - equals:
-                      kubernetes.container.name: "keycloak"
-                  - equals:
-                      kubernetes.container.name: "postgresql"
               config:
                 - type: container
                   paths:
                     - /var/log/containers/*-${data.kubernetes.container.id}.log
-                  multiline:
-                    type: pattern
-                    pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
-                    negate: true
-                    match: after
     processors:
       # Add cloud and host metadata
       - add_cloud_metadata: {}
@@ -422,13 +404,12 @@ metricbeat:
   ilm:
     hot:
       max_age: 30d
-      max_primary_shard_size: 1gb
+      max_primary_shard_size: 50gb
     delete:
-      min_age: 30d
+      min_age: 365d
   resources:
     requests:
       memory: 500Mi
-
     limits:
       memory: 500Mi
       cpu: 300m
@@ -439,9 +420,9 @@ heartbeat:
   ilm:
     hot:
       max_age: 30d
-      max_primary_shard_size: 100mb
+      max_primary_shard_size: 50gb
     delete:
-      min_age: 30d
+      min_age: 365d
   services:
     heartbeat.monitors:
     - type: tcp
diff --git a/pipeline.variables.sh b/pipeline.variables.sh
index 322e00a..103f4f7 100644
--- a/pipeline.variables.sh
+++ b/pipeline.variables.sh
@@ -1 +1 @@
-PROJECT_VERSION_NUMBER="0.1.3"
\ No newline at end of file
+PROJECT_VERSION_NUMBER="0.1.4"
\ No newline at end of file
-- 
GitLab


From dcf118faa09f22f8342e600815ed50fdbb5f942c Mon Sep 17 00:00:00 2001
From: Natalia Szakiel <natalia.szakiel.external@eviden.com>
Date: Mon, 2 Dec 2024 11:08:02 +0100
Subject: [PATCH 4/4] Feature/resolved conflicts

-- 
GitLab