diff --git a/README.md b/README.md index a5f95aac241a33ccbc9d226cba4b39bc1735a7f2..68f58e2dff9973aae5943701b628b366734a2396 100644 --- a/README.md +++ b/README.md @@ -144,6 +144,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | <a name="module_aws_eks_fargate_profiles"></a> [aws\_eks\_fargate\_profiles](#module\_aws\_eks\_fargate\_profiles) | ./modules/aws-eks-fargate-profiles | n/a | | <a name="module_aws_eks_managed_node_groups"></a> [aws\_eks\_managed\_node\_groups](#module\_aws\_eks\_managed\_node\_groups) | ./modules/aws-eks-managed-node-groups | n/a | | <a name="module_aws_eks_self_managed_node_groups"></a> [aws\_eks\_self\_managed\_node\_groups](#module\_aws\_eks\_self\_managed\_node\_groups) | ./modules/aws-eks-self-managed-node-groups | n/a | +| <a name="module_aws_load_balancer_controller"></a> [aws\_load\_balancer\_controller](#module\_aws\_load\_balancer\_controller) | ./kubernetes-addons/aws-load-balancer-controller | n/a | | <a name="module_aws_managed_prometheus"></a> [aws\_managed\_prometheus](#module\_aws\_managed\_prometheus) | ./modules/aws-managed-prometheus | n/a | | <a name="module_aws_opentelemetry_collector"></a> [aws\_opentelemetry\_collector](#module\_aws\_opentelemetry\_collector) | ./kubernetes-addons/aws-opentelemetry-eks | n/a | | <a name="module_cert_manager"></a> [cert\_manager](#module\_cert\_manager) | ./kubernetes-addons/cert-manager | n/a | @@ -152,7 +153,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | <a name="module_emr_on_eks"></a> [emr\_on\_eks](#module\_emr\_on\_eks) | ./modules/emr-on-eks | n/a | | <a name="module_fargate_fluentbit"></a> [fargate\_fluentbit](#module\_fargate\_fluentbit) | ./kubernetes-addons/fargate-fluentbit | n/a | | <a name="module_keda"></a> [keda](#module\_keda) | ./kubernetes-addons/keda | n/a | -| <a name="module_lb_ingress_controller"></a> [lb\_ingress\_controller](#module\_lb\_ingress\_controller) | ./kubernetes-addons/lb-ingress-controller | n/a | | <a name="module_metrics_server"></a> [metrics\_server](#module\_metrics\_server) | ./kubernetes-addons/metrics-server | n/a | | <a name="module_nginx_ingress"></a> [nginx\_ingress](#module\_nginx\_ingress) | ./kubernetes-addons/nginx-ingress | n/a | | <a name="module_prometheus"></a> [prometheus](#module\_prometheus) | ./kubernetes-addons/prometheus | n/a | diff --git a/aws-eks-worker.tf b/aws-eks-worker.tf index dbff99bcc671ed604caafed7e35d6bae04da79f2..5f5484669aa0631a5628d4fec4fa3df73b51a90f 100644 --- a/aws-eks-worker.tf +++ b/aws-eks-worker.tf @@ -101,7 +101,6 @@ module "aws_eks_fargate_profiles" { # AWS EKS Add-ons (VPC CNI, CoreDNS, KubeProxy ) # --------------------------------------------------------------------------------------------------------------------- module "aws_eks_addon" { - count = var.create_eks && var.enable_managed_nodegroups || var.create_eks && var.enable_self_managed_nodegroups || var.create_eks && var.enable_fargate ? 1 : 0 source = "./modules/aws-eks-addon" @@ -116,7 +115,5 @@ module "aws_eks_addon" { kube_proxy_addon_version = var.kube_proxy_addon_version tags = module.eks_tags.tags - depends_on = [ - module.aws_eks - ] + depends_on = [module.aws_eks] } diff --git a/kubernetes-addons.tf b/kubernetes-addons.tf index b6c66e255070914bb185f22a8df18eee536ffb63..ca1234e77585a5fbf427d849937dc10a5b210526 100644 --- a/kubernetes-addons.tf +++ b/kubernetes-addons.tf @@ -45,6 +45,7 @@ module "prometheus" { count = var.create_eks && var.prometheus_enable ? 
1 : 0 source = "./kubernetes-addons/prometheus" prometheus_helm_chart = var.prometheus_helm_chart + #AWS Managed Prometheus Workspace aws_managed_prometheus_enable = var.aws_managed_prometheus_enable amp_workspace_id = var.aws_managed_prometheus_enable ? module.aws_managed_prometheus[0].amp_workspace_id : "" @@ -54,9 +55,9 @@ module "prometheus" { depends_on = [module.aws_eks] } -module "lb_ingress_controller" { +module "aws_load_balancer_controller" { count = var.create_eks && var.aws_lb_ingress_controller_enable ? 1 : 0 - source = "./kubernetes-addons/lb-ingress-controller" + source = "./kubernetes-addons/aws-load-balancer-controller" eks_cluster_id = module.aws_eks.cluster_id lb_ingress_controller_helm_app = var.aws_lb_ingress_controller_helm_app eks_oidc_issuer_url = module.aws_eks.cluster_oidc_issuer_url diff --git a/kubernetes-addons/agones/agones-values.yaml b/kubernetes-addons/agones/agones-values.yaml deleted file mode 100644 index ae38e459fe3a3887eaafc37e97b60f04a9458297..0000000000000000000000000000000000000000 --- a/kubernetes-addons/agones/agones-values.yaml +++ /dev/null @@ -1,179 +0,0 @@ -agones: - featureGates: "" - metrics: - prometheusEnabled: true - prometheusServiceDiscovery: true - stackdriverEnabled: false - stackdriverProjectID: "" - stackdriverLabels: "" - rbacEnabled: true - registerServiceAccounts: true - registerWebhooks: true - registerApiService: true - crds: - install: true - cleanupOnDelete: true - serviceaccount: - controller: agones-controller - sdk: agones-sdk - allocator: agones-allocator - createPriorityClass: true - priorityClassName: agones-system - controller: - resources: {} - # requests: - # cpu: 1 - # memory: 256Mi - nodeSelector: - kubernetes.io/os: linux - annotations: {} - tolerations: - - key: "agones.dev/agones-system" - operator: "Equal" - value: "true" - effect: "NoExecute" - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: agones.dev/agones-system - operator: Exists - generateTLS: true - safeToEvict: false - persistentLogs: true - persistentLogsSizeLimitMB: 10000 - logLevel: info - numWorkers: 100 - apiServerQPS: 400 - apiServerQPSBurst: 500 - http: - port: 8080 - healthCheck: - initialDelaySeconds: 3 - periodSeconds: 3 - failureThreshold: 3 - timeoutSeconds: 1 - ping: - install: true - resources: {} - # requests: - # cpu: 1 - # memory: 256Mi - nodeSelector: - kubernetes.io/os: linux - annotations: {} - tolerations: - - key: "agones.dev/agones-system" - operator: "Equal" - value: "true" - effect: "NoExecute" - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: agones.dev/agones-system - operator: Exists - replicas: 2 - http: - expose: true - response: ok - port: 80 - serviceType: LoadBalancer - loadBalancerIP: "" - loadBalancerSourceRanges: [] - annotations: - service.beta.kubernetes.io/aws-load-balancer-internal: "false" - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - udp: - expose: true - rateLimit: 20 - port: 50000 - serviceType: LoadBalancer - loadBalancerIP: "" - loadBalancerSourceRanges: [] - annotations: - service.beta.kubernetes.io/aws-load-balancer-internal: "false" - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - healthCheck: - initialDelaySeconds: 3 - periodSeconds: 3 - failureThreshold: 3 - timeoutSeconds: 5 - allocator: - install: true - apiServerQPS: 400 - apiServerQPSBurst: 500 - logLevel: info - annotations: {} - resources: {} - # 
requests: - # cpu: 1 - # memory: 256Mi - nodeSelector: - kubernetes.io/os: linux - healthCheck: - initialDelaySeconds: 3 - periodSeconds: 3 - failureThreshold: 3 - timeoutSeconds: 1 - tolerations: - - key: "agones.dev/agones-system" - operator: "Equal" - value: "true" - effect: "NoExecute" - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: agones.dev/agones-system - operator: Exists - replicas: 3 - http: - port: 443 - serviceType: LoadBalancer - loadBalancerIP: "" - loadBalancerSourceRanges: [] - annotations: - service.beta.kubernetes.io/aws-load-balancer-internal: "false" - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - generateTLS: true - generateClientTLS: true - disableMTLS: false - disableTLS: false - remoteAllocationTimeout: 10s - totalRemoteAllocationTimeout: 30s - image: - registry: gcr.io/agones-images - tag: 1.15.0 - controller: - name: agones-controller - pullPolicy: IfNotPresent - sdk: - name: agones-sdk - cpuRequest: 30m - cpuLimit: 0 - memoryRequest: 0 - memoryLimit: 0 - alwaysPull: false - ping: - name: agones-ping - pullPolicy: IfNotPresent - allocator: - name: agones-allocator - pullPolicy: IfNotPresent - -gameservers: - namespaces: - - default - minPort: 7000 - maxPort: 8000 - podPreserveUnknownFields: false - -helm: - installTests: false diff --git a/kubernetes-addons/agones/certs/certs.sh b/kubernetes-addons/agones/certs/certs.sh deleted file mode 100755 index c18bbd2a8b657f46c3728c4240b9e061bbb103c6..0000000000000000000000000000000000000000 --- a/kubernetes-addons/agones/certs/certs.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -echo "Generating certs..." -echo "Email should be: emailAddress=agones-discuss@googlegroups.com" -echo "Common Name should be: agones-controller-service.agones-system.svc" -openssl genrsa -out server.key 2048 -openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650 - -echo "caBundle:" -base64 -w 0 server.crt - -echo "done" diff --git a/kubernetes-addons/agones/locals.tf b/kubernetes-addons/agones/locals.tf index 5292b241e8cc88a50fefacab3cc7d90e9eb3deb6..7a8f5e7e58168e0bdc651f3746f2c839d7e9140a 100644 --- a/kubernetes-addons/agones/locals.tf +++ b/kubernetes-addons/agones/locals.tf @@ -1,22 +1,23 @@ data "aws_region" "current" {} locals { + default_helm_values = [templatefile("${path.module}/values.yaml", {})] default_agones_helm_app = { name = "agones" chart = "agones" repository = "https://agones.dev/chart/stable" - version = "1.15.0" + version = "1.18.0" namespace = "agones-system" timeout = "1200" create_namespace = true description = "Agones Gaming Server Helm Chart deployment configuration" lint = false - values = local.default_agones_helm_values + values = local.default_helm_values wait = true wait_for_jobs = false verify = false - set = null + set = [] set_sensitive = null keyring = "" repository_key_file = "" @@ -41,11 +42,9 @@ locals { gameserver_minport = 7000 gameserver_maxport = 8000 } + agones_helm_app = merge( local.default_agones_helm_app, var.agones_helm_chart ) - - default_agones_helm_values = [templatefile("${path.module}/agones-values.yaml", {})] - } diff --git a/kubernetes-addons/agones/values.yaml b/kubernetes-addons/agones/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67c6aa3dca4373d2819852c897572a09dc1cb48b --- /dev/null +++ b/kubernetes-addons/agones/values.yaml @@ -0,0 +1,15 @@ +agones: + ping: + http: + annotations: + 
service.beta.kubernetes.io/aws-load-balancer-internal: "false" + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + udp: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "false" + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + allocator: + http: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "false" + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" \ No newline at end of file diff --git a/kubernetes-addons/argocd/locals.tf b/kubernetes-addons/argocd/locals.tf index 683cecb175359027f83c1d02165bb7b2d8443ae2..cceca1b786b3ae111249b67c8dc7c3c9bf6963dd 100644 --- a/kubernetes-addons/argocd/locals.tf +++ b/kubernetes-addons/argocd/locals.tf @@ -1,18 +1,17 @@ locals { + default_helm_values = [templatefile("${path.module}/values.yaml", {})] + default_argocd_helm_app = { - name = "argo-cd" - chart = "argo-cd" - repository = "https://argoproj.github.io/argo-helm" - version = "3.26.3" - namespace = "argocd" - timeout = "1200" - create_namespace = true - values = local.default_argocd_helm_values - set = [{ - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }] + name = "argo-cd" + chart = "argo-cd" + repository = "https://argoproj.github.io/argo-helm" + version = "3.26.8" + namespace = "argocd" + timeout = "1200" + create_namespace = true + values = local.default_helm_values + set = [] set_sensitive = null lint = false verify = false @@ -40,11 +39,11 @@ locals { description = "The argocd HelmChart Ingress Controller deployment configuration" postrender = "" } + argocd_helm_app = merge( local.default_argocd_helm_app, var.argocd_helm_chart ) - default_argocd_helm_values = [templatefile("${path.module}/argocd-values.yaml", {})] # Global Values for ArgoCD App of Apps. global_values = { diff --git a/kubernetes-addons/argocd/argocd-values.yaml b/kubernetes-addons/argocd/values.yaml similarity index 100% rename from kubernetes-addons/argocd/argocd-values.yaml rename to kubernetes-addons/argocd/values.yaml diff --git a/kubernetes-addons/aws-for-fluentbit/aws-for-fluent-bit-values.yaml b/kubernetes-addons/aws-for-fluentbit/aws-for-fluent-bit-values.yaml deleted file mode 100644 index c4d58a6c4ef846edd0725d3928cd98c27506afaa..0000000000000000000000000000000000000000 --- a/kubernetes-addons/aws-for-fluentbit/aws-for-fluent-bit-values.yaml +++ /dev/null @@ -1,140 +0,0 @@ -global: - ## Override the deployment namespace - namespaceOverride: logging - -image: - repository: amazon/aws-for-fluent-bit - tag: 2.1.0 - pullPolicy: IfNotPresent - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -service: - parsersFiles: - - /fluent-bit/parsers/parsers.conf - # extraParsers: | - # [PARSER] - # Name logfmt - # Format logfmt - -input: - tag: "kube.*" - path: "/var/log/containers/*.log" - db: "/var/log/flb_kube.db" - parser: docker - dockerMode: "On" - memBufLimit: 5MB - skipLongLines: "On" - refreshInterval: 10 - -# extraInputs: | -# [INPUT] -# Name winlog -# Channels Setup,Windows PowerShell -# Interval_Sec 1 -# DB winlog.sqlite - - -filter: - match: "kube.*" - kubeURL: "https://kubernetes.default.svc.cluster.local:443" - mergeLog: "On" - mergeLogKey: "data" - keepLog: "On" - k8sLoggingParser: "On" - k8sLoggingExclude: "On" - -# extraFilters: | -# [FILTER] -# Name grep -# Match * -# Exclude log lvl=debug* - -cloudWatch: - enabled: true - match: "*" - region: ${region} - logGroupName: ${aws_for_fluent_bit_cw_log_group} - logStreamName: - logStreamPrefix: "fluentbit-" - logKey: - logFormat: - roleArn: - autoCreateGroup: true - 
endpoint: - credentialsEndpoint: {} - -firehose: - enabled: false - match: "*" - region: ${region} - deliveryStream: "my-stream" - dataKeys: - roleArn: - endpoint: - timeKey: - -kinesis: - enabled: false - match: "*" - region: ${region} - stream: "my-kinesis-stream-name" - partitionKey: "container_id" - appendNewline: - dataKeys: - roleArn: - timeKey: - timeKeyFormat: - -# extraOutputs: | -# [OUTPUT] -# Name file -# Format template -# Template {time} used={Mem.used} free={Mem.free} total={Mem.total} - -serviceAccount: - create: true - annotations: {} - name: - -resources: - limits: - memory: 500Mi - requests: - cpu: 500m - memory: 500Mi - -## Assign a PriorityClassName to pods if set -# priorityClassName: system-node-critical - -updateStrategy: - type: RollingUpdate - -nodeSelector: - kubernetes.io/os: linux - -tolerations: [] - -affinity: {} - -annotations: {} -# iam.amazonaws.com/role: arn:aws:iam::123456789012:role/role-for-fluent-bit - -env: [] - -volumes: - - name: varlog - hostPath: - path: /var/log - - name: varlibdockercontainers - hostPath: - path: /var/lib/docker/containers - -volumeMounts: - - name: varlog - mountPath: /var/log - - name: varlibdockercontainers - mountPath: /var/lib/docker/containers - readOnly: true diff --git a/kubernetes-addons/aws-for-fluentbit/locals.tf b/kubernetes-addons/aws-for-fluentbit/locals.tf index a4f4050962526bbc84ed416183513214dfb2eb06..d9aedc5435acdd45e90c4c3110927a1d5643e6ab 100644 --- a/kubernetes-addons/aws-for-fluentbit/locals.tf +++ b/kubernetes-addons/aws-for-fluentbit/locals.tf @@ -1,58 +1,54 @@ locals { + log_group_name = "/${var.eks_cluster_id}/worker-fluentbit-logs" - aws_for_fluentbit_cwlog_group_name = "/${var.eks_cluster_id}/worker-fluentbit-logs" + default_helm_values = [templatefile("${path.module}/values.yaml", { + aws_region = data.aws_region.current.name, + log_group_name = local.log_group_name + })] default_aws_for_fluentbit_helm_app = { name = "aws-for-fluent-bit" chart = "aws-for-fluent-bit" repository = "https://aws.github.io/eks-charts" - version = "0.1.0" - namespace = "logging" + version = "0.1.11" + namespace = "kube-system" timeout = "1200" create_namespace = true - aws_for_fluent_bit_cw_log_group = local.aws_for_fluentbit_cwlog_group_name + aws_for_fluent_bit_cw_log_group = local.log_group_name aws_for_fluentbit_cwlog_retention_in_days = 90 - values = [templatefile("${path.module}/aws-for-fluent-bit-values.yaml", { - region = data.aws_region.current.name, - aws_for_fluent_bit_cw_log_group = local.aws_for_fluentbit_cwlog_group_name - })] - set = [ - { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - } - ] - set_sensitive = null - lint = true - wait = true - wait_for_jobs = false - description = "aws-for-fluentbit Helm Chart deployment configuration" - verify = false - keyring = "" - repository_key_file = "" - repository_cert_file = "" - repository_ca_file = "" - repository_username = "" - repository_password = "" - disable_webhooks = false - reuse_values = false - reset_values = false - force_update = false - recreate_pods = false - cleanup_on_fail = false - max_history = 0 - atomic = false - skip_crds = false - render_subchart_notes = true - disable_openapi_validation = false - dependency_update = false - replace = false - postrender = "" - - + values = local.default_helm_values + set = [] + set_sensitive = null + lint = true + wait = true + wait_for_jobs = false + description = "aws-for-fluentbit Helm Chart deployment configuration" + verify = false + keyring = "" + repository_key_file = "" + 
repository_cert_file = "" + repository_ca_file = "" + repository_username = "" + repository_password = "" + disable_webhooks = false + reuse_values = false + reset_values = false + force_update = false + recreate_pods = false + cleanup_on_fail = false + max_history = 0 + atomic = false + skip_crds = false + render_subchart_notes = true + disable_openapi_validation = false + dependency_update = false + replace = false + postrender = "" } + aws_for_fluentbit_helm_app = merge( local.default_aws_for_fluentbit_helm_app, - var.aws_for_fluentbit_helm_chart) + var.aws_for_fluentbit_helm_chart + ) } diff --git a/kubernetes-addons/aws-for-fluentbit/values.yaml b/kubernetes-addons/aws-for-fluentbit/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5de63ecac508682fcb5dea09fbf7401597a68825 --- /dev/null +++ b/kubernetes-addons/aws-for-fluentbit/values.yaml @@ -0,0 +1,9 @@ +cloudWatch: + region: ${aws_region} + logGroupName: ${log_group_name} + +firehose: + region: ${aws_region} + +kinesis: + region: ${aws_region} \ No newline at end of file diff --git a/kubernetes-addons/lb-ingress-controller/README.md b/kubernetes-addons/aws-load-balancer-controller/README.md similarity index 89% rename from kubernetes-addons/lb-ingress-controller/README.md rename to kubernetes-addons/aws-load-balancer-controller/README.md index 11ed1e5fe5f1b36660307dc2281f4421927d7224..bf002b090e1dccf1b8694f142a3b2cc3ef5f9922 100644 --- a/kubernetes-addons/lb-ingress-controller/README.md +++ b/kubernetes-addons/aws-load-balancer-controller/README.md @@ -94,12 +94,12 @@ No modules. | Name | Type | |------|------| -| [aws_iam_policy.eks_lb_controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_role.eks_lb_controller_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_policy.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.aws_load_balancer_controller_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.eks_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [helm_release.lb_ingress](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | -| [kubernetes_service_account.eks_lb_controller_sa](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | -| [aws_iam_policy_document.eks_lb_controller_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [kubernetes_service_account.aws_load_balancer_controller_sa](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | +| [aws_iam_policy_document.aws_load_balancer_controller_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs diff --git a/kubernetes-addons/aws-load-balancer-controller/locals.tf b/kubernetes-addons/aws-load-balancer-controller/locals.tf new file mode 100644 index 
0000000000000000000000000000000000000000..350bbc62d693476c733fe97b26a911036993d05e --- /dev/null +++ b/kubernetes-addons/aws-load-balancer-controller/locals.tf @@ -0,0 +1,54 @@ +data "aws_region" "current" {} + +locals { + aws_load_balancer_controller_sa = "aws-load-balancer-controller-sa" + + default_helm_values = [templatefile("${path.module}/values.yaml", { + aws_region = data.aws_region.current.name, + cluster_name = var.eks_cluster_id, + service_account_name = local.aws_load_balancer_controller_sa + })] + + default_lb_ingress_controller_helm_app = { + name = "aws-load-balancer-controller" + chart = "aws-load-balancer-controller" + repository = "https://aws.github.io/eks-charts" + version = "1.3.1" + namespace = "kube-system" + timeout = "1200" + create_namespace = false + values = local.default_helm_values + set = [] + set_sensitive = null + lint = true + wait = true + wait_for_jobs = false + description = "aws-load-balancer-controller Helm Chart for ingress resources" + verify = false + keyring = "" + repository_key_file = "" + repository_cert_file = "" + repository_ca_file = "" + repository_username = "" + repository_password = "" + disable_webhooks = false + reuse_values = false + reset_values = false + force_update = false + recreate_pods = false + cleanup_on_fail = false + max_history = 0 + atomic = false + skip_crds = false + render_subchart_notes = true + disable_openapi_validation = false + dependency_update = false + replace = false + postrender = "" + } + + lb_ingress_controller_helm_app = merge( + local.default_lb_ingress_controller_helm_app, + var.lb_ingress_controller_helm_app + ) +} diff --git a/kubernetes-addons/lb-ingress-controller/main.tf b/kubernetes-addons/aws-load-balancer-controller/main.tf similarity index 94% rename from kubernetes-addons/lb-ingress-controller/main.tf rename to kubernetes-addons/aws-load-balancer-controller/main.tf index ddb070dbf70e2e8e39b5fb9246f2f30a5d0f737f..43f2f811f53aa390aa74efe0dbbf0e38c7bc1ed5 100644 --- a/kubernetes-addons/lb-ingress-controller/main.tf +++ b/kubernetes-addons/aws-load-balancer-controller/main.tf @@ -74,10 +74,10 @@ resource "helm_release" "lb_ingress" { } } - depends_on = [aws_iam_role.eks_lb_controller_role, kubernetes_service_account.eks_lb_controller_sa] + depends_on = [aws_iam_role.aws_load_balancer_controller_role, kubernetes_service_account.aws_load_balancer_controller_sa] } -resource "aws_iam_policy" "eks_lb_controller" { +resource "aws_iam_policy" "aws_load_balancer_controller" { name = "${var.eks_cluster_id}-lb-controller-policy" description = "Allows lb controller to manage ALB and NLB" @@ -310,7 +310,7 @@ resource "aws_iam_policy" "eks_lb_controller" { EOF } -data "aws_iam_policy_document" "eks_lb_controller_assume_policy" { +data "aws_iam_policy_document" "aws_load_balancer_controller_assume_policy" { statement { actions = ["sts:AssumeRoleWithWebIdentity"] effect = "Allow" @@ -318,7 +318,7 @@ data "aws_iam_policy_document" "eks_lb_controller_assume_policy" { condition { test = "StringEquals" variable = "${replace(var.eks_oidc_issuer_url, "https://", "")}:sub" - values = ["system:serviceaccount:kube-system:${local.aws_lb_controller_sa}"] + values = ["system:serviceaccount:kube-system:${local.aws_load_balancer_controller_sa}"] } principals { @@ -329,24 +329,24 @@ data "aws_iam_policy_document" "eks_lb_controller_assume_policy" { } # IAM role for eks alb controller -resource "aws_iam_role" "eks_lb_controller_role" { +resource "aws_iam_role" "aws_load_balancer_controller_role" { name = 
"${var.eks_cluster_id}-lb-controller-role" - assume_role_policy = data.aws_iam_policy_document.eks_lb_controller_assume_policy.json + assume_role_policy = data.aws_iam_policy_document.aws_load_balancer_controller_assume_policy.json } # Allows eks alb controller to manage LB's resource "aws_iam_role_policy_attachment" "eks_role_policy" { - role = aws_iam_role.eks_lb_controller_role.name - policy_arn = aws_iam_policy.eks_lb_controller.arn + role = aws_iam_role.aws_load_balancer_controller_role.name + policy_arn = aws_iam_policy.aws_load_balancer_controller.arn } # Kubernetes service account for lb controller -resource "kubernetes_service_account" "eks_lb_controller_sa" { +resource "kubernetes_service_account" "aws_load_balancer_controller_sa" { metadata { - name = local.aws_lb_controller_sa + name = local.aws_load_balancer_controller_sa namespace = "kube-system" - annotations = { "eks.amazonaws.com/role-arn" : aws_iam_role.eks_lb_controller_role.arn } + annotations = { "eks.amazonaws.com/role-arn" : aws_iam_role.aws_load_balancer_controller_role.arn } } automount_service_account_token = true -} +} \ No newline at end of file diff --git a/kubernetes-addons/lb-ingress-controller/outputs.tf b/kubernetes-addons/aws-load-balancer-controller/outputs.tf similarity index 99% rename from kubernetes-addons/lb-ingress-controller/outputs.tf rename to kubernetes-addons/aws-load-balancer-controller/outputs.tf index 6f5b1fa5b22930033cd85fcd033e774e9d6549e0..00c3f9be2e545318dacab36075a4c4fb81644327 100644 --- a/kubernetes-addons/lb-ingress-controller/outputs.tf +++ b/kubernetes-addons/aws-load-balancer-controller/outputs.tf @@ -19,6 +19,7 @@ output "ingress_namespace" { value = helm_release.lb_ingress.metadata[0].namespace } + output "ingress_name" { value = helm_release.lb_ingress.metadata[0].name } diff --git a/kubernetes-addons/aws-load-balancer-controller/values.yaml b/kubernetes-addons/aws-load-balancer-controller/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d0e51476df5ee4e2839c8f1c16a3b636ed0c0bc --- /dev/null +++ b/kubernetes-addons/aws-load-balancer-controller/values.yaml @@ -0,0 +1,5 @@ +clusterName: ${cluster_name} +region: ${aws_region} +serviceAccount: + create: false + name: ${service_account_name} \ No newline at end of file diff --git a/kubernetes-addons/lb-ingress-controller/variables.tf b/kubernetes-addons/aws-load-balancer-controller/variables.tf similarity index 100% rename from kubernetes-addons/lb-ingress-controller/variables.tf rename to kubernetes-addons/aws-load-balancer-controller/variables.tf diff --git a/kubernetes-addons/aws-opentelemetry-eks/locals.tf b/kubernetes-addons/aws-opentelemetry-eks/locals.tf index 23d368e3afdfb08904066c6314529560ced411d2..8799163102987a87c382b9ab4ef0e3197a7a8d1d 100644 --- a/kubernetes-addons/aws-opentelemetry-eks/locals.tf +++ b/kubernetes-addons/aws-opentelemetry-eks/locals.tf @@ -12,7 +12,9 @@ locals { aws_open_telemetry_mg_node_iam_role_arns = [] aws_open_telemetry_self_mg_node_iam_role_arns = [] } + aws_open_telemetry_app = merge( local.default_aws_open_telemetry_helm_app, - var.aws_open_telemetry_addon) + var.aws_open_telemetry_addon + ) } diff --git a/kubernetes-addons/aws-opentelemetry-eks/aws-otel-eks.tf b/kubernetes-addons/aws-opentelemetry-eks/main.tf similarity index 100% rename from kubernetes-addons/aws-opentelemetry-eks/aws-otel-eks.tf rename to kubernetes-addons/aws-opentelemetry-eks/main.tf diff --git a/kubernetes-addons/cert-manager/locals.tf b/kubernetes-addons/cert-manager/locals.tf index 
19c9a5ddc9e88f7fa6e7e578b53fbc237fffb4f0..ef901f9c131837e3ffd8ece989890d2a1d5b6a27 100644 --- a/kubernetes-addons/cert-manager/locals.tf +++ b/kubernetes-addons/cert-manager/locals.tf @@ -1,35 +1,19 @@ locals { + default_helm_values = [templatefile("${path.module}/values.yaml", {})] + default_cert_manager_helm_app = { - name = "cert-manager" - chart = "cert-manager" - repository = "https://charts.jetstack.io" - version = "v1.5.4" - namespace = "kube-system" - timeout = "600" - create_namespace = false - set = [{ - name = "extraArgs[0]" - value = "--enable-certificate-owner-ref=true" - }, { - name = "installCRDs" - value = "true" - }, { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }, { - name = "cainjector.nodeSelector.kubernetes\\.io/os" - value = "linux" - }, { - name = "startupapicheck.nodeSelector.kubernetes\\.io/os" - value = "linux" - }, { - name = "webhook.nodeSelector.kubernetes\\.io/os" - value = "linux" - }] + name = "cert-manager" + chart = "cert-manager" + repository = "https://charts.jetstack.io" + version = "v1.6.1" + namespace = "kube-system" + timeout = "600" + create_namespace = false + set = [] set_sensitive = null lint = false - values = null + values = local.default_helm_values wait = true wait_for_jobs = false description = "Cert Manager Helm chart deployment configuration" @@ -58,7 +42,9 @@ locals { # See ./cert-manager-ca/templates/ca.yaml install_default_ca = true } + cert_manager_helm_app = merge( local.default_cert_manager_helm_app, - var.cert_manager_helm_chart) + var.cert_manager_helm_chart + ) } diff --git a/kubernetes-addons/cert-manager/values.yaml b/kubernetes-addons/cert-manager/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3eb59e1a5145297c03a05e87c090e33317c5933 --- /dev/null +++ b/kubernetes-addons/cert-manager/values.yaml @@ -0,0 +1,4 @@ +extraArgs: + - --enable-certificate-owner-ref=true + +installCRDs: true diff --git a/kubernetes-addons/cluster-autoscaler/locals.tf b/kubernetes-addons/cluster-autoscaler/locals.tf index fd22f2a5748825a62b25af631c6ebf5f08667809..7b0224b3e3313431f190929f3d9cb71c9da4adfa 100644 --- a/kubernetes-addons/cluster-autoscaler/locals.tf +++ b/kubernetes-addons/cluster-autoscaler/locals.tf @@ -1,15 +1,20 @@ data "aws_region" "current" {} locals { + default_helm_values = [templatefile("${path.module}/values.yaml", { + aws_region = data.aws_region.current.name, + cluster_name = var.eks_cluster_id + })] + default_cluster_autoscaler_helm_app = { name = "cluster-autoscaler" chart = "cluster-autoscaler" repository = "https://kubernetes.github.io/autoscaler" - version = "9.10.7" + version = "9.10.8" namespace = "kube-system" timeout = "1200" create_namespace = false - values = null + values = local.default_helm_values lint = false verify = false keyring = "" @@ -35,29 +40,10 @@ locals { replace = false description = "Cluster AutoScaler helm Chart deployment configuration" postrender = "" - set = [ - { - name = "autoDiscovery.clusterName" - value = var.eks_cluster_id - }, - { - name = "extraArgs.aws-use-static-instance-list" - value = "true" - }, - { - name = "replicaCount" - value = "2" - }, - { - name = "awsRegion" - value = data.aws_region.current.id - }, - { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }] - set_sensitive = null + set = [] + set_sensitive = null } + cluster_autoscaler_helm_app = merge( local.default_cluster_autoscaler_helm_app, var.cluster_autoscaler_helm_chart diff --git a/kubernetes-addons/cluster-autoscaler/main.tf 
b/kubernetes-addons/cluster-autoscaler/main.tf index 46a6ff42039d9280820f6c0a6acf77881d9a2753..bb4a0318f3a51e747a3409c38590f81ba5b79357 100644 --- a/kubernetes-addons/cluster-autoscaler/main.tf +++ b/kubernetes-addons/cluster-autoscaler/main.tf @@ -74,5 +74,4 @@ resource "helm_release" "cluster_autoscaler" { value = each_item.value.value } } - } diff --git a/kubernetes-addons/cluster-autoscaler/values.yaml b/kubernetes-addons/cluster-autoscaler/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4559e7b3840d836d2e302c87b91014d03cee0926 --- /dev/null +++ b/kubernetes-addons/cluster-autoscaler/values.yaml @@ -0,0 +1,6 @@ +replicaCount: 2 +awsRegion: ${aws_region} +autoDiscovery: + clusterName: ${cluster_name} +extraArgs: + aws-use-static-instance-list: true \ No newline at end of file diff --git a/kubernetes-addons/lb-ingress-controller/lb-ingress-controller.yaml b/kubernetes-addons/lb-ingress-controller/lb-ingress-controller.yaml deleted file mode 100644 index 7a120d24f0000c6e6ef9e372eb65b32a8f89e55d..0000000000000000000000000000000000000000 --- a/kubernetes-addons/lb-ingress-controller/lb-ingress-controller.yaml +++ /dev/null @@ -1,19 +0,0 @@ -image: - repository: ${image} - tag: ${tag} - pullPolicy: IfNotPresent - -replicaCount: 1 -clusterName: ${clusterName} - -serviceAccount: - # Specifies whether a service account should be created - create: false - name: ${aws_lb_controller_sa} - -rbac: - # Specifies whether rbac resources should be created - create: true - -nodeSelector: - kubernetes.io/os: linux diff --git a/kubernetes-addons/lb-ingress-controller/locals.tf b/kubernetes-addons/lb-ingress-controller/locals.tf deleted file mode 100644 index 9d132b461aca9816a8cc2dacb39753749f5a7f28..0000000000000000000000000000000000000000 --- a/kubernetes-addons/lb-ingress-controller/locals.tf +++ /dev/null @@ -1,67 +0,0 @@ -data "aws_region" "current" {} - -locals { - aws_lb_controller_sa = "aws-load-balancer-controller" - - default_lb_ingress_controller_helm_app = { - name = "aws-lb-ingress-controller" - chart = "aws-load-balancer-controller" - repository = "https://aws.github.io/eks-charts" - version = "1.3.1" - namespace = "kube-system" - timeout = "1200" - create_namespace = false - values = [templatefile("${path.module}/lb-ingress-controller.yaml", { - region = data.aws_region.current.name, - image = "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller" - tag = "v2.3.0" - clusterName = var.eks_cluster_id - aws_lb_controller_sa = local.aws_lb_controller_sa - replicaCount = "1" - })] - set = [ - { - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }, - { - name = "serviceAccount.create" - value = "false" - }, - { - name = "serviceAccount.name" - value = local.aws_lb_controller_sa - } - ] - set_sensitive = null - lint = true - wait = true - wait_for_jobs = false - description = "aws-lb-ingress-controller Helm Chart for ingress resources" - verify = false - keyring = "" - repository_key_file = "" - repository_cert_file = "" - repository_ca_file = "" - repository_username = "" - repository_password = "" - disable_webhooks = false - reuse_values = false - reset_values = false - force_update = false - recreate_pods = false - cleanup_on_fail = false - max_history = 0 - atomic = false - skip_crds = false - render_subchart_notes = true - disable_openapi_validation = false - dependency_update = false - replace = false - postrender = "" - - } - lb_ingress_controller_helm_app = merge( - local.default_lb_ingress_controller_helm_app, - 
var.lb_ingress_controller_helm_app) -} diff --git a/kubernetes-addons/metrics-server/locals.tf b/kubernetes-addons/metrics-server/locals.tf index b0b61965a7c237cf1d9f4c892b7970f852abff65..fe67d2a4892599a78678dda9fd96798422662ff4 100644 --- a/kubernetes-addons/metrics-server/locals.tf +++ b/kubernetes-addons/metrics-server/locals.tf @@ -1,17 +1,14 @@ locals { default_metric_server_helm_app = { - name = "metrics-server" - chart = "metrics-server" - repository = "https://kubernetes-sigs.github.io/metrics-server/" - version = "3.5.0" - namespace = "kube-system" - timeout = "1200" - create_namespace = false - set = [{ - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }] + name = "metrics-server" + chart = "metrics-server" + repository = "https://kubernetes-sigs.github.io/metrics-server/" + version = "3.5.0" + namespace = "kube-system" + timeout = "1200" + create_namespace = false + set = [] set_sensitive = null lint = false values = null @@ -40,7 +37,9 @@ locals { replace = false postrender = "" } + metric_server_helm_app = merge( local.default_metric_server_helm_app, - var.metrics_server_helm_chart) + var.metrics_server_helm_chart + ) } diff --git a/kubernetes-addons/nginx-ingress/locals.tf b/kubernetes-addons/nginx-ingress/locals.tf index a154474c8a18da26055d3d6d05bca3da1e809c88..084bf2cdd04dd4bc98320e7c6a18fda30ad0d19f 100644 --- a/kubernetes-addons/nginx-ingress/locals.tf +++ b/kubernetes-addons/nginx-ingress/locals.tf @@ -1,18 +1,17 @@ locals { + default_helm_values = [templatefile("${path.module}/values.yaml", {})] + default_nginx_helm_app = { - name = "ingress-nginx" - chart = "ingress-nginx" - repository = "https://kubernetes.github.io/ingress-nginx" - version = "3.33.0" - namespace = "kube-system" - timeout = "1200" - create_namespace = false - values = local.default_nginx_helm_values - set = [{ - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }] + name = "ingress-nginx" + chart = "ingress-nginx" + repository = "https://kubernetes.github.io/ingress-nginx" + version = "4.0.6" + namespace = "kube-system" + timeout = "1200" + create_namespace = false + values = local.default_helm_values + set = [] set_sensitive = null lint = false verify = false @@ -40,9 +39,9 @@ locals { description = "The NGINX HelmChart Ingress Controller deployment configuration" postrender = "" } + nginx_helm_app = merge( local.default_nginx_helm_app, var.nginx_helm_chart ) - default_nginx_helm_values = [templatefile("${path.module}/nginx-default-values.yaml", {})] } diff --git a/kubernetes-addons/nginx-ingress/nginx-default-values.yaml b/kubernetes-addons/nginx-ingress/values.yaml similarity index 61% rename from kubernetes-addons/nginx-ingress/nginx-default-values.yaml rename to kubernetes-addons/nginx-ingress/values.yaml index 2b7bb8cd696600825469e2c21cfbc0803c666075..65dad2a3f605e27681b87eb3da44197c6e85d95c 100644 --- a/kubernetes-addons/nginx-ingress/nginx-default-values.yaml +++ b/kubernetes-addons/nginx-ingress/values.yaml @@ -1,23 +1,8 @@ controller: - image: - registry: k8s.gcr.io - image: ingress-nginx/controller - tag: v0.47.0 - - containerName: nginx-controller - service: - enabled: true annotations: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '60' service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' service.beta.kubernetes.io/aws-load-balancer-internal: "false" service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - - # Configures the ports the 
nginx-controller listens on - containerPort: - http: 80 - https: 443 - -kind: deployment diff --git a/kubernetes-addons/prometheus/locals.tf b/kubernetes-addons/prometheus/locals.tf index e2569e8cd0c2180f31e638d5153397207506ff41..bc365ccd3a846a51775cd15138e8a4fe4a133b74 100644 --- a/kubernetes-addons/prometheus/locals.tf +++ b/kubernetes-addons/prometheus/locals.tf @@ -1,49 +1,14 @@ data "aws_region" "current" {} locals { - aws_managed_prometheus = [{ - name = "serviceAccounts.server.name" - value = var.service_account_amp_ingest_name - }, - { - name = "serviceAccounts.server.annotations.eks\\.amazonaws\\.com/role-arn" - value = var.amp_ingest_role_arn - }, - { - name = "server.remoteWrite[0].url" - value = "https://aps-workspaces.${data.aws_region.current.id}.amazonaws.com/workspaces/${var.amp_workspace_id}/api/v1/remote_write" - }, - { - name = "server.remoteWrite[0].sigv4.region" - value = data.aws_region.current.id - }] + amp_workspace_url = "https://aps-workspaces.${data.aws_region.current.id}.amazonaws.com/workspaces/${var.amp_workspace_id}/api/v1/remote_write" - default_set_values = [{ - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }, - { - name = "kube-state-metrics.nodeSelector.kubernetes\\.io/os" - value = "linux" - }, - { - name = "nodeExporter.nodeSelector.kubernetes\\.io/os" - value = "linux" - }, - { - name = "pushgateway.nodeSelector.kubernetes\\.io/os" - value = "linux" - }, - { - name = "alertmanager.nodeSelector.kubernetes\\.io/os" - value = "linux" - }, - { - name = "server.nodeSelector.kubernetes\\.io/os" - value = "linux" - }, - ] - set_values = var.aws_managed_prometheus_enable == true ? concat(local.aws_managed_prometheus, local.default_set_values) : local.default_set_values + default_helm_values = [templatefile("${path.module}/values.yaml", { + aws_region = data.aws_region.current.name, + ingest_role_arn = var.amp_ingest_role_arn + service_account_name = var.service_account_amp_ingest_name + amp_workspace_url = local.amp_workspace_url + })] default_prometheus_helm_app = { name = "prometheus" @@ -55,11 +20,11 @@ locals { create_namespace = true description = "Prometheus helm Chart deployment configuration" lint = false - values = null + values = local.default_helm_values wait = true wait_for_jobs = false verify = false - set = local.set_values + set = [] set_sensitive = null keyring = "" repository_key_file = "" @@ -81,9 +46,10 @@ locals { dependency_update = false replace = false postrender = "" - } + prometheus_helm_app = merge( local.default_prometheus_helm_app, - var.prometheus_helm_chart) + var.prometheus_helm_chart + ) } diff --git a/kubernetes-addons/prometheus/values.yaml b/kubernetes-addons/prometheus/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..066e4e507133123611a89b2ba6cdcded621d22e8 --- /dev/null +++ b/kubernetes-addons/prometheus/values.yaml @@ -0,0 +1,15 @@ +serviceAccounts: + server: + name: ${service_account_name} + annotations: + eks.amazonaws.com/role-arn: ${ingest_role_arn} +server: + remoteWrite: + - url: ${amp_workspace_url} + sigv4: + region: ${aws_region} + queue_config: + max_samples_per_send: 1000 + max_shards: 200 + capacity: 2500 + diff --git a/kubernetes-addons/spark-k8s-operator/locals.tf b/kubernetes-addons/spark-k8s-operator/locals.tf index 437889e07e304df0e0a0f96da51af8424c7b8de0..47666de760030c3fb7bed0c76379d76bd6ac4bae 100644 --- a/kubernetes-addons/spark-k8s-operator/locals.tf +++ b/kubernetes-addons/spark-k8s-operator/locals.tf @@ -1,18 +1,15 @@ locals { 
default_spark_k8s_operator_helm_app = { - name = "spark-operator" - chart = "spark-operator" - repository = "https://googlecloudplatform.github.io/spark-on-k8s-operator" - version = "1.1.6" - namespace = "spark-k8s-operator" - timeout = "1200" - create_namespace = true - values = local.default_spark_k8s_operator_helm_values - set = [{ - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }] + name = "spark-operator" + chart = "spark-operator" + repository = "https://googlecloudplatform.github.io/spark-on-k8s-operator" + version = "1.1.6" + namespace = "spark-k8s-operator" + timeout = "1200" + create_namespace = true + values = null + set = [] set_sensitive = null lint = false verify = false @@ -40,9 +37,9 @@ locals { description = "The spark_k8s_operator HelmChart Ingress Controller deployment configuration" postrender = "" } + spark_k8s_operator_helm_app = merge( local.default_spark_k8s_operator_helm_app, var.spark_on_k8s_operator_helm_chart ) - default_spark_k8s_operator_helm_values = [templatefile("${path.module}/spark-k8s-operator-values.yaml", {})] } diff --git a/kubernetes-addons/spark-k8s-operator/main.tf b/kubernetes-addons/spark-k8s-operator/main.tf index 21c726df64b1be0cd6d6a26312250c1153967fb5..5e504bab7a9beb935d9fa9439b5b1e1377ac2716 100644 --- a/kubernetes-addons/spark-k8s-operator/main.tf +++ b/kubernetes-addons/spark-k8s-operator/main.tf @@ -73,5 +73,4 @@ resource "helm_release" "spark_k8s_operator" { value = each_item.value.value } } - } diff --git a/kubernetes-addons/spark-k8s-operator/spark-k8s-operator-values.yaml b/kubernetes-addons/spark-k8s-operator/spark-k8s-operator-values.yaml deleted file mode 100644 index 1692f2bd36907b1cfac9164b80aac0ff76c3b979..0000000000000000000000000000000000000000 --- a/kubernetes-addons/spark-k8s-operator/spark-k8s-operator-values.yaml +++ /dev/null @@ -1,160 +0,0 @@ -# Default values for spark-operator. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# replicaCount -- Desired number of pods, leaderElection will be enabled -# if this is greater than 1 -replicaCount: 1 - -image: - # -- Image repository - repository: gcr.io/spark-operator/spark-operator - # -- Image pull policy - pullPolicy: IfNotPresent - # -- Overrides the image tag whose default is the chart appVersion. - tag: "latest" - -# -- Image pull secrets -imagePullSecrets: [] - -# -- String to partially override `spark-operator.fullname` template (will maintain the release name) -nameOverride: "" - -# -- String to override release name -fullnameOverride: "" - -rbac: - # -- **DEPRECATED** use `createRole` and `createClusterRole` - create: false - # -- Create and use RBAC `Role` resources - createRole: true - # -- Create and use RBAC `ClusterRole` resources - createClusterRole: true - -serviceAccounts: - spark: - # -- Create a service account for spark apps - create: true - # -- Optional name for the spark service account - name: "" - sparkoperator: - # -- Create a service account for the operator - create: true - # -- Optional name for the operator service account - name: "" - -# -- Set this if running spark jobs in a different namespace than the operator -sparkJobNamespace: "" - -# -- Operator concurrency, higher values might increase memory usage -controllerThreads: 10 - -# -- Operator resync interval. Note that the operator will respond to events (e.g. create, update) -# unrelated to this setting -resyncInterval: 30 - -uiService: - # -- Enable UI service creation for Spark application - enable: true - -# -- Ingress URL format. 
-# Requires the UI service to be enabled by setting `uiService.enable` to true. -ingressUrlFormat: "" - -# -- Set higher levels for more verbose logging -logLevel: 2 - -# podSecurityContext -- Pod security context -podSecurityContext: {} - -# securityContext -- Operator container security context -securityContext: {} - -webhook: - # -- Enable webhook server - enable: false - # -- Webhook service port - port: 8080 - # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. - # Empty string (default) will operate on all namespaces - namespaceSelector: "" - # -- The annotations applied to the cleanup job, required for helm lifecycle hooks - cleanupAnnotations: - "helm.sh/hook": pre-delete, pre-upgrade - "helm.sh/hook-delete-policy": hook-succeeded - -metrics: - # -- Enable prometheus metric scraping - enable: true - # -- Metrics port - port: 10254 - # -- Metrics port name - portName: metrics - # -- Metrics serving endpoint - endpoint: /metrics - # -- Metric prefix, will be added to all exported metrics - prefix: "" - -# -- Prometheus pod monitor for operator's pod. -podMonitor: - # -- If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. - enable: false - # -- Pod monitor labels - labels: {} - # -- The label to use to retrieve the job name from - jobLabel: spark-operator-podmonitor - # -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port - podMetricsEndpoint: - scheme: http - interval: 5s - -# nodeSelector -- Node labels for pod assignment -nodeSelector: {} - -# tolerations -- List of node taints to tolerate -tolerations: [] - -# affinity -- Affinity for pod assignment -affinity: {} - -# podAnnotations -- Additional annotations to add to the pod -podAnnotations: {} - -# podLabels -- Additional labels to add to the pod -podLabels: {} - -# resources -- Pod resource requests and limits -# Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". -# Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: -# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. -resources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m -# memory: 300Mi - -batchScheduler: - # -- Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application - enable: false - -resourceQuotaEnforcement: - # -- Whether to enable the ResourceQuota enforcement for SparkApplication resources. - # Requires the webhook to be enabled by setting `webhook.enable` to true. - # Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. - enable: false - -leaderElection: - # -- Leader election lock name. - # Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. - lockName: "spark-operator-lock" - # -- Optionally store the lock in another namespace. 
Defaults to operator's namespace - lockNamespace: "" - -istio: - # -- When using `istio`, spark jobs need to run without a sidecar to properly terminate - enabled: false - -# labelSelectorFilter -- A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. -labelSelectorFilter: "" diff --git a/kubernetes-addons/traefik-ingress/locals.tf b/kubernetes-addons/traefik-ingress/locals.tf index 3ba5192c53450a8f094648fc5fe10318bf9a1fea..0d70f7437de3bd21b24f68068afba0455ab21841 100644 --- a/kubernetes-addons/traefik-ingress/locals.tf +++ b/kubernetes-addons/traefik-ingress/locals.tf @@ -1,18 +1,15 @@ locals { default_traefik_helm_app = { - name = "traefik" - chart = "traefik" - repository = "https://helm.traefik.io/traefik" - version = "10.0.0" - namespace = "kube-system" - timeout = "1200" - create_namespace = false - values = null - set = [{ - name = "nodeSelector.kubernetes\\.io/os" - value = "linux" - }] + name = "traefik" + chart = "traefik" + repository = "https://helm.traefik.io/traefik" + version = "10.0.0" + namespace = "kube-system" + timeout = "1200" + create_namespace = false + values = null + set = [] set_sensitive = null lint = false verify = false @@ -40,6 +37,7 @@ locals { description = "The Traefik HelmChart is focused on Traefik deployment configuration" postrender = "" } + traefik_helm_app = merge( local.default_traefik_helm_app, var.traefik_helm_chart diff --git a/kubernetes-addons/windows-vpc-controllers/locals.tf b/kubernetes-addons/windows-vpc-controllers/locals.tf index 602b203ee4634b799152f9426fd45d9c5173b787..8e93b71e15a679c096facae1df2925cf9c8b0369 100644 --- a/kubernetes-addons/windows-vpc-controllers/locals.tf +++ b/kubernetes-addons/windows-vpc-controllers/locals.tf @@ -1,26 +1,18 @@ locals { + default_helm_values = [templatefile("${path.module}/values.yaml", {})] + default_windows_vpc_controllers_helm_app = { - name = "windows-vpc-controllers" - chart = "windows-vpc-controllers" - repository = "https://charts.jetstack.io" - version = "v1.5.4" - namespace = "kube-system" - timeout = "600" - create_namespace = false - set = [{ - name = "resourceController.containerCommand.args[0]" - value = "-stderrthreshold=info" - }, { - name = "admissionWebhook.certificate.key.rotationPolicy" - value = "Always" - }, { - name = "admissionWebhook.certificate.issuer.name" - value = "cert-manager-ca" - }] + name = "windows-vpc-controllers" + chart = "windows-vpc-controllers" + repository = "https://charts.jetstack.io" + version = "v1.5.4" + namespace = "kube-system" + timeout = "600" + create_namespace = false set_sensitive = null lint = false - values = null + values = local.default_helm_values wait = true wait_for_jobs = false description = "Cert Manager Helm chart deployment configuration" @@ -46,7 +38,9 @@ locals { replace = false postrender = "" } + windows_vpc_controllers_helm_app = merge( local.default_windows_vpc_controllers_helm_app, - var.windows_vpc_controllers_helm_chart) + var.windows_vpc_controllers_helm_chart + ) } diff --git a/kubernetes-addons/windows-vpc-controllers/values.yaml b/kubernetes-addons/windows-vpc-controllers/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e9405a468637b8d307f5bec947e53c86e1bd24d --- /dev/null +++ b/kubernetes-addons/windows-vpc-controllers/values.yaml @@ -0,0 +1,10 @@ +resourceController: + containerCommand: + args: + - -stderrthreshold=info +admissionWebhook: + certificate: + key: + rotationPolicy: Always + issuer: + name: cert-manager-ca \ No 
newline at end of file
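
The rename above changes only the Terraform address and the module path; the public switches stay the same. The add-on is still gated by `var.create_eks` and `var.aws_lb_ingress_controller_enable`, and Helm overrides still flow through `var.aws_lb_ingress_controller_helm_app` into the `merge()` in the new locals.tf. A minimal sketch of enabling it from a consumer's `terraform.tfvars` follows — only the switches touched by this diff are shown; the cluster and VPC inputs the root module already requires are omitted:

```hcl
# Sketch: the variable names are taken from kubernetes-addons.tf above; every other
# root-module input is omitted here for brevity.
create_eks                       = true
aws_lb_ingress_controller_enable = true

# Optional: any keys set here are merge()d over the defaults in
# kubernetes-addons/aws-load-balancer-controller/locals.tf (chart 1.3.1, kube-system, ...).
aws_lb_ingress_controller_helm_app = {
  version = "1.3.1"
}
```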
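The same refactor repeats across the add-ons in this diff: chart-specific `*-values.yaml` files become `values.yaml`, the long inline `set = [...]` lists collapse to `set = []`, and the defaults move into a values file rendered with `templatefile()` and merged with the caller's override variable. Below is a minimal, self-contained sketch of that pattern, assuming a `values.yaml` with `${aws_region}` and `${log_group_name}` placeholders sits next to the locals file; the `example` identifiers are placeholders, and the shape mirrors `kubernetes-addons/aws-for-fluentbit/locals.tf`:

```hcl
# Placeholder add-on illustrating the values-file pattern introduced by this diff.
variable "example_helm_chart" {
  description = "Caller-supplied overrides merged over the add-on defaults"
  type        = any
  default     = {}
}

data "aws_region" "current" {}

locals {
  log_group_name = "/example-cluster/worker-fluentbit-logs"

  # Rendered once; the ${...} placeholders live in values.yaml, not in Terraform.
  default_helm_values = [templatefile("${path.module}/values.yaml", {
    aws_region     = data.aws_region.current.name
    log_group_name = local.log_group_name
  })]

  default_example_helm_app = {
    name      = "example-addon"
    namespace = "kube-system"
    values    = local.default_helm_values
    set       = [] # per-key overrides now belong in values.yaml or the caller's `set`
  }

  # Caller keys win over the defaults; untouched keys keep the values above.
  example_helm_app = merge(
    local.default_example_helm_app,
    var.example_helm_chart
  )
}
```

Because `merge()` works key-by-key at the top level, a caller only passes the keys they want to change (for example a newer chart `version`), and every other key keeps the default pinned in the diff.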
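One behavioural note on the `set = []` cleanups: the dropped entries included the `nodeSelector.kubernetes\.io/os = linux` pin that most add-ons carried, and the new `values.yaml` files shown here do not re-add it. A caller that still needs that pin can push it back through the per-add-on override variable, since `set` is just another key the `merge()` accepts — sketched below for metrics-server, whose override variable `metrics_server_helm_chart` appears in the diff:

```hcl
# terraform.tfvars (sketch): restores the former nodeSelector pin for one add-on;
# the same shape works for the other *_helm_chart override variables.
metrics_server_helm_chart = {
  set = [{
    name  = "nodeSelector.kubernetes\\.io/os"
    value = "linux"
  }]
}
```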