Unverified commit 9b661429, authored by Kevin Coleman, committed by GitHub

Merge pull request #71 from aws-samples/feature/84-audit-add-on-values

Feature/84 audit add on values
parents aca96ab2 65ab0219
Showing with 164 additions and 417 deletions
@@ -144,6 +144,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 | <a name="module_aws_eks_fargate_profiles"></a> [aws\_eks\_fargate\_profiles](#module\_aws\_eks\_fargate\_profiles) | ./modules/aws-eks-fargate-profiles | n/a |
 | <a name="module_aws_eks_managed_node_groups"></a> [aws\_eks\_managed\_node\_groups](#module\_aws\_eks\_managed\_node\_groups) | ./modules/aws-eks-managed-node-groups | n/a |
 | <a name="module_aws_eks_self_managed_node_groups"></a> [aws\_eks\_self\_managed\_node\_groups](#module\_aws\_eks\_self\_managed\_node\_groups) | ./modules/aws-eks-self-managed-node-groups | n/a |
+| <a name="module_aws_load_balancer_controller"></a> [aws\_load\_balancer\_controller](#module\_aws\_load\_balancer\_controller) | ./kubernetes-addons/aws-load-balancer-controller | n/a |
 | <a name="module_aws_managed_prometheus"></a> [aws\_managed\_prometheus](#module\_aws\_managed\_prometheus) | ./modules/aws-managed-prometheus | n/a |
 | <a name="module_aws_opentelemetry_collector"></a> [aws\_opentelemetry\_collector](#module\_aws\_opentelemetry\_collector) | ./kubernetes-addons/aws-opentelemetry-eks | n/a |
 | <a name="module_cert_manager"></a> [cert\_manager](#module\_cert\_manager) | ./kubernetes-addons/cert-manager | n/a |
@@ -152,7 +153,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 | <a name="module_emr_on_eks"></a> [emr\_on\_eks](#module\_emr\_on\_eks) | ./modules/emr-on-eks | n/a |
 | <a name="module_fargate_fluentbit"></a> [fargate\_fluentbit](#module\_fargate\_fluentbit) | ./kubernetes-addons/fargate-fluentbit | n/a |
 | <a name="module_keda"></a> [keda](#module\_keda) | ./kubernetes-addons/keda | n/a |
-| <a name="module_lb_ingress_controller"></a> [lb\_ingress\_controller](#module\_lb\_ingress\_controller) | ./kubernetes-addons/lb-ingress-controller | n/a |
 | <a name="module_metrics_server"></a> [metrics\_server](#module\_metrics\_server) | ./kubernetes-addons/metrics-server | n/a |
 | <a name="module_nginx_ingress"></a> [nginx\_ingress](#module\_nginx\_ingress) | ./kubernetes-addons/nginx-ingress | n/a |
 | <a name="module_prometheus"></a> [prometheus](#module\_prometheus) | ./kubernetes-addons/prometheus | n/a |
@@ -101,7 +101,6 @@ module "aws_eks_fargate_profiles" {
 # AWS EKS Add-ons (VPC CNI, CoreDNS, KubeProxy)
 # ---------------------------------------------------------------------------------------------------------------------
 module "aws_eks_addon" {
   count  = var.create_eks && var.enable_managed_nodegroups || var.create_eks && var.enable_self_managed_nodegroups || var.create_eks && var.enable_fargate ? 1 : 0
   source = "./modules/aws-eks-addon"
@@ -116,7 +115,5 @@ module "aws_eks_addon" {
   kube_proxy_addon_version = var.kube_proxy_addon_version
   tags                     = module.eks_tags.tags

-  depends_on = [
-    module.aws_eks
-  ]
+  depends_on = [module.aws_eks]
 }
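The count expression above relies on HCL operator precedence: && binds tighter than ||, so no parentheses are needed for correctness. A factored, equivalent form makes the intent clearer; this is a readability sketch, not part of the diff:

  # Equivalent count expression, same variables, same result:
  count = var.create_eks && (
    var.enable_managed_nodegroups ||
    var.enable_self_managed_nodegroups ||
    var.enable_fargate
  ) ? 1 : 0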
@@ -45,6 +45,7 @@ module "prometheus" {
   count  = var.create_eks && var.prometheus_enable ? 1 : 0
   source = "./kubernetes-addons/prometheus"

   prometheus_helm_chart = var.prometheus_helm_chart

   #AWS Managed Prometheus Workspace
   aws_managed_prometheus_enable = var.aws_managed_prometheus_enable
   amp_workspace_id              = var.aws_managed_prometheus_enable ? module.aws_managed_prometheus[0].amp_workspace_id : ""
@@ -54,9 +55,9 @@ module "prometheus" {
   depends_on = [module.aws_eks]
 }

-module "lb_ingress_controller" {
+module "aws_load_balancer_controller" {
   count  = var.create_eks && var.aws_lb_ingress_controller_enable ? 1 : 0
-  source = "./kubernetes-addons/lb-ingress-controller"
+  source = "./kubernetes-addons/aws-load-balancer-controller"

   eks_cluster_id                 = module.aws_eks.cluster_id
   lb_ingress_controller_helm_app = var.aws_lb_ingress_controller_helm_app
   eks_oidc_issuer_url            = module.aws_eks.cluster_oidc_issuer_url
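Only the module address and source path change here, so a caller keeps enabling the controller through the same input flag. A minimal sketch of a root configuration, assuming the input names shown in this diff (the source address is illustrative and should be pinned to a ref):

module "eks_accelerator" {
  # Illustrative source; pin to the repository and ref you actually use.
  source = "github.com/aws-samples/aws-eks-accelerator-for-terraform"

  create_eks                       = true
  aws_lb_ingress_controller_enable = true # gates module "aws_load_balancer_controller"
}

Existing deployments would also likely need a terraform state mv from the old module.lb_ingress_controller address to module.aws_load_balancer_controller to keep Terraform from destroying and recreating the Helm release after the rename.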
agones:
  featureGates: ""
  metrics:
    prometheusEnabled: true
    prometheusServiceDiscovery: true
    stackdriverEnabled: false
    stackdriverProjectID: ""
    stackdriverLabels: ""
  rbacEnabled: true
  registerServiceAccounts: true
  registerWebhooks: true
  registerApiService: true
  crds:
    install: true
    cleanupOnDelete: true
  serviceaccount:
    controller: agones-controller
    sdk: agones-sdk
    allocator: agones-allocator
  createPriorityClass: true
  priorityClassName: agones-system
  controller:
    resources: {}
    # requests:
    #   cpu: 1
    #   memory: 256Mi
    nodeSelector:
      kubernetes.io/os: linux
    annotations: {}
    tolerations:
      - key: "agones.dev/agones-system"
        operator: "Equal"
        value: "true"
        effect: "NoExecute"
    affinity:
      nodeAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
                - key: agones.dev/agones-system
                  operator: Exists
    generateTLS: true
    safeToEvict: false
    persistentLogs: true
    persistentLogsSizeLimitMB: 10000
    logLevel: info
    numWorkers: 100
    apiServerQPS: 400
    apiServerQPSBurst: 500
    http:
      port: 8080
    healthCheck:
      initialDelaySeconds: 3
      periodSeconds: 3
      failureThreshold: 3
      timeoutSeconds: 1
  ping:
    install: true
    resources: {}
    # requests:
    #   cpu: 1
    #   memory: 256Mi
    nodeSelector:
      kubernetes.io/os: linux
    annotations: {}
    tolerations:
      - key: "agones.dev/agones-system"
        operator: "Equal"
        value: "true"
        effect: "NoExecute"
    affinity:
      nodeAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
                - key: agones.dev/agones-system
                  operator: Exists
    replicas: 2
    http:
      expose: true
      response: ok
      port: 80
      serviceType: LoadBalancer
      loadBalancerIP: ""
      loadBalancerSourceRanges: []
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "false"
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
    udp:
      expose: true
      rateLimit: 20
      port: 50000
      serviceType: LoadBalancer
      loadBalancerIP: ""
      loadBalancerSourceRanges: []
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "false"
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
    healthCheck:
      initialDelaySeconds: 3
      periodSeconds: 3
      failureThreshold: 3
      timeoutSeconds: 5
  allocator:
    install: true
    apiServerQPS: 400
    apiServerQPSBurst: 500
    logLevel: info
    annotations: {}
    resources: {}
    # requests:
    #   cpu: 1
    #   memory: 256Mi
    nodeSelector:
      kubernetes.io/os: linux
    healthCheck:
      initialDelaySeconds: 3
      periodSeconds: 3
      failureThreshold: 3
      timeoutSeconds: 1
    tolerations:
      - key: "agones.dev/agones-system"
        operator: "Equal"
        value: "true"
        effect: "NoExecute"
    affinity:
      nodeAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
                - key: agones.dev/agones-system
                  operator: Exists
    replicas: 3
    http:
      port: 443
      serviceType: LoadBalancer
      loadBalancerIP: ""
      loadBalancerSourceRanges: []
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "false"
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
    generateTLS: true
    generateClientTLS: true
    disableMTLS: false
    disableTLS: false
    remoteAllocationTimeout: 10s
    totalRemoteAllocationTimeout: 30s
  image:
    registry: gcr.io/agones-images
    tag: 1.15.0
    controller:
      name: agones-controller
      pullPolicy: IfNotPresent
    sdk:
      name: agones-sdk
      cpuRequest: 30m
      cpuLimit: 0
      memoryRequest: 0
      memoryLimit: 0
      alwaysPull: false
    ping:
      name: agones-ping
      pullPolicy: IfNotPresent
    allocator:
      name: agones-allocator
      pullPolicy: IfNotPresent

gameservers:
  namespaces:
    - default
  minPort: 7000
  maxPort: 8000
  podPreserveUnknownFields: false

helm:
  installTests: false
#!/usr/bin/env bash
echo "Generating certs..."
echo "Email should be: emailAddress=agones-discuss@googlegroups.com"
echo "Common Name should be: agones-controller-service.agones-system.svc"
openssl genrsa -out server.key 2048
openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
echo "caBundle:"
base64 -w 0 server.crt
echo "done"
data "aws_region" "current" {} data "aws_region" "current" {}
locals { locals {
default_helm_values = [templatefile("${path.module}/values.yaml", {})]
default_agones_helm_app = { default_agones_helm_app = {
name = "agones" name = "agones"
chart = "agones" chart = "agones"
repository = "https://agones.dev/chart/stable" repository = "https://agones.dev/chart/stable"
version = "1.15.0" version = "1.18.0"
namespace = "agones-system" namespace = "agones-system"
timeout = "1200" timeout = "1200"
create_namespace = true create_namespace = true
description = "Agones Gaming Server Helm Chart deployment configuration" description = "Agones Gaming Server Helm Chart deployment configuration"
lint = false lint = false
values = local.default_agones_helm_values values = local.default_helm_values
wait = true wait = true
wait_for_jobs = false wait_for_jobs = false
verify = false verify = false
set = null set = []
set_sensitive = null set_sensitive = null
keyring = "" keyring = ""
repository_key_file = "" repository_key_file = ""
...@@ -41,11 +42,9 @@ locals { ...@@ -41,11 +42,9 @@ locals {
gameserver_minport = 7000 gameserver_minport = 7000
gameserver_maxport = 8000 gameserver_maxport = 8000
} }
agones_helm_app = merge( agones_helm_app = merge(
local.default_agones_helm_app, local.default_agones_helm_app,
var.agones_helm_chart var.agones_helm_chart
) )
default_agones_helm_values = [templatefile("${path.module}/agones-values.yaml", {})]
} }
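Because agones_helm_app is a shallow merge, any key supplied through var.agones_helm_chart replaces the corresponding default wholesale rather than being deep-merged. A hypothetical caller override (key names taken from the defaults above, values invented for illustration):

agones_helm_chart = {
  version = "1.18.0"
  values  = [file("custom-agones-values.yaml")] # replaces default_helm_values entirely
}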
agones:
  ping:
    http:
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "false"
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
    udp:
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "false"
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
  allocator:
    http:
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "false"
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
\ No newline at end of file
 locals {
+  default_helm_values = [templatefile("${path.module}/values.yaml", {})]
+
   default_argocd_helm_app = {
     name             = "argo-cd"
     chart            = "argo-cd"
     repository       = "https://argoproj.github.io/argo-helm"
-    version          = "3.26.3"
+    version          = "3.26.8"
     namespace        = "argocd"
     timeout          = "1200"
     create_namespace = true
-    values           = local.default_argocd_helm_values
-    set = [{
-      name  = "nodeSelector.kubernetes\\.io/os"
-      value = "linux"
-    }]
+    values           = local.default_helm_values
+    set              = []
     set_sensitive    = null
     lint             = false
     verify           = false
@@ -40,11 +39,11 @@ locals {
     description = "The argocd HelmChart Ingress Controller deployment configuration"
     postrender  = ""
   }

   argocd_helm_app = merge(
     local.default_argocd_helm_app,
     var.argocd_helm_chart
   )
-
-  default_argocd_helm_values = [templatefile("${path.module}/argocd-values.yaml", {})]

   # Global Values for ArgoCD App of Apps.
   global_values = {
global:
  ## Override the deployment namespace
  namespaceOverride: logging

image:
  repository: amazon/aws-for-fluent-bit
  tag: 2.1.0
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

service:
  parsersFiles:
    - /fluent-bit/parsers/parsers.conf
  # extraParsers: |
  #   [PARSER]
  #       Name   logfmt
  #       Format logfmt

input:
  tag: "kube.*"
  path: "/var/log/containers/*.log"
  db: "/var/log/flb_kube.db"
  parser: docker
  dockerMode: "On"
  memBufLimit: 5MB
  skipLongLines: "On"
  refreshInterval: 10
  # extraInputs: |
  #   [INPUT]
  #       Name          winlog
  #       Channels      Setup,Windows PowerShell
  #       Interval_Sec  1
  #       DB            winlog.sqlite

filter:
  match: "kube.*"
  kubeURL: "https://kubernetes.default.svc.cluster.local:443"
  mergeLog: "On"
  mergeLogKey: "data"
  keepLog: "On"
  k8sLoggingParser: "On"
  k8sLoggingExclude: "On"
  # extraFilters: |
  #   [FILTER]
  #       Name     grep
  #       Match    *
  #       Exclude  log lvl=debug*

cloudWatch:
  enabled: true
  match: "*"
  region: ${region}
  logGroupName: ${aws_for_fluent_bit_cw_log_group}
  logStreamName:
  logStreamPrefix: "fluentbit-"
  logKey:
  logFormat:
  roleArn:
  autoCreateGroup: true
  endpoint:
  credentialsEndpoint: {}

firehose:
  enabled: false
  match: "*"
  region: ${region}
  deliveryStream: "my-stream"
  dataKeys:
  roleArn:
  endpoint:
  timeKey:

kinesis:
  enabled: false
  match: "*"
  region: ${region}
  stream: "my-kinesis-stream-name"
  partitionKey: "container_id"
  appendNewline:
  dataKeys:
  roleArn:
  timeKey:
  timeKeyFormat:
  # extraOutputs: |
  #   [OUTPUT]
  #       Name     file
  #       Format   template
  #       Template {time} used={Mem.used} free={Mem.free} total={Mem.total}

serviceAccount:
  create: true
  annotations: {}
  name:

resources:
  limits:
    memory: 500Mi
  requests:
    cpu: 500m
    memory: 500Mi

## Assign a PriorityClassName to pods if set
# priorityClassName: system-node-critical

updateStrategy:
  type: RollingUpdate

nodeSelector:
  kubernetes.io/os: linux

tolerations: []

affinity: {}

annotations: {}
  # iam.amazonaws.com/role: arn:aws:iam::123456789012:role/role-for-fluent-bit

env: []

volumes:
  - name: varlog
    hostPath:
      path: /var/log
  - name: varlibdockercontainers
    hostPath:
      path: /var/lib/docker/containers

volumeMounts:
  - name: varlog
    mountPath: /var/log
  - name: varlibdockercontainers
    mountPath: /var/lib/docker/containers
    readOnly: true
 locals {
-  aws_for_fluentbit_cwlog_group_name = "/${var.eks_cluster_id}/worker-fluentbit-logs"
+  log_group_name = "/${var.eks_cluster_id}/worker-fluentbit-logs"
+
+  default_helm_values = [templatefile("${path.module}/values.yaml", {
+    aws_region     = data.aws_region.current.name,
+    log_group_name = local.log_group_name
+  })]

   default_aws_for_fluentbit_helm_app = {
     name             = "aws-for-fluent-bit"
     chart            = "aws-for-fluent-bit"
     repository       = "https://aws.github.io/eks-charts"
-    version          = "0.1.0"
+    version          = "0.1.11"
-    namespace        = "logging"
+    namespace        = "kube-system"
     timeout          = "1200"
     create_namespace = true
-    aws_for_fluent_bit_cw_log_group = local.aws_for_fluentbit_cwlog_group_name
+    aws_for_fluent_bit_cw_log_group = local.log_group_name
     aws_for_fluentbit_cwlog_retention_in_days = 90
-    values = [templatefile("${path.module}/aws-for-fluent-bit-values.yaml", {
-      region                          = data.aws_region.current.name,
-      aws_for_fluent_bit_cw_log_group = local.aws_for_fluentbit_cwlog_group_name
-    })]
-    set = [
-      {
-        name  = "nodeSelector.kubernetes\\.io/os"
-        value = "linux"
-      }
-    ]
+    values = local.default_helm_values
+    set    = []
     set_sensitive = null
     lint          = true
     wait          = true
     wait_for_jobs = false
     description   = "aws-for-fluentbit Helm Chart deployment configuration"
     verify        = false
     keyring       = ""
     repository_key_file  = ""
     repository_cert_file = ""
     repository_ca_file   = ""
     repository_username  = ""
     repository_password  = ""
     disable_webhooks     = false
     reuse_values         = false
     reset_values         = false
     force_update         = false
     recreate_pods        = false
     cleanup_on_fail      = false
     max_history          = 0
     atomic               = false
     skip_crds            = false
     render_subchart_notes      = true
     disable_openapi_validation = false
     dependency_update          = false
     replace                    = false
     postrender                 = ""
   }

   aws_for_fluentbit_helm_app = merge(
     local.default_aws_for_fluentbit_helm_app,
-    var.aws_for_fluentbit_helm_chart)
+    var.aws_for_fluentbit_helm_chart
+  )
 }
cloudWatch:
  region: ${aws_region}
  logGroupName: ${log_group_name}
firehose:
  region: ${aws_region}
kinesis:
  region: ${aws_region}
\ No newline at end of file
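For reference, the ${aws_region} and ${log_group_name} placeholders above are ordinary Terraform template interpolations filled in by the templatefile call in the locals diff. A minimal rendering sketch; the region and log group values here are invented for the example (the real call passes data.aws_region.current.name and local.log_group_name):

locals {
  rendered_values = templatefile("${path.module}/values.yaml", {
    aws_region     = "eu-west-1"                          # example region
    log_group_name = "/my-cluster/worker-fluentbit-logs"  # example log group
  })
}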
@@ -94,12 +94,12 @@ No modules.

 | Name | Type |
 |------|------|
-| [aws_iam_policy.eks_lb_controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_role.eks_lb_controller_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.aws_load_balancer_controller_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
 | [aws_iam_role_policy_attachment.eks_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
 | [helm_release.lb_ingress](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
-| [kubernetes_service_account.eks_lb_controller_sa](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource |
+| [kubernetes_service_account.aws_load_balancer_controller_sa](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource |
-| [aws_iam_policy_document.eks_lb_controller_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.aws_load_balancer_controller_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
 | [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |

 ## Inputs
data "aws_region" "current" {} data "aws_region" "current" {}
locals { locals {
aws_lb_controller_sa = "aws-load-balancer-controller" aws_load_balancer_controller_sa = "aws-load-balancer-controller-sa"
default_helm_values = [templatefile("${path.module}/values.yaml", {
aws_region = data.aws_region.current.name,
cluster_name = var.eks_cluster_id,
service_account_name = local.aws_load_balancer_controller_sa
})]
default_lb_ingress_controller_helm_app = { default_lb_ingress_controller_helm_app = {
name = "aws-lb-ingress-controller" name = "aws-load-balancer-controller"
chart = "aws-load-balancer-controller" chart = "aws-load-balancer-controller"
repository = "https://aws.github.io/eks-charts" repository = "https://aws.github.io/eks-charts"
version = "1.3.1" version = "1.3.1"
namespace = "kube-system" namespace = "kube-system"
timeout = "1200" timeout = "1200"
create_namespace = false create_namespace = false
values = [templatefile("${path.module}/lb-ingress-controller.yaml", { values = local.default_helm_values
region = data.aws_region.current.name, set = []
image = "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller"
tag = "v2.3.0"
clusterName = var.eks_cluster_id
aws_lb_controller_sa = local.aws_lb_controller_sa
replicaCount = "1"
})]
set = [
{
name = "nodeSelector.kubernetes\\.io/os"
value = "linux"
},
{
name = "serviceAccount.create"
value = "false"
},
{
name = "serviceAccount.name"
value = local.aws_lb_controller_sa
}
]
set_sensitive = null set_sensitive = null
lint = true lint = true
wait = true wait = true
wait_for_jobs = false wait_for_jobs = false
description = "aws-lb-ingress-controller Helm Chart for ingress resources" description = "aws-load-balancer-controller Helm Chart for ingress resources"
verify = false verify = false
keyring = "" keyring = ""
repository_key_file = "" repository_key_file = ""
...@@ -59,9 +45,10 @@ locals { ...@@ -59,9 +45,10 @@ locals {
dependency_update = false dependency_update = false
replace = false replace = false
postrender = "" postrender = ""
} }
lb_ingress_controller_helm_app = merge( lb_ingress_controller_helm_app = merge(
local.default_lb_ingress_controller_helm_app, local.default_lb_ingress_controller_helm_app,
var.lb_ingress_controller_helm_app) var.lb_ingress_controller_helm_app
)
} }
@@ -74,10 +74,10 @@ resource "helm_release" "lb_ingress" {
     }
   }

-  depends_on = [aws_iam_role.eks_lb_controller_role, kubernetes_service_account.eks_lb_controller_sa]
+  depends_on = [aws_iam_role.aws_load_balancer_controller_role, kubernetes_service_account.aws_load_balancer_controller_sa]
 }

-resource "aws_iam_policy" "eks_lb_controller" {
+resource "aws_iam_policy" "aws_load_balancer_controller" {
   name        = "${var.eks_cluster_id}-lb-controller-policy"
   description = "Allows lb controller to manage ALB and NLB"
@@ -310,7 +310,7 @@ resource "aws_iam_policy" "eks_lb_controller" {
 EOF
 }

-data "aws_iam_policy_document" "eks_lb_controller_assume_policy" {
+data "aws_iam_policy_document" "aws_load_balancer_controller_assume_policy" {
   statement {
     actions = ["sts:AssumeRoleWithWebIdentity"]
     effect  = "Allow"
@@ -318,7 +318,7 @@ data "aws_iam_policy_document" "eks_lb_controller_assume_policy" {
     condition {
       test     = "StringEquals"
       variable = "${replace(var.eks_oidc_issuer_url, "https://", "")}:sub"
-      values   = ["system:serviceaccount:kube-system:${local.aws_lb_controller_sa}"]
+      values   = ["system:serviceaccount:kube-system:${local.aws_load_balancer_controller_sa}"]
     }

     principals {
@@ -329,24 +329,24 @@ data "aws_iam_policy_document" "eks_lb_controller_assume_policy" {
 }

 # IAM role for eks alb controller
-resource "aws_iam_role" "eks_lb_controller_role" {
+resource "aws_iam_role" "aws_load_balancer_controller_role" {
   name               = "${var.eks_cluster_id}-lb-controller-role"
-  assume_role_policy = data.aws_iam_policy_document.eks_lb_controller_assume_policy.json
+  assume_role_policy = data.aws_iam_policy_document.aws_load_balancer_controller_assume_policy.json
 }

 # Allows eks alb controller to manage LB's
 resource "aws_iam_role_policy_attachment" "eks_role_policy" {
-  role       = aws_iam_role.eks_lb_controller_role.name
+  role       = aws_iam_role.aws_load_balancer_controller_role.name
-  policy_arn = aws_iam_policy.eks_lb_controller.arn
+  policy_arn = aws_iam_policy.aws_load_balancer_controller.arn
 }

 # Kubernetes service account for lb controller
-resource "kubernetes_service_account" "eks_lb_controller_sa" {
+resource "kubernetes_service_account" "aws_load_balancer_controller_sa" {
   metadata {
-    name      = local.aws_lb_controller_sa
+    name      = local.aws_load_balancer_controller_sa
     namespace = "kube-system"
-    annotations = { "eks.amazonaws.com/role-arn" : aws_iam_role.eks_lb_controller_role.arn }
+    annotations = { "eks.amazonaws.com/role-arn" : aws_iam_role.aws_load_balancer_controller_role.arn }
   }
   automount_service_account_token = true
 }
\ No newline at end of file
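To make the IRSA trust relationship concrete: with a hypothetical OIDC issuer URL, the condition block in aws_load_balancer_controller_assume_policy resolves to values like these, restricting role assumption to the one service account created above (issuer ID and region are invented for illustration):

condition {
  test     = "StringEquals"
  variable = "oidc.eks.eu-west-1.amazonaws.com/id/EXAMPLE1234567890:sub"
  values   = ["system:serviceaccount:kube-system:aws-load-balancer-controller-sa"]
}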
@@ -19,6 +19,7 @@
 output "ingress_namespace" {
   value = helm_release.lb_ingress.metadata[0].namespace
 }

 output "ingress_name" {
   value = helm_release.lb_ingress.metadata[0].name
 }
clusterName: ${cluster_name}
region: ${aws_region}
serviceAccount:
  create: false
  name: ${service_account_name}
\ No newline at end of file
@@ -12,7 +12,9 @@ locals {
     aws_open_telemetry_mg_node_iam_role_arns      = []
     aws_open_telemetry_self_mg_node_iam_role_arns = []
   }

   aws_open_telemetry_app = merge(
     local.default_aws_open_telemetry_helm_app,
-    var.aws_open_telemetry_addon)
+    var.aws_open_telemetry_addon
+  )
 }