Commit 8f53ee5a authored by Vara Bonthu

Added AWS Distro for OpenTelemetry Collector in EKS

parent 0f757b1a

Showing with 855 additions and 18 deletions
@@ -243,7 +243,7 @@ enable_windows_support = false
windows_vpc_resource_controller_image_tag = "v0.2.7" # enable_windows_support= true
windows_vpc_admission_webhook_image_tag = "v0.2.7" # enable_windows_support= true
-enable_self_managed_nodegroups = true
+enable_self_managed_nodegroups = false
self_managed_node_groups = {
#---------------------------------------------------------#
# ON-DEMAND Self Managed Worker Group - Worker Group - 1
@@ -525,7 +525,7 @@ aws_for_fluent_bit_helm_chart_name = "aws-for-fluent-bit"
#---------------------------------------------------------#
# ENABLE TRAEFIK INGRESS CONTROLLER
#---------------------------------------------------------#
-traefik_ingress_controller_enable = true
+traefik_ingress_controller_enable = false
traefik_helm_chart_url = "https://helm.traefik.io/traefik"
traefik_helm_chart_name = "traefik"
traefik_helm_chart_version = "10.0.0"
@@ -534,7 +534,7 @@ traefik_image_repo_name = "traefik"
#---------------------------------------------------------#
# ENABLE NGINX INGRESS CONTROLLER
#---------------------------------------------------------#
-nginx_ingress_controller_enable = true
+nginx_ingress_controller_enable = false
nginx_helm_chart_version = "3.33.0"
nginx_helm_chart_url = "https://kubernetes.github.io/ingress-nginx"
nginx_helm_chart_name = "ingress-nginx"
@@ -549,7 +549,7 @@ nginx_image_repo_name = "ingress-nginx/controller"
# By default Agones prefers to be scheduled on nodes labeled with agones.dev/agones-system=true and tolerates the node taint agones.dev/agones-system=true:NoExecute.
# If no dedicated nodes are available, Agones will run on regular nodes.
#---------------------------------------------------------#
-agones_enable = true
+agones_enable = false
expose_udp = true
agones_helm_chart_name = "agones"
agones_helm_chart_url = "https://agones.dev/chart/stable"
@@ -563,7 +563,7 @@ agones_game_server_maxport = 8000
#---------------------------------------------------------#
# CERT MANAGER
#---------------------------------------------------------#
-cert_manager_enable = true
+cert_manager_enable = false
cert_manager_image_tag = "v1.5.3"
cert_manager_helm_chart_version = "0.1.0"
cert_manager_install_crds = true
@@ -571,3 +571,30 @@ cert_manager_helm_chart_name = "cert-manager"
cert_manager_helm_chart_url = "https://charts.jetstack.io"
cert_manager_image_repo_name = "quay.io/jetstack/cert-manager-controller"
#---------------------------------------------------------#
# ENABLE AWS Distro for OpenTelemetry Collector in EKS
# Help : https://aws-otel.github.io/docs/setup/eks
#---------------------------------------------------------#
aws_open_telemetry_enable = false
aws_open_telemetry_emitter_image = "aottestbed/aws-otel-collector-sample-app:java-0.1.0"
aws_open_telemetry_collector_image = "amazon/aws-otel-collector:latest"
aws_open_telemetry_aws_region = "eu-west-1"
aws_open_telemetry_oltp_endpoint = "localhost:4317"
#---------------------------------------------------------#
# ENABLE OPENTELEMETRY COLLECTOR FOR NODE GROUPS
#---------------------------------------------------------#
opentelemetry_enable = false
opentelemetry_image = "otel/opentelemetry-collector"
opentelemetry_image_tag = "0.31.0"
opentelemetry_command_name = "otelcol"
opentelemetry_helm_chart_url = "https://open-telemetry.github.io/opentelemetry-helm-charts"
opentelemetry_helm_chart = "open-telemetry/opentelemetry-collector"
opentelemetry_helm_chart_version = "0.5.9"
opentelemetry_enable_standalone_collector = true
//opentelemetry_enable_agent_collector = true
opentelemetry_enable_autoscaling_standalone_collector = true
//opentelemetry_enable_container_logs = true
opentelemetry_min_standalone_collectors = 1
opentelemetry_max_standalone_collectors = 10
@@ -20,7 +20,8 @@
# FARGATE PROFILES
# ---------------------------------------------------------------------------------------------------------------------
module "fargate-profiles" {
-  source = "git@github.com:aws-ia/terraform-aws-eks-fargate.git"
+  // source = "git@github.com:aws-ia/terraform-aws-eks-fargate.git"
+  source = "git@github.com:vara-bonthu/terraform-aws-eks-fargate.git"
for_each = { for k, v in var.fargate_profiles : k => v if var.enable_fargate && length(var.fargate_profiles) > 0 }
......
@@ -122,5 +122,31 @@ module "helm" {
  windows_vpc_resource_controller_image_tag = var.windows_vpc_resource_controller_image_tag
  windows_vpc_admission_webhook_image_tag   = var.windows_vpc_admission_webhook_image_tag

  # ------- AWS Distro for OpenTelemetry Collector in EKS Module ---------
  aws_open_telemetry_enable          = var.aws_open_telemetry_enable
  aws_open_telemetry_namespace       = var.aws_open_telemetry_namespace
  aws_open_telemetry_aws_region      = var.aws_open_telemetry_aws_region
  aws_open_telemetry_collector_image = var.aws_open_telemetry_collector_image
  aws_open_telemetry_emitter_image   = var.aws_open_telemetry_emitter_image
  aws_open_telemetry_oltp_endpoint   = var.aws_open_telemetry_oltp_endpoint
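  # Node-group IAM role names are collected here (despite the *_arns variable
  # names) so the aws-otel-eks module can attach its IAM policy to each worker
  # node role.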
  aws_open_telemetry_mg_node_iam_role_arns      = var.create_eks && var.enable_managed_nodegroups ? values({ for nodes in sort(keys(var.managed_node_groups)) : nodes => join(",", module.managed-node-groups[nodes].manage_ng_iam_role_name) }) : []
  aws_open_telemetry_self_mg_node_iam_role_arns = var.create_eks && var.enable_self_managed_nodegroups ? values({ for nodes in sort(keys(var.self_managed_node_groups)) : nodes => join(",", module.aws-eks-self-managed-node-groups[nodes].self_managed_iam_role_name) }) : []

  # ------- OpenTelemetry Module ---------
  opentelemetry_enable                                   = var.opentelemetry_enable
  opentelemetry_command_name                             = var.opentelemetry_command_name
  opentelemetry_helm_chart                               = var.opentelemetry_helm_chart
  opentelemetry_image                                    = var.opentelemetry_image
  opentelemetry_image_tag                                = var.opentelemetry_image_tag
  opentelemetry_helm_chart_version                       = var.opentelemetry_helm_chart_version
  opentelemetry_enable_agent_collector                   = var.opentelemetry_enable_agent_collector
  opentelemetry_enable_standalone_collector              = var.opentelemetry_enable_standalone_collector
  opentelemetry_enable_autoscaling_standalone_collector  = var.opentelemetry_enable_autoscaling_standalone_collector
  opentelemetry_enable_container_logs                    = var.opentelemetry_enable_container_logs
  opentelemetry_min_standalone_collectors                = var.opentelemetry_min_standalone_collectors
  opentelemetry_max_standalone_collectors                = var.opentelemetry_max_standalone_collectors
  opentelemetry_helm_chart_url                           = var.opentelemetry_helm_chart_url

  depends_on = [module.eks]
}
\ No newline at end of file
resource "kubernetes_namespace" "aws_otel_eks" {
metadata {
name = var.aws_open_telemetry_namespace
labels = {
name = var.aws_open_telemetry_namespace
}
}
}
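# Sample workload from the ADOT EKS setup docs: an emitter application
# container paired with the ADOT collector as a sidecar in the same pod.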
resource "kubernetes_deployment" "aws_otel_eks_sidecar" {
metadata {
name = "aws-otel-eks-sidecar"
namespace = var.aws_open_telemetry_namespace
labels = {
name = "aws-otel-eks-sidecar"
}
}
spec {
replicas = 1
selector {
match_labels = {
name = "aws-otel-eks-sidecar"
}
}
template {
metadata {
labels = {
name = "aws-otel-eks-sidecar"
}
}
spec {
container {
name = "aws-otel-emitter"
image = var.aws_open_telemetry_emitter_image
env {
name = "OTEL_OTLP_ENDPOINT"
value = var.aws_open_telemetry_oltp_endpoint
}
env {
name = "OTEL_RESOURCE_ATTRIBUTES"
value = "service.namespace=AWSObservability,service.name=CloudWatchEKSService"
}
env {
name = "S3_REGION"
value = var.aws_open_telemetry_aws_region
}
image_pull_policy = "Always"
}
container {
name = "aws-otel-collector"
image = var.aws_open_telemetry_collector_image
env {
name = "AWS_REGION"
value = var.aws_open_telemetry_aws_region
}
resources {
limits = {
cpu = "256m"
memory = "512Mi"
}
requests = {
cpu = "32m"
memory = "24Mi"
}
}
image_pull_policy = "Always"
}
}
}
}
}
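# IAM permissions the ADOT collector needs (CloudWatch Logs, X-Ray, SSM
# Parameter Store); attached below to every node-group role passed into
# this module.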
resource "aws_iam_policy" "eks_aws_otel_policy" {
name = "AWSDistroOpenTelemetryPolicy"
path = "/"
description = "eks autoscaler policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:PutLogEvents",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:DescribeLogGroups",
"xray:PutTraceSegments",
"xray:PutTelemetryRecords",
"xray:GetSamplingRules",
"xray:GetSamplingTargets",
"xray:GetSamplingStatisticSummaries",
"ssm:GetParameters"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "mg-role-policy-attachment" {
for_each = toset(var.aws_open_telemetry_mg_node_iam_role_arns)
role = each.value
policy_arn = aws_iam_policy.eks_aws_otel_policy.arn
}
resource "aws_iam_role_policy_attachment" "self-mg-role-policy-attachment" {
for_each = toset(var.aws_open_telemetry_self_mg_node_iam_role_arns)
role = each.value
policy_arn = aws_iam_policy.eks_aws_otel_policy.arn
}
variable "aws_open_telemetry_namespace" {
default = "aws-otel-eks"
description = "WS Open telemetry namespace"
}
variable "aws_open_telemetry_emitter_image" {
default = "aottestbed/aws-otel-collector-sample-app:java-0.1.0"
description = "AWS Open telemetry emitter image id and tag"
}
variable "aws_open_telemetry_collector_image" {
default = "amazon/aws-otel-collector:latest"
description = "AWS Open telemetry collector image id and tag"
}
variable "aws_open_telemetry_aws_region" {
description = "AWS Open telemetry region"
}
variable "aws_open_telemetry_oltp_endpoint" {
default = "localhost:4317"
description = "AWS Open telemetry OLTP endpoint"
}
variable "aws_open_telemetry_mg_node_iam_role_arns" {
type = list(string)
default = []
}
variable "aws_open_telemetry_self_mg_node_iam_role_arns" {
type = list(string)
default = []
}
@@ -17,3 +17,5 @@
*/
data "aws_caller_identity" "current" {}
data "aws_region" "current" {}
\ No newline at end of file
@@ -149,3 +149,36 @@ module "windows_vpc_controllers" {
    module.cert_manager
  ]
}
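# Terraform 0.13+ supports count on module blocks; each collector module below
# is created only when its feature flag is enabled.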
module "aws_opentelemetry_collector" {
count = var.aws_open_telemetry_enable == true ? 1 : 0
source = "./aws-otel-eks"
aws_open_telemetry_aws_region = var.aws_open_telemetry_aws_region == "" ? data.aws_region.current.id : var.aws_open_telemetry_aws_region
aws_open_telemetry_emitter_image = var.aws_open_telemetry_emitter_image
aws_open_telemetry_collector_image = var.aws_open_telemetry_collector_image
aws_open_telemetry_oltp_endpoint = var.aws_open_telemetry_oltp_endpoint
aws_open_telemetry_mg_node_iam_role_arns = var.aws_open_telemetry_mg_node_iam_role_arns
aws_open_telemetry_self_mg_node_iam_role_arns = var.aws_open_telemetry_self_mg_node_iam_role_arns
}
module "opentelemetry_collector" {
count = var.opentelemetry_enable == true ? 1 : 0
source = "./opentelemetry_collector"
private_container_repo_url = var.private_container_repo_url
public_docker_repo = var.public_docker_repo
opentelemetry_command_name = var.opentelemetry_command_name
opentelemetry_helm_chart = var.opentelemetry_helm_chart
opentelemetry_helm_chart_url = var.opentelemetry_helm_chart_url
opentelemetry_image = var.opentelemetry_image
opentelemetry_image_tag = var.opentelemetry_image_tag
opentelemetry_helm_chart_version = var.opentelemetry_helm_chart_version
opentelemetry_enable_agent_collector = var.opentelemetry_enable_agent_collector
opentelemetry_enable_standalone_collector = var.opentelemetry_enable_standalone_collector
opentelemetry_enable_autoscaling_standalone_collector = var.opentelemetry_enable_autoscaling_standalone_collector
opentelemetry_enable_container_logs = var.opentelemetry_enable_container_logs
opentelemetry_min_standalone_collectors = var.opentelemetry_min_standalone_collectors
opentelemetry_max_standalone_collectors = var.opentelemetry_max_standalone_collectors
}
# OpenTelemetry Helm Chart

## Introduction

The OpenTelemetry Collector offers a vendor-agnostic implementation of how to receive, process and export telemetry data. In addition, it removes the need to run, operate and maintain multiple agents/collectors in order to support open-source telemetry data formats (e.g. Jaeger, Prometheus, etc.) sending to multiple open-source or commercial back-ends.
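In this repository the chart is installed through Terraform's helm provider rather than by hand; as a minimal sketch, the collector can be switched on from a tfvars file using the variables this commit introduces (the values shown are illustrative):

```hcl
# Illustrative terraform.tfvars sketch; the variable names come from this commit.
opentelemetry_enable                                   = true
opentelemetry_enable_agent_collector                   = true  # daemonset agent on every node
opentelemetry_enable_standalone_collector              = true  # standalone gateway deployment
opentelemetry_enable_autoscaling_standalone_collector  = true
opentelemetry_min_standalone_collectors                = 1
opentelemetry_max_standalone_collectors                = 10
```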
## Helm Chart

### Instructions to use Helm Charts

    helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts

Additional details about the official OpenTelemetry Helm chart can be found [here](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-collector).
## Docker Image

### Instructions to upload the opentelemetry-collector Docker image to AWS ECR

Step 1: Pull the Docker image to your local machine

    $ docker pull otel/opentelemetry-collector:0.31.0

Step 2: Retrieve an authentication token and authenticate your Docker client to your registry, using the AWS CLI:

    $ aws ecr get-login-password --region eu-west-1 | docker login --username AWS --password-stdin <account id>.dkr.ecr.eu-west-1.amazonaws.com

Step 3: Create an ECR repository for opentelemetry-collector if you don't have one:

    $ aws ecr create-repository --repository-name otel/opentelemetry-collector --image-scanning-configuration scanOnPush=true

Step 4: After the pull completes, tag the image so you can push it to this repository:

    $ docker tag otel/opentelemetry-collector:0.31.0 <account id>.dkr.ecr.eu-west-1.amazonaws.com/otel/opentelemetry-collector:0.31.0

Step 5: Push the image to your newly created AWS ECR repository:

    $ docker push <account id>.dkr.ecr.eu-west-1.amazonaws.com/otel/opentelemetry-collector:0.31.0
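Once pushed, the module picks the private image via the conditional in `locals` below; a sketch of the matching inputs, assuming `private_container_repo_url` includes the trailing slash (the module concatenates it directly with the image name):

```hcl
# Illustrative inputs for a private registry; with these, local.image_url becomes
# "<account id>.dkr.ecr.eu-west-1.amazonaws.com/otel/opentelemetry-collector".
public_docker_repo         = false
private_container_repo_url = "<account id>.dkr.ecr.eu-west-1.amazonaws.com/"
```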
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: MIT-0
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
locals {
  image_url = var.public_docker_repo ? var.opentelemetry_image : "${var.private_container_repo_url}${var.opentelemetry_image}"
}

resource "kubernetes_namespace" "opentelemetry_system" {
  metadata {
    name = "opentelemetry-system"
  }
}
resource "helm_release" "opentelemetry-collector" {
name = "opentelemetry-collector"
repository = var.opentelemetry_helm_chart_url
chart = var.opentelemetry_helm_chart
version = var.opentelemetry_helm_chart_version
namespace = kubernetes_namespace.opentelemetry_system.id
timeout = "1200"
values = [templatefile("${path.module}/templates/open-telemetry-values.yaml", {
image = local.image_url
tag = var.opentelemetry_image_tag
command_name = var.opentelemetry_command_name
enable_agent_collector = var.opentelemetry_enable_agent_collector
enable_container_logs = var.opentelemetry_enable_container_logs
enable_standalone_collector = var.opentelemetry_enable_standalone_collector
enable_autoscaling_standalone_collector = var.opentelemetry_enable_autoscaling_standalone_collector
min_standalone_collectors = var.opentelemetry_min_standalone_collectors
max_standalone_collectors = var.opentelemetry_max_standalone_collectors
})]
}
# Default values for opentelemetry-collector.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

nameOverride: ""
fullnameOverride: ""

config:
  exporters:
    logging: {}
  extensions:
    health_check: {}
  processors:
    batch: {}
    # If set to null, will be overridden with values based on k8s resource limits
    memory_limiter: null
  receivers:
    jaeger:
      protocols:
        grpc:
          endpoint: 0.0.0.0:14250
        thrift_http:
          endpoint: 0.0.0.0:14268
    otlp:
      protocols:
        grpc: null
        http: null
    prometheus:
      config:
        scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
              - targets:
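                # $$ escapes $ for Terraform's templatefile, so the chart
                # receives the pod-IP placeholder as a literal value.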
                - $${MY_POD_IP}:8888
    zipkin:
      endpoint: 0.0.0.0:9411
  service:
    extensions:
      - health_check
    pipelines:
      logs:
        exporters:
          - logging
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
      metrics:
        exporters:
          - logging
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
          - prometheus
      traces:
        exporters:
          - logging
        processors:
          - memory_limiter
          - batch
        receivers:
          - otlp
          - jaeger
          - zipkin

# Shared params for agentCollector daemonset and standaloneCollector deployment pods.
# Can be overridden here or for any component independently using the same keys.
image:
  # If you want to use the contrib image `otel/opentelemetry-collector-contrib`, you also need to change `command.name` value to `otelcontribcol`.
  repository: ${image}
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ${tag}
imagePullSecrets: []

# OpenTelemetry Collector executable
command:
  name: ${command_name}
  extraArgs: []

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podSecurityContext: {}
securityContext: {}

nodeSelector: {}
tolerations: []
affinity: {}

# Allows for pod scheduler prioritisation
# Can be overridden here or for agentCollector and standaloneCollector independently.
priorityClassName: ""

extraEnvs: []
extraConfigMapMounts: []
extraHostPathMounts: []
secretMounts: []

# Configuration for ports, shared between agentCollector, standaloneCollector and service.
# Can be overridden here or for agentCollector and standaloneCollector independently.
ports:
  otlp:
    enabled: true
    containerPort: 4317
    servicePort: 4317
    hostPort: 4317
    protocol: TCP
  jaeger-thrift:
    enabled: true
    containerPort: 14268
    servicePort: 14268
    hostPort: 14268
    protocol: TCP
  jaeger-grpc:
    enabled: true
    containerPort: 14250
    servicePort: 14250
    hostPort: 14250
    protocol: TCP
  zipkin:
    enabled: true
    containerPort: 9411
    servicePort: 9411
    hostPort: 9411
    protocol: TCP

# Configuration for agent OpenTelemetry Collector daemonset, enabled by default
agentCollector:
  enabled: ${enable_agent_collector}
  containerLogs:
    enabled: ${enable_container_logs}
  resources:
    limits:
      cpu: 256m
      memory: 512Mi
  podAnnotations: {}
  # Configuration override that will be merged into the agent's default config
  configOverride: {}
  # The following config override can be used to enable host receiver
  # receivers:
  #   hostmetrics:
  #     scrapers:
  #       cpu:
  #       disk:
  #       filesystem:
  # service:
  #   pipelines:
  #     metrics:
  #       receivers: [prometheus, hostmetrics]
  # Any of the top level options can be overridden specifically for agent
  # image: {}
  # imagePullSecrets: []
  # command: {}
  # serviceAccount: {}
  # podSecurityContext: {}
  # securityContext: {}
  # nodeSelector: {}
  # tolerations: []
  # affinity: {}
  # priorityClassName: ""
  # The following option overrides can be used with host receiver
  # extraEnvs:
  # - name: HOST_PROC
  #   value: /hostfs/proc
  # - name: HOST_SYS
  #   value: /hostfs/sys
  # - name: HOST_ETC
  #   value: /hostfs/etc
  # - name: HOST_VAR
  #   value: /hostfs/var
  # - name: HOST_RUN
  #   value: /hostfs/run
  # - name: HOST_DEV
  #   value: /hostfs/dev
  # extraHostPathMounts:
  # - name: hostfs
  #   hostPath: /
  #   mountPath: /hostfs
  #   readOnly: true
  #   mountPropagation: HostToContainer
  # The following example disables all jaeger ports and zipkin hostPort on the agent
  # ports:
  #   jaeger-thrift:
  #     enabled: false
  #   jaeger-grpc:
  #     enabled: false
  #   zipkin:
  #     hostPort: ""

# Configuration for standalone OpenTelemetry Collector deployment, disabled by default
standaloneCollector:
  enabled: ${enable_standalone_collector}
  replicaCount: ${min_standalone_collectors}
  resources:
    limits:
      cpu: 1
      memory: 2Gi
  podAnnotations: {}
  # Configuration override that will be merged into the standalone collector default config
  configOverride: {}
  # Any of the top level options can be overridden specifically for standalone collector
  # image: {}
  # imagePullSecrets: []
  # command: {}
  # serviceAccount: {}
  # podSecurityContext: {}
  # securityContext: {}
  # nodeSelector: {}
  # tolerations: []
  # affinity: {}
  # priorityClassName: ""
  # ports: {}

service:
  type: ClusterIP
  annotations: {}

podMonitor:
  enabled: false
  metricsEndpoints: {}
  # - port: prometheus
  # additional labels for the PodMonitor
  extraLabels: {}
  # release: kube-prometheus-stack

serviceMonitor:
  enabled: false
  metricsEndpoints: {}
  # - port: metrics
  #   interval: 15s
  # additional labels for the ServiceMonitor
  extraLabels: {}
  # release: kube-prometheus-stack

# autoscaling is used only if standaloneCollector enabled
autoscaling:
  enabled: ${enable_autoscaling_standalone_collector}
  minReplicas: ${min_standalone_collectors}
  maxReplicas: ${max_standalone_collectors}
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
variable "opentelemetry_enable_standalone_collector" {
type = bool
default = false
description = "Enabling the opentelemetry standalone gateway collector on eks cluster"
}
variable "opentelemetry_enable_agent_collector" {
type = bool
default = true
description = "Enabling the opentelemetry agent collector on eks cluster"
}
variable "opentelemetry_enable_autoscaling_standalone_collector" {
type = bool
default = false
description = "Enabling the autoscaling of the standalone gateway collector on eks cluster"
}
variable "opentelemetry_image_tag" {
default = "0.31.0"
description = "Docker image tag for opentelemetry from open-telemetry"
}
variable "opentelemetry_image" {
default = "otel/opentelemetry-collector"
description = "Docker image for opentelemetry from open-telemetry"
}
variable "opentelemetry_helm_chart_url" {
default = "https://open-telemetry.github.io/opentelemetry-helm-charts"
description = "opentelemetry helm chart endpoint"
}
variable "opentelemetry_helm_chart_version" {
default = "0.5.9"
description = "Helm chart version for opentelemetry"
}
variable "opentelemetry_helm_chart" {
default = "open-telemetry/opentelemetry-collector"
description = "Helm chart for opentelemetry"
}
variable "opentelemetry_command_name" {
default = "otel"
description = "The OpenTelemetry command.name value"
}
variable "opentelemetry_enable_container_logs" {
default = false
description = "Whether or not to enable container log collection on the otel agents"
}
variable "opentelemetry_min_standalone_collectors" {
default = 1
description = "The minimum number of opentelemetry standalone gateway collectors to run"
}
variable "opentelemetry_max_standalone_collectors" {
default = 3
description = "The maximum number of opentelemetry standalone gateway collectors to run"
}
variable "private_container_repo_url" {}
variable "public_docker_repo" {}
@@ -289,3 +289,55 @@ variable "windows_vpc_admission_webhook_image_tag" {
  default     = "v0.2.7"
  description = "Docker image tag for Windows VPC admission webhook controller"
}

variable "aws_open_telemetry_enable" {}
variable "aws_open_telemetry_namespace" {}
variable "aws_open_telemetry_emitter_image" {}
variable "aws_open_telemetry_collector_image" {}
variable "aws_open_telemetry_aws_region" {}
variable "aws_open_telemetry_oltp_endpoint" {}
variable "aws_open_telemetry_mg_node_iam_role_arns" {}
variable "aws_open_telemetry_self_mg_node_iam_role_arns" {}
variable "opentelemetry_enable" {}
variable "opentelemetry_helm_chart_url" {}
variable "opentelemetry_image_tag" {}
variable "opentelemetry_image" {}
variable "opentelemetry_helm_chart_version" {}
variable "opentelemetry_helm_chart" {}
variable "opentelemetry_command_name" {}
variable "opentelemetry_min_standalone_collectors" {}
variable "opentelemetry_max_standalone_collectors" {}
variable "opentelemetry_enable_standalone_collector" {}
variable "opentelemetry_enable_agent_collector" {}
variable "opentelemetry_enable_autoscaling_standalone_collector" {}
variable "opentelemetry_enable_container_logs" {}
@@ -22,7 +22,8 @@
# ---------------------------------------------------------------------------------------------------------------------
module "managed-node-groups" {
-  source = "git@github.com:aws-ia/terraform-aws-eks-managed_nodegroups.git"
+  // source = "git@github.com:aws-ia/terraform-aws-eks-managed_nodegroups.git"
+  source = "git@github.com:vara-bonthu/terraform-aws-eks-managed_nodegroups.git"
for_each = { for key, value in var.managed_node_groups : key => value
if var.enable_managed_nodegroups && length(var.managed_node_groups) > 0
......
@@ -35,6 +35,16 @@ resource "aws_iam_instance_profile" "managed_ng" {
    create_before_destroy = true
  }
}

//
//resource "aws_iam_role_policy_attachment" "role-policy-attachment" {
//  for_each = toset([
//    "arn:aws:iam::aws:policy/AmazonEC2FullAccess",
//    "arn:aws:iam::aws:policy/AmazonS3FullAccess"
//  ])
//
//  role       = var.iam_role_name
//  policy_arn = each.value
//}

resource "aws_iam_role_policy_attachment" "managed_ng_AmazonEKSWorkerNodePolicy" {
  policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
......
@@ -45,14 +45,14 @@ output "amp_work_arn" {
  value       = var.prometheus_enable ? module.aws_managed_prometheus[0].service_account_amp_ingest_role_arn : "AMP not enabled"
}

//output "self_managed_node_group_iam_role_arns" {
//  description = "IAM role ARNs of self managed node groups"
//  value       = var.create_eks && var.enable_self_managed_nodegroups ? { for nodes in sort(keys(var.self_managed_node_groups)) : nodes => module.aws-eks-self-managed-node-groups[nodes].self_managed_node_group_iam_role_arns } : null
//}

output "self_managed_node_group_iam_role_arns" {
  description = "IAM role ARNs of self managed node groups"
  value       = var.create_eks && var.enable_self_managed_nodegroups ? values({ for nodes in sort(keys(var.self_managed_node_groups)) : nodes => join(",", module.aws-eks-self-managed-node-groups[nodes].self_managed_node_group_iam_role_arns) }) : []
}

output "managed_node_group_iam_role_arns" {
  description = "IAM role ARNs of managed node groups"
-  value = var.create_eks && var.enable_managed_nodegroups ? { for nodes in sort(keys(var.managed_node_groups)) : nodes => module.managed-node-groups[nodes].manage_ng_iam_role_arn } : null
+  value = var.create_eks && var.enable_managed_nodegroups ? values({ for nodes in sort(keys(var.managed_node_groups)) : nodes => join(",", module.managed-node-groups[nodes].manage_ng_iam_role_arn) }) : []
}
output "managed_node_groups" {
......
@@ -19,17 +19,16 @@
terraform {
  required_providers {
    aws = {
-      source = "hashicorp/aws"
-      // version = "3.34.0"
-      version = "3.48.0"
+      source  = "hashicorp/aws"
+      version = "~> 3.60.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
-      version = "2.3.2"
+      version = "~> 2.5.0"
    }
    helm = {
      source  = "hashicorp/helm"
-      version = "2.2.0"
+      version = "~> 2.3.0"
    }
  }
}
......
@@ -17,7 +17,8 @@
*/
module "aws-eks-self-managed-node-groups" {
-  source = "git@github.com:aws-ia/terraform-aws-eks-selfmanaged_nodegroups.git"
+  // source = "git@github.com:aws-ia/terraform-aws-eks-selfmanaged_nodegroups.git"
+  source = "git@github.com:vara-bonthu/terraform-aws-eks-selfmanaged_nodegroups.git"
for_each = { for key, value in var.self_managed_node_groups : key => value
if var.enable_self_managed_nodegroups && length(var.self_managed_node_groups) > 0
......
@@ -91,10 +91,12 @@ variable "vpc_id" {
}

variable "private_subnet_ids" {
  description = "List of private subnet IDs for the worker nodes"
  type        = list(string)
  default     = []
}

variable "public_subnet_ids" {
  description = "List of public subnet IDs for the worker nodes"
  type        = list(string)
  default     = []
}
variable "vpc_cidr_block" {
@@ -104,10 +106,12 @@ variable "vpc_cidr_block" {
}

variable "public_subnets_cidr" {
  description = "List of public subnet CIDRs for the worker nodes"
  type        = list(string)
  default     = []
}

variable "private_subnets_cidr" {
  description = "List of private subnet CIDRs for the worker nodes"
  type        = list(string)
  default     = []
}
variable "create_vpc_endpoints" {
@@ -549,3 +553,101 @@ variable "windows_vpc_admission_webhook_image_tag" {
  default     = "v0.2.7"
  description = "Docker image tag for Windows VPC admission webhook controller"
}

#-----------AWS OPEN TELEMETRY HELM CHART-------------
variable "aws_open_telemetry_enable" {
  default     = false
  description = "Enable the AWS OpenTelemetry collector"
}

variable "aws_open_telemetry_namespace" {
  default     = "aws-otel-eks"
  description = "AWS OpenTelemetry namespace"
}

variable "aws_open_telemetry_emitter_image" {
  default     = "aottestbed/aws-otel-collector-sample-app:java-0.1.0"
  description = "AWS OpenTelemetry emitter image and tag"
}

variable "aws_open_telemetry_collector_image" {
  default     = "amazon/aws-otel-collector:latest"
  description = "AWS OpenTelemetry collector image and tag"
}

variable "aws_open_telemetry_aws_region" {
  default     = ""
  description = "AWS OpenTelemetry region; falls back to the current region when empty"
}

variable "aws_open_telemetry_oltp_endpoint" {
  default     = "localhost:4317"
  description = "AWS OpenTelemetry OTLP endpoint"
}

#-----------OPEN TELEMETRY HELM CHART-------------
variable "opentelemetry_enable" {
  type        = bool
  default     = false
  description = "Enable the OpenTelemetry module on the EKS cluster"
}

variable "opentelemetry_enable_standalone_collector" {
  type        = bool
  default     = false
  description = "Enable the OpenTelemetry standalone gateway collector on the EKS cluster"
}

variable "opentelemetry_enable_agent_collector" {
  type        = bool
  default     = true
  description = "Enable the OpenTelemetry agent collector on the EKS cluster"
}

variable "opentelemetry_enable_autoscaling_standalone_collector" {
  type        = bool
  default     = false
  description = "Enable autoscaling of the standalone gateway collector on the EKS cluster"
}

variable "opentelemetry_image_tag" {
  default     = "0.31.0"
  description = "Docker image tag for opentelemetry-collector from open-telemetry"
}

variable "opentelemetry_image" {
  default     = "otel/opentelemetry-collector"
  description = "Docker image for opentelemetry-collector from open-telemetry"
}

variable "opentelemetry_helm_chart_version" {
  default     = "0.5.9"
  description = "Helm chart version for opentelemetry-collector"
}

variable "opentelemetry_helm_chart" {
  default     = "open-telemetry/opentelemetry-collector"
  description = "Helm chart name for opentelemetry-collector"
}

variable "opentelemetry_command_name" {
  default     = "otelcol"
  description = "The OpenTelemetry command.name value"
}

variable "opentelemetry_enable_container_logs" {
  default     = false
  description = "Whether or not to enable container log collection on the OTel agents"
}

variable "opentelemetry_min_standalone_collectors" {
  default     = 1
  description = "The minimum number of OpenTelemetry standalone gateway collectors to run"
}

variable "opentelemetry_max_standalone_collectors" {
  default     = 3
  description = "The maximum number of OpenTelemetry standalone gateway collectors to run"
}

variable "opentelemetry_helm_chart_url" {
  default     = "https://open-telemetry.github.io/opentelemetry-helm-charts"
  description = "OpenTelemetry Helm chart repository URL"
}
\ No newline at end of file