diff --git a/auth.tf b/auth.tf
index 224675782fe7236c5812a2a6e5ca84cedd5602d1..18253f25e9bcabeb209abc480415d469fc9d292a 100644
--- a/auth.tf
+++ b/auth.tf
@@ -27,9 +27,33 @@
 //  }
 //}
 
+
+//
+//resource "null_resource" "wait_for_cluster" {
+//  count      = var.apply_config_map_aws_auth ? 1 : 0
+//  depends_on = [module.eks.cluster_id]
+//
+//  provisioner "local-exec" {
+//    command     = var.wait_for_cluster_command
+//    interpreter = var.local_exec_interpreter
+//    environment = {
+//      ENDPOINT = module.eks.cluster_endpoint
+//    }
+//  }
+//}
 //
-//module "aws-auth" {
+//resource "kubernetes_config_map" "aws_auth" {
+//  count      = var.apply_config_map_aws_auth ? 1 : 0
+//  depends_on = [null_resource.wait_for_cluster[0]]
 //
+//  metadata {
+//    name      = "aws-auth"
+//    namespace = "kube-system"
+//  }
 //
-//  depends_on = [eks, mana, farga, self, rbac]
+//  data = {
+//    mapRoles    = replace(yamlencode(distinct(concat(local.rbac_roles, var.map_additional_iam_roles))), "\"", local.yaml_quote)
+//    mapUsers    = replace(yamlencode(var.map_additional_iam_users), "\"", local.yaml_quote)
+//    mapAccounts = replace(yamlencode(var.map_additional_aws_accounts), "\"", local.yaml_quote)
+//  }
 //}
\ No newline at end of file
diff --git a/data.tf b/data.tf
index c088f5ee7039624d5b1b23a5cbf3d42ce76d750b..dec7c3a6202168551d5808de2ff04bd7dc73d22f 100644
--- a/data.tf
+++ b/data.tf
@@ -41,7 +41,6 @@ data "aws_availability_zones" "available" {
   state = "available"
 }
 
-
 /*
 # Data source used to avoid race condition
 data "aws_vpc_endpoint_service" "dynamodb" {
diff --git a/eks.tf b/eks.tf
index eba5c297ad265733b6a1d2bc4c3e5252c928e258..b0286a579907a1eacb2d0bb634bf4f801ccc6f92 100644
--- a/eks.tf
+++ b/eks.tf
@@ -76,41 +76,41 @@ module "eks" {
   #----------------------------------------------------------------------------------
   # Conditionally allow Worker nodes <-> primary cluster SG traffic
   # See https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#im-using-both-aws-managed-node-groups-and-self-managed-worker-groups-and-pods-scheduled-on-a-aws-managed-node-groups-are-unable-resolve-dns-even-communication-between-pods
-  worker_create_cluster_primary_security_group_rules = var.enable_self_managed_nodegroups
-
-  # Conditionally create a self-managed node group (worker group) - either Windows or Linux
-  worker_groups_launch_template = var.enable_self_managed_nodegroups ? [{
-    name     = var.self_managed_nodegroup_name
-    platform = local.self_managed_node_platform
-
-    # Use custom AMI, user data script template, and its parameters, if provided in input. 
-    # Otherwise, use default EKS-optimized AMI, user data script for Windows / Linux.
-    ami_id                       = var.self_managed_node_ami_id != "" ? var.self_managed_node_ami_id : var.enable_windows_support ? data.aws_ami.windows2019core.id : data.aws_ami.amazonlinux2eks.id
-    userdata_template_file       = var.self_managed_node_userdata_template_file != "" ? var.self_managed_node_userdata_template_file : var.enable_windows_support ? "./templates/userdata-windows.tpl" : "./templates/userdata-amazonlinux2eks.tpl"
-    userdata_template_extra_args = var.self_managed_node_userdata_template_extra_params
-
-    override_instance_types = var.self_managed_node_instance_types
-    root_encrypted          = true
-    root_volume_size        = var.self_managed_node_volume_size
-
-    iam_instance_profile_name = var.enable_windows_support ? module.windows_support_iam[0].windows_instance_profile.name : null
-    asg_desired_capacity      = var.self_managed_node_desired_size
-    asg_min_size              = var.self_managed_node_min_size
-    asg_max_size              = var.self_managed_node_max_size
-
-    kubelet_extra_args = "--node-labels=Environment=${var.environment},Zone=${var.zone},WorkerType=SELF_MANAGED_${upper(local.self_managed_node_platform)}"
-
-    # Extra tags, needed for cluster autoscaler autodiscovery
-    tags = var.cluster_autoscaler_enable ? [{
-      key                 = "k8s.io/cluster-autoscaler/enabled",
-      value               = true,
-      propagate_at_launch = true
-      }, {
-      key                 = "k8s.io/cluster-autoscaler/${module.eks-label.id}",
-      value               = "owned",
-      propagate_at_launch = true
-    }] : []
-  }] : []
+  //  worker_create_cluster_primary_security_group_rules = var.enable_self_managed_nodegroups
+  //
+  //  # Conditionally create a self-managed node group (worker group) - either Windows or Linux
+  //  worker_groups_launch_template = var.enable_self_managed_nodegroups ? [{
+  //    name     = var.self_managed_nodegroup_name
+  //    platform = local.self_managed_node_platform
+  //
+  //    # Use custom AMI, user data script template, and its parameters, if provided in input.
+  //    # Otherwise, use default EKS-optimized AMI, user data script for Windows / Linux.
+  //    ami_id                       = var.self_managed_node_ami_id != "" ? var.self_managed_node_ami_id : var.enable_windows_support ? data.aws_ami.windows2019core.id : data.aws_ami.amazonlinux2eks.id
+  //    userdata_template_file       = var.self_managed_node_userdata_template_file != "" ? var.self_managed_node_userdata_template_file : var.enable_windows_support ? "./templates/userdata-windows.tpl" : "./templates/userdata-amazonlinux2eks.tpl"
+  //    userdata_template_extra_args = var.self_managed_node_userdata_template_extra_params
+  //
+  //    override_instance_types = var.self_managed_node_instance_type
+  //    root_encrypted          = true
+  //    root_volume_size        = var.self_managed_node_volume_size
+  //
+  //    iam_instance_profile_name = var.enable_windows_support ? module.windows_support_iam[0].windows_instance_profile.name : null
+  //    asg_desired_capacity      = var.self_managed_node_desired_size
+  //    asg_min_size              = var.self_managed_node_min_size
+  //    asg_max_size              = var.self_managed_node_max_size
+  //
+  //    kubelet_extra_args = "--node-labels=Environment=${var.environment},Zone=${var.zone},WorkerType=SELF_MANAGED_${upper(local.self_managed_node_platform)}"
+  //
+  //    # Extra tags, needed for cluster autoscaler autodiscovery
+  //    tags = var.cluster_autoscaler_enable ? [{
+  //      key                 = "k8s.io/cluster-autoscaler/enabled",
+  //      value               = true,
+  //      propagate_at_launch = true
+  //      }, {
+  //      key                 = "k8s.io/cluster-autoscaler/${module.eks-label.id}",
+  //      value               = "owned",
+  //      propagate_at_launch = true
+  //    }] : []
+  //  }] : []
 
 }
 
diff --git a/live/preprod/eu-west-1/application/dev/dev.tfvars b/live/preprod/eu-west-1/application/dev/dev.tfvars
index 3acaa1f99c3381fa471f3d5703d78bd370a9a337..e04fb02fe1629cb14947fcc8a47a203c7c5bc3e5 100644
--- a/live/preprod/eu-west-1/application/dev/dev.tfvars
+++ b/live/preprod/eu-west-1/application/dev/dev.tfvars
@@ -86,14 +86,107 @@ coredns_addon_version = "v1.8.3-eksbuild.1"
 enable_kube_proxy_addon  = true
 kube_proxy_addon_version = "v1.20.4-eksbuild.2"
 
+
 #---------------------------------------------------------#
-# EKS WORKER NODE GROUPS
+# EKS SELF MANAGED WORKER NODE GROUPS
 #---------------------------------------------------------#
+enable_self_managed_nodegroups = true
+self_managed_node_groups = {
+  #---------------------------------------------------------#
+  # ON-DEMAND Self Managed Worker Group - Worker Group - 1
+  #---------------------------------------------------------#
+  self_mg_4 = {
+    self_managed_nodegroup_name     = "self-mg-4"
+    os_ami_type                     = "amazonlinux2eks"       # amazonlinux2eks  or bottlerocket or windows
+    self_managed_node_ami_id        = "ami-0dfaa019a300f219c" # Modify this to use a custom AMI ID.
+    self_managed_node_userdata      = <<-EOT
+            yum install -y amazon-ssm-agent
+            systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent
+        EOT
+    self_managed_node_volume_size   = "20"
+    self_managed_node_instance_type = "m5.large"
+    self_managed_node_desired_size  = "2"
+    self_managed_node_max_size      = "20"
+    self_managed_node_min_size      = "2"
+    capacity_type                   = "" # Leave this empty unless SPOT capacity is required.
+    kubelet_extra_args              = ""
+    bootstrap_extra_args            = ""
+
+    #self managed node group network configuration
+    subnet_type = "public" # private or public
+    subnet_ids  = []
+
+    #security_group ID
+    self_managed_custom_security_group_id = "" # Provide a custom security group ID from the intended VPC if required; otherwise the module creates a new one.
+
+  },
+
+  #---------------------------------------------------------#
+  # Self Managed SPOT Worker Group - Worker Group - 2
+  #---------------------------------------------------------#
+  self_spot_mg_4 = {
+    self_managed_nodegroup_name     = "self-spot-mg-4"
+    os_ami_type                     = "amazonlinux2eks"       # amazonlinux2eks  or bottlerocket or windows
+    self_managed_node_ami_id        = "ami-0dfaa019a300f219c" # Modify this to use a custom AMI ID.
+    self_managed_node_userdata      = <<-EOT
+            yum install -y amazon-ssm-agent
+            systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent
+        EOT
+    self_managed_node_volume_size   = "20"
+    self_managed_node_instance_type = "m5.xlarge"
+    self_managed_node_desired_size  = "2"
+    self_managed_node_max_size      = "20"
+    self_managed_node_min_size      = "2"
+    capacity_type                   = "spot"
+    kubelet_extra_args              = ""
+    bootstrap_extra_args            = ""
+
+    #self managed node group network configuration
+    subnet_type = "public" # private or public
+    subnet_ids  = []
+
+    #security_group ID
+    self_managed_custom_security_group_id = "" # Provide a custom security group ID from the intended VPC if required; otherwise the module creates a new one.
+
+  },
 
-managed_node_groups = {
   #---------------------------------------------------------#
-  # ON-DEMAND Worker Group - Worker Group - 1
+  # Bottlerocket Self Managed Worker Group - Worker Group - 3
   #---------------------------------------------------------#
+  bottlerocket_mg_4 = {
+    self_managed_nodegroup_name     = "bottlerocket-mg-4"
+    os_ami_type                     = "bottlerocket"          # amazonlinux2eks  or bottlerocket or windows
+    self_managed_node_ami_id        = "ami-044b114caf98ce8c5" # Modify this to use a custom AMI ID.
+    self_managed_node_userdata      = ""
+    self_managed_node_volume_size   = "20"
+    self_managed_node_instance_type = "m5.large"
+    self_managed_node_desired_size  = "2"
+    self_managed_node_max_size      = "5"
+    self_managed_node_min_size      = "2"
+    capacity_type                   = "" # Leave this empty unless SPOT capacity is required.
+    kubelet_extra_args              = ""
+    bootstrap_extra_args            = ""
+
+    #self managed node group network configuration
+    subnet_type = "public" # private or public
+    subnet_ids  = []
+
+    #security_group ID
+    self_managed_custom_security_group_id = "" # Provide a custom security group ID from the intended VPC if required; otherwise the module creates a new one.
+
+  },
+
+}
+
+
+#---------------------------------------------------------#
+# EKS WORKER NODE GROUPS
+#---------------------------------------------------------#
+
+managed_node_groups = {
+  #---------------------------------------------------------#
+  # ON-DEMAND Worker Group - Worker Group - 1
+  #---------------------------------------------------------#
   mg_4 = {
     # 1> Node Group configuration - Part1
     node_group_name        = "mg_4"
@@ -160,7 +253,8 @@ managed_node_groups = {
     disk_size      = 50
 
     # Node Group network configuration
-    subnet_ids = ["subnet-xxx", "subnet-xxx", "subnet-xxx"]
+    subnet_type = "private" # private or public
+    subnet_ids  = []
 
     //         k8s_taints = [{
     //           key = "dedicated"
@@ -234,7 +328,7 @@ fargate_profiles = {
       }
     }]
 
-    subnet_ids = ["subnet-xxx", "subnet-xxx", "subnet-xxx"]
+    subnet_ids = []
 
     additional_tags = {
       ExtraTag    = "Fargate"
@@ -265,7 +359,7 @@ fargate_profiles = {
         }
     }]
 
-    subnet_ids = ["subnet-xxx", "subnet-xxx", "subnet-xxx"]
+    subnet_ids = []
 
     additional_tags = {
       ExtraTag = "Fargate"
@@ -282,9 +376,9 @@ fargate_fluent_bit_enable = false
 #---------------------------------------------------------#
 # SELF-MANAGED WINDOWS NODE GROUP (WORKER GROUP)
 #---------------------------------------------------------#
-enable_self_managed_nodegroups = false
-enable_windows_support         = false
-self_managed_nodegroup_name    = "ng-windows"
+#enable_self_managed_nodegroups = false
+#enable_windows_support         = false
+#self_managed_nodegroup_name    = "ng-windows"
 
 #---------------------------------------------------------#
 # ENABLE HELM MODULES
@@ -305,7 +399,7 @@ metric_server_helm_chart_version = "5.9.2"
 #---------------------------------------------------------#
 # ENABLE CLUSTER AUTOSCALER
 #---------------------------------------------------------#
-cluster_autoscaler_enable       = false
+cluster_autoscaler_enable       = true
 cluster_autoscaler_image_tag    = "v1.20.0"
 cluster_autoscaler_helm_version = "9.9.2"
 
diff --git a/live/preprod/eu-west-1/application/test/test.tfvars b/live/preprod/eu-west-1/application/test/test.tfvars
index c1481f97bf7c8a0f6fdf1335d342016c2201c8dd..78e1bdb95abc649a586c12199e58d694b42549b9 100644
--- a/live/preprod/eu-west-1/application/test/test.tfvars
+++ b/live/preprod/eu-west-1/application/test/test.tfvars
@@ -59,6 +59,7 @@ create_vpc_endpoints = true
 //create_vpc = false
 //vpc_id = "xxxxxx"
 //private_subnet_ids = ['xxxxxx','xxxxxx','xxxxxx']
+//public_subnet_ids = ['xxxxxx','xxxxxx','xxxxxx']
 
 #---------------------------------------------------------#
 # EKS CONTROL PLANE VARIABLES
@@ -90,7 +91,7 @@ kube_proxy_addon_version = "v1.20.4-eksbuild.2"
 #---------------------------------------------------------#
 # EKS WORKER NODE GROUPS
 #---------------------------------------------------------#
-
+enable_managed_nodegroups = true
 managed_node_groups = {
   mg_m5x = {
     # 1> Node Group configuration - Part1
@@ -173,15 +174,53 @@ fargate_profiles = {
   },
 }
 
-# Enable logging only when you create a Fargate profile e.g., enable_fargate = true
-fargate_fluent_bit_enable = false
-
 #---------------------------------------------------------#
 # SELF-MANAGED WINDOWS NODE GROUP (WORKER GROUP)
 #---------------------------------------------------------#
-enable_self_managed_nodegroups = false
-enable_windows_support         = false
-self_managed_nodegroup_name    = "ng-windows"
+enable_self_managed_nodegroups = true
+self_managed_node_groups = {
+  #---------------------------------------------------------#
+  # ON-DEMAND Self Managed Worker Group - Worker Group - 1
+  #---------------------------------------------------------#
+  self_mg_4 = {
+    node_group_name = "self-mg-5"
+    os_ami_type     = "amazonlinux2eks"       # amazonlinux2eks  or bottlerocket or windows
+    custom_ami_id   = "ami-0dfaa019a300f219c" # Modify this to use a custom AMI ID.
+    public_ip       = false
+    pre_userdata    = <<-EOT
+            yum install -y amazon-ssm-agent
+            systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent
+        EOT
+
+    disk_size     = "20"
+    instance_type = "m5.large"
+
+    desired_size = "2"
+    max_size     = "20"
+    min_size     = "2"
+
+    capacity_type = "" # Leave this empty unless SPOT capacity is required.
+
+    k8s_labels = {
+      Environment = "preprod"
+      Zone        = "test"
+      WorkerType  = "SELF_MANAGED_ON_DEMAND"
+    }
+
+    additional_tags = {
+      ExtraTag    = "m5x-on-demand"
+      Name        = "m5x-on-demand"
+      subnet_type = "private"
+    }
+    #self managed node group network configuration
+    subnet_type = "private" # private or public
+    subnet_ids  = []
+
+    #security_group ID
+    create_worker_security_group = true # Set to true to create a dedicated worker security group; set to false to use the default security group created by the EKS core module
+
+  },
+}
 
 #---------------------------------------------------------#
 # ENABLE HELM MODULES
@@ -193,6 +232,9 @@ self_managed_nodegroup_name    = "ng-windows"
 # Or Make it false and set the private contianer image repo url in source/eks.tf; currently this defaults to ECR
 public_docker_repo = true
 
+# Enable logging only when you create a Fargate profile e.g., enable_fargate = true
+fargate_fluent_bit_enable = false
+
 #---------------------------------------------------------#
 # ENABLE METRICS SERVER
 #---------------------------------------------------------#
diff --git a/locals.tf b/locals.tf
index 839162852944b9d5e5c086c6e0275ccbefafdf05..7bf28e74b5df7261844cc28f058343dff2eb6aea 100644
--- a/locals.tf
+++ b/locals.tf
@@ -48,16 +48,16 @@ locals {
   //
   //  yaml_quote = var.aws_auth_yaml_strip_quotes ? "" : "\""
   //
-  //  # Self managed node IAM Roles for aws-auth
+  //  # Managed node IAM Roles for aws-auth
   //  managed_map_worker_roles = [
-  //  for role_arn in module.managed-node-groups.mg_linux_roles : {
-  //    rolearn : role_arn
-  //    username : "system:node:{{EC2PrivateDNSName}}"
-  //    groups : [
-  //      "system:bootstrappers",
-  //      "system:nodes"
-  //    ]
-  //  }
+  //    for role_arn in var.managed_node_groups["node_group_name"] : {
+  //      rolearn : role_arn
+  //      username : "system:node:{{EC2PrivateDNSName}}"
+  //      groups : [
+  //        "system:bootstrappers",
+  //        "system:nodes"
+  //      ]
+  //    }
   //  ]
   //
   //  # Self managed node IAM Roles for aws-auth
diff --git a/managed-nodegroups.tf b/managed-nodegroups.tf
index c2cb67823f2317b1046782dd1416835e7033e871..347382d36cd1b27b5213ac64d6ec4ef3dacd689a 100644
--- a/managed-nodegroups.tf
+++ b/managed-nodegroups.tf
@@ -4,20 +4,23 @@
 # MANAGED NODE GROUPS
 # ---------------------------------------------------------------------------------------------------------------------
 module "managed-node-groups" {
-  for_each = var.managed_node_groups
+  for_each = length(var.managed_node_groups) > 0 && var.enable_managed_nodegroups ? var.managed_node_groups : {}
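+  # Node groups are created only when enable_managed_nodegroups is true and the map is non-empty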
 
   source     = "./modules/aws-eks-managed-node-groups"
   managed_ng = each.value
 
-  eks_cluster_name          = module.eks.cluster_id
-  private_subnet_ids        = var.create_vpc == false ? var.private_subnet_ids : module.vpc.private_subnets
-  public_subnet_ids         = var.create_vpc == false ? var.public_subnet_ids : module.vpc.public_subnets
-  cluster_ca_base64         = module.eks.cluster_certificate_authority_data
-  cluster_endpoint          = module.eks.cluster_endpoint
-  cluster_autoscaler_enable = var.cluster_autoscaler_enable
-  worker_security_group_id  = module.eks.worker_security_group_id # TODO Create New SecGroup for each node group
-  tags                      = module.eks-label.tags
+  eks_cluster_name  = module.eks.cluster_id
+  cluster_ca_base64 = module.eks.cluster_certificate_authority_data
+  cluster_endpoint  = module.eks.cluster_endpoint
+
+  private_subnet_ids = var.create_vpc == false ? var.private_subnet_ids : module.vpc.private_subnets
+  public_subnet_ids  = var.create_vpc == false ? var.public_subnet_ids : module.vpc.public_subnets
+
+  default_worker_security_group_id = module.eks.worker_security_group_id
+  tags                             = module.eks-label.tags
 
   depends_on = [module.eks]
+  # Ensure the cluster is fully created before trying to add the node group
+  //  module_depends_on = [module.eks.kubernetes_config_map_id]
 
 }
diff --git a/modules/aws-auth/aws-auth.tf b/modules/aws-auth/aws-auth.tf
deleted file mode 100644
index 9f94dff4d34bad19141ad27bf4a938c9385ef435..0000000000000000000000000000000000000000
--- a/modules/aws-auth/aws-auth.tf
+++ /dev/null
@@ -1,30 +0,0 @@
-//
-//
-//resource "null_resource" "wait_for_cluster" {
-//  count      = var.apply_config_map_aws_auth ? 1 : 0
-//  depends_on = [module.eks.cluster_id]
-//
-//  provisioner "local-exec" {
-//    command     = var.wait_for_cluster_command
-//    interpreter = var.local_exec_interpreter
-//    environment = {
-//      ENDPOINT = module.eks.cluster_endpoint
-//    }
-//  }
-//}
-//
-//resource "kubernetes_config_map" "aws_auth" {
-//  count      = var.apply_config_map_aws_auth ? 1 : 0
-//  depends_on = [null_resource.wait_for_cluster[0]]
-//
-//  metadata {
-//    name      = "aws-auth"
-//    namespace = "kube-system"
-//  }
-//
-//  data = {
-//    mapRoles    = replace(yamlencode(distinct(concat(local.rbac_roles, var.map_additional_iam_roles))), "\"", local.yaml_quote)
-//    mapUsers    = replace(yamlencode(var.map_additional_iam_users), "\"", local.yaml_quote)
-//    mapAccounts = replace(yamlencode(var.map_additional_aws_accounts), "\"", local.yaml_quote)
-//  }
-//}
\ No newline at end of file
diff --git a/modules/aws-eks-managed-node-groups/README.md b/modules/aws-eks-managed-node-groups/README.md
index 26008a947cac752f87cb2df92f2effb802831b5e..77f110bb631f7e7b4998e03b00f44b1b36ccbbf7 100644
--- a/modules/aws-eks-managed-node-groups/README.md
+++ b/modules/aws-eks-managed-node-groups/README.md
@@ -22,31 +22,34 @@ No modules.
 | Name | Type |
 |------|------|
 | [aws_eks_node_group.managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
-| [aws_iam_instance_profile.mg_linux](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
+| [aws_iam_instance_profile.managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource |
 | [aws_iam_policy.eks_autoscaler_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_role.mg_linux](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_AmazonEC2ContainerRegistryReadOnly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_AmazonEKSWorkerNodePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_AmazonEKS_CNI_Policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_AmazonPrometheusRemoteWriteAccess](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_AmazonSSMManagedInstanceCore](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_CloudWatchFullAccess](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_ElasticLoadBalancingFullAccess](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
-| [aws_iam_role_policy_attachment.mg_linux_cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role.managed_ng](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_AmazonEC2ContainerRegistryReadOnly](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_AmazonEKSWorkerNodePolicy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_AmazonEKS_CNI_Policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_AmazonPrometheusRemoteWriteAccess](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_AmazonSSMManagedInstanceCore](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_CloudWatchFullAccess](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_ElasticLoadBalancingFullAccess](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.managed_ng_cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
 | [aws_launch_template.managed_node_groups](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
 | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_iam_policy_document.mg_linux_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.managed_ng_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
 | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
 
 ## Inputs
 
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
-| <a name="input_cluster_autoscaler_enable"></a> [cluster\_autoscaler\_enable](#input\_cluster\_autoscaler\_enable) | Enable Cluster Autoscaler | `bool` | `false` | no |
 | <a name="input_cluster_ca_base64"></a> [cluster\_ca\_base64](#input\_cluster\_ca\_base64) | Base64-encoded EKS cluster certificate-authority-data | `string` | `""` | no |
 | <a name="input_cluster_endpoint"></a> [cluster\_endpoint](#input\_cluster\_endpoint) | EKS Cluster K8s API server endpoint | `string` | `""` | no |
 | <a name="input_create_eks"></a> [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created | `bool` | `true` | no |
+| <a name="input_default_worker_security_group_id"></a> [default\_worker\_security\_group\_id](#input\_default\_worker\_security\_group\_id) | Worker group security ID | `string` | `""` | no |
 | <a name="input_eks_cluster_name"></a> [eks\_cluster\_name](#input\_eks\_cluster\_name) | EKS Cluster name | `string` | n/a | yes |
+| <a name="input_http_endpoint"></a> [http\_endpoint](#input\_http\_endpoint) | Whether the Instance Metadata Service (IMDS) is available. Supported values: enabled, disabled | `string` | `"enabled"` | no |
+| <a name="input_http_put_response_hop_limit"></a> [http\_put\_response\_hop\_limit](#input\_http\_put\_response\_hop\_limit) | HTTP PUT response hop limit for instance metadata requests. Supported values: 1-64. | `number` | `1` | no |
+| <a name="input_http_tokens"></a> [http\_tokens](#input\_http\_tokens) | If enabled, will use Instance Metadata Service Version 2 (IMDSv2). Supported values: optional, required. | `string` | `"optional"` | no |
 | <a name="input_managed_ng"></a> [managed\_ng](#input\_managed\_ng) | Map of maps of `eks_node_groups` to create | `any` | `{}` | no |
 | <a name="input_path"></a> [path](#input\_path) | IAM resource path, e.g. /dev/ | `string` | `"/"` | no |
 | <a name="input_private_subnet_ids"></a> [private\_subnet\_ids](#input\_private\_subnet\_ids) | list of private subnets Id's for the Worker nodes | `list` | `[]` | no |
@@ -54,15 +57,16 @@ No modules.
 | <a name="input_tags"></a> [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
 | <a name="input_use_custom_ami"></a> [use\_custom\_ami](#input\_use\_custom\_ami) | Use custom AMI | `bool` | `false` | no |
 | <a name="input_worker_additional_security_group_ids"></a> [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-| <a name="input_worker_security_group_id"></a> [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | Worker group security ID | `string` | `""` | no |
 
 ## Outputs
 
 | Name | Description |
 |------|-------------|
+| <a name="output_launch_template_arn"></a> [launch\_template\_arn](#output\_launch\_template\_arn) | launch templated id for EKS Self Managed Node Group |
 | <a name="output_launch_template_ids"></a> [launch\_template\_ids](#output\_launch\_template\_ids) | launch templated id for EKS Managed Node Group |
 | <a name="output_launch_template_latest_versions"></a> [launch\_template\_latest\_versions](#output\_launch\_template\_latest\_versions) | launch templated version for EKS Managed Node Group |
-| <a name="output_mg_linux_roles"></a> [mg\_linux\_roles](#output\_mg\_linux\_roles) | IAM role ARN for EKS Managed Node Group |
+| <a name="output_manage_ng_iam_role_arn"></a> [manage\_ng\_iam\_role\_arn](#output\_manage\_ng\_iam\_role\_arn) | IAM role ARN for EKS Managed Node Group |
+| <a name="output_manage_ng_iam_role_name"></a> [manage\_ng\_iam\_role\_name](#output\_manage\_ng\_iam\_role\_name) | n/a |
 | <a name="output_node_groups"></a> [node\_groups](#output\_node\_groups) | EKS Managed node group id |
 <!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
 
diff --git a/modules/aws-eks-managed-node-groups/data.tf b/modules/aws-eks-managed-node-groups/data.tf
index e34780db6f707ffe04fcc1a6824d832aba15e280..111c04a7e093cb23719ac4457071537e0aee624a 100644
--- a/modules/aws-eks-managed-node-groups/data.tf
+++ b/modules/aws-eks-managed-node-groups/data.tf
@@ -20,7 +20,7 @@ data "aws_caller_identity" "current" {}
 
 data "aws_partition" "current" {}
 
-data "aws_iam_policy_document" "mg_linux_assume_role_policy" {
+data "aws_iam_policy_document" "managed_ng_assume_role_policy" {
   statement {
     sid = "EKSWorkerAssumeRole"
 
diff --git a/modules/aws-eks-managed-node-groups/iam.tf b/modules/aws-eks-managed-node-groups/iam.tf
index ea9f26bd932b7c52a1c5fee3c7bbe2b9ded14509..641ec67f43f28e240af7f99bd61cc9c63b27eedf 100644
--- a/modules/aws-eks-managed-node-groups/iam.tf
+++ b/modules/aws-eks-managed-node-groups/iam.tf
@@ -16,17 +16,17 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-resource "aws_iam_role" "mg_linux" {
-  name_prefix           = "${local.name_prefix_linux}-${local.managed_node_group["node_group_name"]}"
-  assume_role_policy    = data.aws_iam_policy_document.mg_linux_assume_role_policy.json
+resource "aws_iam_role" "managed_ng" {
+  name                  = "${var.eks_cluster_name}-${local.managed_node_group["node_group_name"]}"
+  assume_role_policy    = data.aws_iam_policy_document.managed_ng_assume_role_policy.json
   path                  = var.path
   force_detach_policies = true
   tags                  = var.tags
 }
 
-resource "aws_iam_instance_profile" "mg_linux" {
-  name_prefix = "${local.name_prefix_linux}-${local.managed_node_group["node_group_name"]}"
-  role        = aws_iam_role.mg_linux.name
+resource "aws_iam_instance_profile" "managed_ng" {
+  name = "${var.eks_cluster_name}-${local.managed_node_group["node_group_name"]}"
+  role = aws_iam_role.managed_ng.name
 
   path = var.path
   tags = var.tags
@@ -36,51 +36,48 @@ resource "aws_iam_instance_profile" "mg_linux" {
   }
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_AmazonEKSWorkerNodePolicy" {
+resource "aws_iam_role_policy_attachment" "managed_ng_AmazonEKSWorkerNodePolicy" {
   policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
-  role       = aws_iam_role.mg_linux.name
+  role       = aws_iam_role.managed_ng.name
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_AmazonEKS_CNI_Policy" {
+resource "aws_iam_role_policy_attachment" "managed_ng_AmazonEKS_CNI_Policy" {
   policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
-  role       = aws_iam_role.mg_linux.name
+  role       = aws_iam_role.managed_ng.name
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_AmazonEC2ContainerRegistryReadOnly" {
+resource "aws_iam_role_policy_attachment" "managed_ng_AmazonEC2ContainerRegistryReadOnly" {
   policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
-  role       = aws_iam_role.mg_linux.name
+  role       = aws_iam_role.managed_ng.name
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_AmazonSSMManagedInstanceCore" {
+resource "aws_iam_role_policy_attachment" "managed_ng_AmazonSSMManagedInstanceCore" {
   policy_arn = "${local.policy_arn_prefix}/AmazonSSMManagedInstanceCore"
-  role       = aws_iam_role.mg_linux.name
+  role       = aws_iam_role.managed_ng.name
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_CloudWatchFullAccess" {
+resource "aws_iam_role_policy_attachment" "managed_ng_CloudWatchFullAccess" {
   policy_arn = "${local.policy_arn_prefix}/CloudWatchFullAccess"
-  role       = aws_iam_role.mg_linux.name
+  role       = aws_iam_role.managed_ng.name
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_ElasticLoadBalancingFullAccess" {
+resource "aws_iam_role_policy_attachment" "managed_ng_ElasticLoadBalancingFullAccess" {
   policy_arn = "${local.policy_arn_prefix}/ElasticLoadBalancingFullAccess"
-  role       = aws_iam_role.mg_linux.name
+  role       = aws_iam_role.managed_ng.name
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_AmazonPrometheusRemoteWriteAccess" {
+resource "aws_iam_role_policy_attachment" "managed_ng_AmazonPrometheusRemoteWriteAccess" {
   policy_arn = "${local.policy_arn_prefix}/AmazonPrometheusRemoteWriteAccess"
-  role       = aws_iam_role.mg_linux.name
+  role       = aws_iam_role.managed_ng.name
 }
 
-resource "aws_iam_role_policy_attachment" "mg_linux_cluster_autoscaler" {
-  count      = var.cluster_autoscaler_enable ? 1 : 0
-  policy_arn = aws_iam_policy.eks_autoscaler_policy[0].arn
-  role       = aws_iam_role.mg_linux.name
+resource "aws_iam_role_policy_attachment" "managed_ng_cluster_autoscaler" {
+  policy_arn = aws_iam_policy.eks_autoscaler_policy.arn
+  role       = aws_iam_role.managed_ng.name
 }
 
 resource "aws_iam_policy" "eks_autoscaler_policy" {
-  count = var.cluster_autoscaler_enable ? 1 : 0
-
-  name        = "${local.name_prefix_linux}-${local.managed_node_group["node_group_name"]}"
+  name        = "${var.eks_cluster_name}-${local.managed_node_group["node_group_name"]}"
   path        = "/"
   description = "eks autoscaler policy"
 
diff --git a/modules/aws-eks-managed-node-groups/locals.tf b/modules/aws-eks-managed-node-groups/locals.tf
index cbaf4e5d2be618d3233c4af9ef041d0e9534c10b..4363f17197be86468edda64bcf14c2bfc4dd7960 100644
--- a/modules/aws-eks-managed-node-groups/locals.tf
+++ b/modules/aws-eks-managed-node-groups/locals.tf
@@ -45,7 +45,7 @@ locals {
     k8s_taints                    = []
     remote_access                 = false
     ec2_ssh_key                   = ""
-    source_security_group_ids     = ""
+    ssh_security_group_id         = ""
     additional_tags               = {}
     custom_ami_type               = "amazonlinux2eks"
     custom_ami_id                 = ""
@@ -57,10 +57,21 @@ locals {
     { subnet_ids = var.managed_ng["subnet_ids"] == [] ? var.managed_ng["subnet_type"] == "public" ? var.public_subnet_ids : var.private_subnet_ids : var.managed_ng["subnet_ids"] }
   )
 
-}
-
-locals {
   policy_arn_prefix = "arn:aws:iam::aws:policy"
-  name_prefix_linux = "${var.eks_cluster_name}-"
   ec2_principal     = "ec2.${data.aws_partition.current.dns_suffix}"
+
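+  # Parameters passed to the userdata template rendered into userdata_base64 below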
+  userdata_params = {
+    cluster_name         = var.eks_cluster_name
+    cluster_ca_base64    = var.cluster_ca_base64
+    cluster_endpoint     = var.cluster_endpoint
+    bootstrap_extra_args = local.managed_node_group["bootstrap_extra_args"]
+    pre_userdata         = local.managed_node_group["pre_userdata"]
+    post_userdata        = local.managed_node_group["post_userdata"]
+    kubelet_extra_args   = local.managed_node_group["kubelet_extra_args"]
+  }
+
+  userdata_base64 = base64encode(
+    templatefile("${path.module}/templates/userdata-${local.managed_node_group["custom_ami_type"]}.tpl", local.userdata_params)
+  )
+
 }
\ No newline at end of file
diff --git a/modules/aws-eks-managed-node-groups/managed-launch-templates.tf b/modules/aws-eks-managed-node-groups/managed-launch-templates.tf
index 04740157bceb6ea96575437c1dc2d4dd0611f710..23e3dcbc8716d8849be8f81689d085596619d8cc 100644
--- a/modules/aws-eks-managed-node-groups/managed-launch-templates.tf
+++ b/modules/aws-eks-managed-node-groups/managed-launch-templates.tf
@@ -15,28 +15,14 @@
  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
-locals {
-  userdata_params = {
-    cluster_name         = var.eks_cluster_name
-    cluster_ca_base64    = var.cluster_ca_base64
-    cluster_endpoint     = var.cluster_endpoint
-    bootstrap_extra_args = local.managed_node_group["bootstrap_extra_args"]
-    pre_userdata         = local.managed_node_group["pre_userdata"]
-    post_userdata        = local.managed_node_group["post_userdata"]
-    kubelet_extra_args   = local.managed_node_group["kubelet_extra_args"]
-  }
-
-  userdata_base64 = base64encode(
-    templatefile("${path.module}/templates/userdata-${local.managed_node_group["custom_ami_type"]}.tpl", local.userdata_params)
-  )
-
-}
 
 resource "aws_launch_template" "managed_node_groups" {
-  name_prefix            = "${var.eks_cluster_name}-${local.managed_node_group["node_group_name"]}"
-  description            = "Launch Template for EKS Managed clusters"
+  name                   = "${var.eks_cluster_name}-${local.managed_node_group["node_group_name"]}"
+  description            = "Launch Template for EKS Managed Node Groups"
   update_default_version = true
 
+  user_data = local.userdata_base64
+
   block_device_mappings {
     device_name = "/dev/xvda"
 
@@ -44,6 +30,8 @@ resource "aws_launch_template" "managed_node_groups" {
       volume_size           = local.managed_node_group["disk_size"]
       volume_type           = local.managed_node_group["disk_type"]
       delete_on_termination = true
+      encrypted             = true
+      // kms_key_id            = ""
     }
   }
 
@@ -56,6 +44,12 @@ resource "aws_launch_template" "managed_node_groups" {
     enabled = true
   }
 
+  metadata_options {
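+    # Instance Metadata Service settings; set http_tokens to "required" to enforce IMDSv2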
+    http_endpoint               = var.http_endpoint
+    http_tokens                 = var.http_tokens
+    http_put_response_hop_limit = var.http_put_response_hop_limit
+  }
+
   tag_specifications {
     resource_type = "instance"
     tags          = merge(var.tags, tomap({ "Name" = "${var.eks_cluster_name}-${local.managed_node_group["node_group_name"]}" }))
@@ -63,11 +57,9 @@ resource "aws_launch_template" "managed_node_groups" {
 
   network_interfaces {
     associate_public_ip_address = local.managed_node_group["public_ip"]
-    security_groups             = [var.worker_security_group_id]
+    security_groups             = [var.default_worker_security_group_id]
   }
 
-  user_data = local.userdata_base64
-
   lifecycle {
     create_before_destroy = true
   }
diff --git a/modules/aws-eks-managed-node-groups/managed-node-groups.tf b/modules/aws-eks-managed-node-groups/managed-node-groups.tf
index e6982c959e3aa25aab170730259221fc6d90307d..d6a80649330749ee2ab8eaadb8607e5766c8e665 100644
--- a/modules/aws-eks-managed-node-groups/managed-node-groups.tf
+++ b/modules/aws-eks-managed-node-groups/managed-node-groups.tf
@@ -18,11 +18,11 @@
 
 resource "aws_eks_node_group" "managed_ng" {
 
-  cluster_name           = var.eks_cluster_name
-  node_group_name_prefix = local.managed_node_group["node_group_name"]
-  //   node_group_name = ""     # Optional when node_group_name_prefix is defined
-  node_role_arn = aws_iam_role.mg_linux.arn
-  subnet_ids    = local.managed_node_group["subnet_ids"]
+  cluster_name = var.eks_cluster_name
+  //  node_group_name_prefix = local.managed_node_group["node_group_name"]
+  node_group_name = local.managed_node_group["node_group_name"]
+  node_role_arn   = aws_iam_role.managed_ng.arn
+  subnet_ids      = local.managed_node_group["subnet_ids"]
 
   scaling_config {
     desired_size = local.managed_node_group["desired_size"]
@@ -56,7 +56,7 @@ resource "aws_eks_node_group" "managed_ng" {
     for_each = local.managed_node_group["remote_access"] == true ? [1] : []
     content {
       ec2_ssh_key               = local.managed_node_group["ec2_ssh_key"]
-      source_security_group_ids = local.managed_node_group["source_security_group_ids"]
+      source_security_group_ids = local.managed_node_group["ssh_security_group_id"]
     }
   }
 
@@ -80,9 +80,9 @@ resource "aws_eks_node_group" "managed_ng" {
   }
 
   depends_on = [
-    aws_iam_role_policy_attachment.mg_linux_AmazonEKS_CNI_Policy,
-    aws_iam_role_policy_attachment.mg_linux_AmazonEKSWorkerNodePolicy,
-    aws_iam_role_policy_attachment.mg_linux_AmazonEC2ContainerRegistryReadOnly,
+    aws_iam_role_policy_attachment.managed_ng_AmazonEKS_CNI_Policy,
+    aws_iam_role_policy_attachment.managed_ng_AmazonEKSWorkerNodePolicy,
+    aws_iam_role_policy_attachment.managed_ng_AmazonEC2ContainerRegistryReadOnly,
   ]
 
 }
\ No newline at end of file
diff --git a/modules/aws-eks-managed-node-groups/outputs.tf b/modules/aws-eks-managed-node-groups/outputs.tf
index 666f24225d591ff076707a39f60c08037e903abf..21d81fe8d3fb0522e8f162100eeb1098455428bf 100644
--- a/modules/aws-eks-managed-node-groups/outputs.tf
+++ b/modules/aws-eks-managed-node-groups/outputs.tf
@@ -16,23 +16,31 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-
 output "node_groups" {
   description = "EKS Managed node group id"
-  value       = aws_eks_node_group.managed_ng.id
+  value       = aws_eks_node_group.managed_ng[*].id
 }
 
-output "mg_linux_roles" {
+output "manage_ng_iam_role_arn" {
   description = "IAM role ARN for EKS Managed Node Group"
-  value       = aws_iam_role.mg_linux.arn
+  value       = aws_iam_role.managed_ng[*].arn
+}
+
+output "manage_ng_iam_role_name" {
+  value = aws_iam_role.managed_ng[*].name
 }
 
 output "launch_template_ids" {
   description = "launch templated id for EKS Managed Node Group"
-  value       = aws_launch_template.managed_node_groups.id
+  value       = aws_launch_template.managed_node_groups[*].id
+}
+
+output "launch_template_arn" {
+  description = "launch templated id for EKS Self Managed Node Group"
+  value       = aws_launch_template.managed_node_groups[*].arn
 }
 
 output "launch_template_latest_versions" {
   description = "launch templated version for EKS Managed Node Group"
-  value       = aws_launch_template.managed_node_groups.default_version
+  value       = aws_launch_template.managed_node_groups[*].default_version
 }
diff --git a/modules/aws-eks-managed-node-groups/variables.tf b/modules/aws-eks-managed-node-groups/variables.tf
index cf85a2e40529b4328bfb2d8f427d81b170544574..d936a4462b7df4dfd4283b4e0c5c1e51358853f2 100644
--- a/modules/aws-eks-managed-node-groups/variables.tf
+++ b/modules/aws-eks-managed-node-groups/variables.tf
@@ -50,7 +50,7 @@ variable "public_subnet_ids" {
   default     = []
 }
 
-variable "worker_security_group_id" {
+variable "default_worker_security_group_id" {
   description = "Worker group security ID"
   type        = string
   default     = ""
@@ -85,8 +85,20 @@ variable "path" {
   description = "IAM resource path, e.g. /dev/"
 }
 
-variable "cluster_autoscaler_enable" {
-  type        = bool
-  description = "Enable Cluster Autoscaler"
-  default     = false
+variable "http_endpoint" {
+  type        = string
+  default     = "enabled"
+  description = "Whether the Instance Metadata Service (IMDS) is available. Supported values: enabled, disabled"
+}
+
+variable "http_tokens" {
+  type        = string
+  default     = "optional"
+  description = "If enabled, will use Instance Metadata Service Version 2 (IMDSv2). Supported values: optional, required."
+}
+
+variable "http_put_response_hop_limit" {
+  type        = number
+  default     = 1
+  description = "HTTP PUT response hop limit for instance metadata requests. Supported values: 1-64."
 }
diff --git a/modules/aws-eks-self-managed-node-groups/aws-eks-self-managed-node-groups.tf b/modules/aws-eks-self-managed-node-groups/aws-eks-self-managed-node-groups.tf
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/modules/aws-eks-self-managed-node-groups/data.tf b/modules/aws-eks-self-managed-node-groups/data.tf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..7b07b9ca015192fabdc8335b3e3a3650c834ee6f 100644
--- a/modules/aws-eks-self-managed-node-groups/data.tf
+++ b/modules/aws-eks-self-managed-node-groups/data.tf
@@ -0,0 +1,19 @@
+data "aws_caller_identity" "current" {}
+
+data "aws_partition" "current" {}
+
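+# Trust policy that allows EC2 instances to assume the self managed node group IAM role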
+data "aws_iam_policy_document" "self_managed_ng_assume_role_policy" {
+  statement {
+    sid = "EKSWorkerAssumeRole"
+
+    actions = [
+      "sts:AssumeRole",
+    ]
+
+    principals {
+      type        = "Service"
+      identifiers = [local.ec2_principal]
+    }
+  }
+}
+
diff --git a/modules/aws-eks-self-managed-node-groups/iam.tf b/modules/aws-eks-self-managed-node-groups/iam.tf
new file mode 100644
index 0000000000000000000000000000000000000000..2640f635108b02edcfbf4db06c6e458afdb4e9b1
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/iam.tf
@@ -0,0 +1,75 @@
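+# IAM role, policy attachments, and instance profile for the self managed node group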
+resource "aws_iam_role" "self_managed_ng" {
+  name                  = "${var.eks_cluster_name}-${local.self_managed_node_group["node_group_name"]}"
+  assume_role_policy    = data.aws_iam_policy_document.self_managed_ng_assume_role_policy.json
+  path                  = var.path
+  force_detach_policies = true
+  tags                  = var.tags
+}
+
+resource "aws_iam_role_policy_attachment" "self_managed_AmazonEKSWorkerNodePolicy" {
+  policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy"
+  role       = aws_iam_role.self_managed_ng.name
+}
+
+resource "aws_iam_role_policy_attachment" "self_managed_AmazonEKS_CNI_Policy" {
+  policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy"
+  role       = aws_iam_role.self_managed_ng.name
+}
+
+resource "aws_iam_role_policy_attachment" "self_managed_AmazonEC2ContainerRegistryReadOnly" {
+  policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly"
+  role       = aws_iam_role.self_managed_ng.name
+}
+
+
+resource "aws_iam_role_policy_attachment" "self_managed_cloudWatchFullAccess" {
+  policy_arn = "${local.policy_arn_prefix}/CloudWatchFullAccess"
+  role       = aws_iam_role.self_managed_ng.name
+}
+
+resource "aws_iam_role_policy_attachment" "self_managed_ElasticLoadBalancingFullAccess" {
+  policy_arn = "${local.policy_arn_prefix}/ElasticLoadBalancingFullAccess"
+  role       = aws_iam_role.self_managed_ng.name
+}
+
+resource "aws_iam_role_policy_attachment" "self_managed_AmazonPrometheusRemoteWriteAccess" {
+  policy_arn = "${local.policy_arn_prefix}/AmazonPrometheusRemoteWriteAccess"
+  role       = aws_iam_role.self_managed_ng.name
+}
+
+resource "aws_iam_role_policy_attachment" "self_managed_cluster_autoscaler" {
+  policy_arn = aws_iam_policy.eks_autoscaler_policy.arn
+  role       = aws_iam_role.self_managed_ng.name
+}
+
+resource "aws_iam_policy" "eks_autoscaler_policy" {
+
+  name        = "${var.eks_cluster_name}-${local.self_managed_node_group["node_group_name"]}-policy"
+  path        = "/"
+  description = "eks autoscaler policy"
+
+  policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "autoscaling:DescribeAutoScalingGroups",
+        "autoscaling:DescribeAutoScalingInstances",
+        "autoscaling:DescribeLaunchConfigurations",
+        "autoscaling:DescribeTags",
+        "autoscaling:SetDesiredCapacity",
+        "autoscaling:TerminateInstanceInAutoScalingGroup"
+      ],
+      "Resource": "arn:aws:autoscaling:*:${data.aws_caller_identity.current.account_id}:autoScalingGroup:*:autoScalingGroupName/*"
+    }
+  ]
+}
+EOF
+}
+
+resource "aws_iam_instance_profile" "self_managed_ng" {
+  name = "${var.eks_cluster_name}-${local.self_managed_node_group["node_group_name"]}"
+  role = aws_iam_role.self_managed_ng.name
+}
\ No newline at end of file
diff --git a/modules/aws-eks-self-managed-node-groups/locals.tf b/modules/aws-eks-self-managed-node-groups/locals.tf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..8acb0f4674e6a73ab1f6d93e0ba7a140a5493e1b 100644
--- a/modules/aws-eks-self-managed-node-groups/locals.tf
+++ b/modules/aws-eks-self-managed-node-groups/locals.tf
@@ -0,0 +1,80 @@
+locals {
+
+  default_self_managed_ng = {
+    node_group_name               = "m4_on_demand"
+    desired_size                  = "1"
+    instance_type                 = "m4.large"
+    key_name                      = ""
+    launch_template_id            = null
+    launch_template_version       = "$Latest"
+    max_size                      = "3"
+    min_size                      = "1"
+    max_unavailable               = "1"
+    kubelet_extra_args            = ""
+    bootstrap_extra_args          = ""
+    disk_size                     = 50
+    disk_type                     = "gp2"
+    enable_monitoring             = true
+    eni_delete                    = true
+    public_ip                     = false
+    pre_userdata                  = ""
+    post_userdata                 = ""
+    additional_security_group_ids = []
+    capacity_type                 = ""
+    ami_type                      = ""
+    create_launch_template        = false
+    subnet_type                   = "private"
+    k8s_labels                    = {}
+    k8s_taints                    = []
+    remote_access                 = false
+    ec2_ssh_key                   = ""
+    security_group_id             = ""
+    additional_tags               = {}
+    os_ami_type                   = "amazonlinux2eks"
+    custom_ami_id                 = ""
+    create_worker_security_group  = false
+  }
+
+  self_managed_node_group = merge(
+    local.default_self_managed_ng,
+    var.self_managed_ng,
+    { subnet_ids = var.self_managed_ng["subnet_ids"] == [] ? var.self_managed_ng["subnet_type"] == "public" ? var.public_subnet_ids : var.private_subnet_ids : var.self_managed_ng["subnet_ids"] }
+  )
+
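+  # Userdata is pre-rendered for each supported OS type; the entry matching os_ami_type is selected below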
+  predefined_custom_ami_types = tolist(["amazonlinux2eks", "bottlerocket", "windows"])
+
+  userdata_base64 = {
+    for os_ami_type in local.predefined_custom_ami_types : os_ami_type => base64encode(
+      templatefile(
+        "${path.module}/templates/userdata-${os_ami_type}.tpl",
+        local.userdata_params
+      )
+    )
+  }
+
+  custom_userdata_base64 = contains(local.predefined_custom_ami_types, local.self_managed_node_group["os_ami_type"]) ? local.userdata_base64[local.self_managed_node_group["os_ami_type"]] : null
+
+  userdata_params = {
+    cluster_name         = var.eks_cluster_name
+    cluster_ca_base64    = var.cluster_ca_base64
+    cluster_endpoint     = var.cluster_endpoint
+    bootstrap_extra_args = local.self_managed_node_group["bootstrap_extra_args"]
+    pre_userdata         = local.self_managed_node_group["pre_userdata"]
+    post_userdata        = local.self_managed_node_group["post_userdata"]
+    kubelet_extra_args   = local.self_managed_node_group["kubelet_extra_args"]
+  }
+
+  policy_arn_prefix = "arn:aws:iam::aws:policy"
+  ec2_principal     = "ec2.${data.aws_partition.current.dns_suffix}"
+
+  common_tags = merge(
+    var.tags,
+    {
+      Name                                                = "${var.eks_cluster_name}-${local.self_managed_node_group["node_group_name"]}"
+      "k8s.io/cluster-autoscaler/${var.eks_cluster_name}" = "owned"
+      "k8s.io/cluster-autoscaler/enabled"                 = "TRUE"
+      "kubernetes.io/cluster/${var.eks_cluster_name}"     = "owned"
+      "ControlledBy"                                      = "terraform"
+  })
+
+}
diff --git a/modules/aws-eks-self-managed-node-groups/outputs.tf b/modules/aws-eks-self-managed-node-groups/outputs.tf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..93b70e425dc136fe7e4b14c08af9e572d778554f 100644
--- a/modules/aws-eks-self-managed-node-groups/outputs.tf
+++ b/modules/aws-eks-self-managed-node-groups/outputs.tf
@@ -0,0 +1,35 @@
+output "self_managed_node_group_name" {
+  description = "EKS Self Managed node group id"
+  value       = local.self_managed_node_group["node_group_name"].*
+}
+
+output "self_managed_node_group_iam_role_arns" {
+  value = aws_iam_role.self_managed_ng[*].arn
+}
+
+output "self_managed_iam_role_name" {
+  value = aws_iam_role.self_managed_ng[*].name
+}
+
+output "self_managed_sec_group_id" {
+  value = var.default_worker_security_group_id == "" ? aws_security_group.self_managed_ng[*].id : [var.default_worker_security_group_id]
+}
+
+output "self_managed_asg_name" {
+  value = aws_autoscaling_group.self_managed_ng[*].name
+}
+
+output "launch_template_latest_versions" {
+  description = "launch templated version for EKS Self Managed Node Group"
+  value       = aws_launch_template.self_managed_ng[*].latest_version
+}
+
+output "launch_template_ids" {
+  description = "launch templated id for EKS Self Managed Node Group"
+  value       = aws_launch_template.self_managed_ng[*].id
+}
+
+output "launch_template_arn" {
+  description = "launch templated id for EKS Self Managed Node Group"
+  value       = aws_launch_template.self_managed_ng[*].arn
+}
diff --git a/modules/aws-eks-self-managed-node-groups/secgroups.tf b/modules/aws-eks-self-managed-node-groups/secgroups.tf
new file mode 100644
index 0000000000000000000000000000000000000000..b5ec067db6cc3cedd3d14de260322d3efd178552
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/secgroups.tf
@@ -0,0 +1,98 @@
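+# Optional dedicated security group for the self managed node group, created when create_worker_security_group is true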
+resource "aws_security_group" "self_managed_ng" {
+  count = local.self_managed_node_group["create_worker_security_group"] == true ? 1 : 0
+
+  name        = "${var.eks_cluster_name}-${local.self_managed_node_group["node_group_name"]}"
+  description = "Security group for all nodes in the ${var.eks_cluster_name} cluster- self managed"
+  vpc_id      = var.vpc_id
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = local.common_tags
+}
+
+resource "aws_security_group_rule" "worker_to_worker_tcp" {
+  description              = "Allow workers tcp communication with each other"
+  from_port                = 0
+  protocol                 = "tcp"
+  security_group_id        = aws_security_group.self_managed_ng[0].id
+  source_security_group_id = aws_security_group.self_managed_ng[0].id
+  to_port                  = 65535
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "worker_to_worker_udp" {
+  count                    = var.default_worker_security_group_id == "" ? 1 : 0
+  description              = "Allow workers udp communication with each other"
+  from_port                = 0
+  protocol                 = "udp"
+  security_group_id        = aws_security_group.self_managed_ng[0].id
+  source_security_group_id = aws_security_group.self_managed_ng[0].id
+  to_port                  = 65535
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "workers_masters_ingress" {
+  count                    = var.default_worker_security_group_id == "" ? 1 : 0
+  description              = "Allow workes kubelets and pods to receive communication from the cluster control plane"
+  from_port                = 1025
+  to_port                  = 65535
+  protocol                 = "tcp"
+  security_group_id        = aws_security_group.self_managed_ng[0].id
+  source_security_group_id = var.cluster_primary_security_group_id
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "workers_masters_https_ingress" {
+  description              = "Allow workers kubelets and pods to receive https from the cluster control plane"
+  from_port                = 443
+  protocol                 = "tcp"
+  security_group_id        = aws_security_group.self_managed_ng[0].id
+  source_security_group_id = var.cluster_primary_security_group_id
+  to_port                  = 443
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "masters_api_ingress" {
+  description              = "Allow cluster control plane to receive communication from workers kubelets and pods"
+  from_port                = 443
+  protocol                 = "tcp"
+  security_group_id        = var.cluster_primary_security_group_id
+  source_security_group_id = aws_security_group.self_managed_ng[0].id
+  to_port                  = 443
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "masters_kubelet_egress" {
+  description              = "Allow the cluster control plane to reach out workers kubelets and pods"
+  from_port                = 10250
+  protocol                 = "tcp"
+  security_group_id        = var.cluster_primary_security_group_id
+  source_security_group_id = aws_security_group.self_managed_ng[0].id
+  to_port                  = 10250
+  type                     = "egress"
+}
+
+resource "aws_security_group_rule" "masters_kubelet_https_egress" {
+  description              = "Allow the cluster control plane to reach out workers kubelets and pods https"
+  from_port                = 443
+  protocol                 = "tcp"
+  security_group_id        = var.cluster_primary_security_group_id
+  source_security_group_id = aws_security_group.self_managed_ng[0].id
+  to_port                  = 443
+  type                     = "egress"
+}
+
+resource "aws_security_group_rule" "masters_workers_egress" {
+  description              = "Allow the cluster control plane to reach out all worker node security group"
+  from_port                = 1025
+  to_port                  = 65535
+  protocol                 = "tcp"
+  security_group_id        = var.cluster_primary_security_group_id
+  source_security_group_id = aws_security_group.self_managed_ng[0].id
+  type                     = "egress"
+}
\ No newline at end of file
diff --git a/modules/aws-eks-self-managed-node-groups/self-managed-launch-templates.tf b/modules/aws-eks-self-managed-node-groups/self-managed-launch-templates.tf
new file mode 100644
index 0000000000000000000000000000000000000000..07f83826c7afd9e436570686968b1f77533d9e08
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/self-managed-launch-templates.tf
@@ -0,0 +1,59 @@
+resource "aws_launch_template" "self_managed_ng" {
+  name        = "${var.eks_cluster_name}-${local.self_managed_node_group["node_group_name"]}"
+  description = "Launch Template for EKS Self Managed Node Groups"
+
+  instance_type = local.self_managed_node_group["instance_type"]
+  image_id      = local.self_managed_node_group["custom_ami_id"]
+
+  update_default_version = true
+  user_data              = local.custom_userdata_base64
+
+  dynamic "instance_market_options" {
+    for_each = local.self_managed_node_group["capacity_type"] == "spot" ? [1] : []
+    content {
+      market_type = local.self_managed_node_group["capacity_type"]
+    }
+  }
+
+  iam_instance_profile {
+    name = aws_iam_instance_profile.self_managed_ng.name
+  }
+
+  ebs_optimized = true
+
+  block_device_mappings {
+    device_name = "/dev/xvda"
+
+    ebs {
+      volume_type = "gp2"
+      volume_size = local.self_managed_node_group["disk_size"]
+      encrypted   = true
+      // kms_key_id            = ""
+      delete_on_termination = true
+    }
+  }
+
+  metadata_options {
+    http_endpoint               = var.http_endpoint
+    http_tokens                 = var.http_tokens
+    http_put_response_hop_limit = var.http_put_response_hop_limit
+  }
+
+  monitoring {
+    enabled = true
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  network_interfaces {
+    associate_public_ip_address = local.self_managed_node_group["public_ip"]
+    security_groups             = local.self_managed_node_group["create_worker_security_group"] == true ? [aws_security_group.self_managed_ng[0].id] : [var.default_worker_security_group_id]
+  }
+
+  tag_specifications {
+    resource_type = "volume"
+    tags          = local.common_tags
+  }
+}
\ No newline at end of file
diff --git a/modules/aws-eks-self-managed-node-groups/self-managed-node-groups.tf b/modules/aws-eks-self-managed-node-groups/self-managed-node-groups.tf
new file mode 100644
index 0000000000000000000000000000000000000000..cb6942bc442d24d35a3228048c85fa2b051e4693
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/self-managed-node-groups.tf
@@ -0,0 +1,26 @@
+resource "aws_autoscaling_group" "self_managed_ng" {
+  name = "${var.eks_cluster_name}-${local.self_managed_node_group["node_group_name"]}"
+
+  max_size            = local.self_managed_node_group["max_size"]
+  min_size            = local.self_managed_node_group["min_size"]
+  vpc_zone_identifier = local.self_managed_node_group["subnet_ids"]
+
+  launch_template {
+    id      = aws_launch_template.self_managed_ng.id
+    version = aws_launch_template.self_managed_ng.latest_version
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  dynamic "tag" {
+    for_each = local.common_tags
+
+    content {
+      key                 = tag.key
+      value               = tag.value
+      propagate_at_launch = true
+    }
+  }
+}
diff --git a/modules/aws-eks-self-managed-node-groups/templates/userdata-amazonlinux2eks.tpl b/modules/aws-eks-self-managed-node-groups/templates/userdata-amazonlinux2eks.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..403537f7c4093977f8f37cc5dac59675f4bcf233
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/templates/userdata-amazonlinux2eks.tpl
@@ -0,0 +1,18 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+#!/bin/bash
+set -ex
+
+# User-supplied pre userdata code
+${pre_userdata}
+
+# Bootstrap and join the cluster
+/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_ca_base64}' --apiserver-endpoint '${cluster_endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
+
+# User-supplied post userdata code
+${post_userdata}
+
+--//--
diff --git a/modules/aws-eks-self-managed-node-groups/templates/userdata-bottlerocket.tpl b/modules/aws-eks-self-managed-node-groups/templates/userdata-bottlerocket.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..d231565c5aa95cec0355c98755574582964650d9
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/templates/userdata-bottlerocket.tpl
@@ -0,0 +1,6 @@
+${pre_userdata}
+[settings.kubernetes]
+api-server = "${cluster_endpoint}"
+cluster-certificate = "${cluster_ca_base64}"
+cluster-name = "${cluster_name}"
+${post_userdata}
\ No newline at end of file
diff --git a/modules/aws-eks-self-managed-node-groups/templates/userdata-windows.tpl b/modules/aws-eks-self-managed-node-groups/templates/userdata-windows.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..85ed08c8e0ffb0acf4df1f1a0aa4517fe23941c4
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/templates/userdata-windows.tpl
@@ -0,0 +1,11 @@
+<powershell>
+${pre_userdata}
+
+[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
+[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
+[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
+& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -KubeletExtraArgs '${kubelet_extra_args}' 3>&1 4>&1 5>&1 6>&1
+$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
+
+${post_userdata}
+</powershell>
diff --git a/modules/aws-eks-self-managed-node-groups/templates/userdata.sh.tpl b/modules/aws-eks-self-managed-node-groups/templates/userdata.sh.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..b44034cfb5d0d1838f4edfd18a361b185756ad18
--- /dev/null
+++ b/modules/aws-eks-self-managed-node-groups/templates/userdata.sh.tpl
@@ -0,0 +1,12 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+#!/bin/bash
+set -xe
+yum install -y amazon-ssm-agent
+systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent
+# Bootstrap and join the cluster
+
+--//--
\ No newline at end of file
diff --git a/modules/aws-eks-self-managed-node-groups/variables.tf b/modules/aws-eks-self-managed-node-groups/variables.tf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..9c3b03e5261dab7c70ef0f4cc2500832cfe26243 100644
--- a/modules/aws-eks-self-managed-node-groups/variables.tf
+++ b/modules/aws-eks-self-managed-node-groups/variables.tf
@@ -0,0 +1,77 @@
+variable "self_managed_ng" {
+  description = "Map of maps of `eks_self_managed_node_groups` to create"
+  type        = any
+  default     = {}
+}
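+
+# Illustrative shape only: the keys below are the ones this module reads in its
+# launch template, autoscaling group and security group files; all values are
+# placeholders chosen by the caller.
+#
+#   self_managed_ng = {
+#     node_group_name              = "self-managed-ondemand"
+#     instance_type                = "m5.large"
+#     custom_ami_id                = "ami-0123456789abcdef0" # placeholder AMI ID
+#     capacity_type                = ""                      # "spot" for Spot Instances
+#     disk_size                    = 50
+#     min_size                     = 1
+#     max_size                     = 3
+#     subnet_ids                   = []                      # subnet IDs for the ASG
+#     public_ip                    = false
+#     create_worker_security_group = true
+#   }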
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "public_subnet_ids" {
+  type = list(string)
+}
+
+variable "private_subnet_ids" {
+  type = list(string)
+}
+
+variable "eks_cluster_name" {
+  description = "EKS Cluster name"
+  type        = string
+}
+
+variable "cluster_endpoint" {
+  type = string
+}
+
+variable "cluster_ca_base64" {
+  type = string
+}
+
+variable "cluster_version" {
+  type        = string
+  description = "Kubernetes cluster version"
+}
+
+variable "default_worker_security_group_id" {
+  type        = string
+  default     = ""
+  description = "Default worker security group id"
+}
+
+variable "cluster_primary_security_group_id" {
+  type        = string
+  default     = ""
+  description = "Cluster Primary security group ID for self managed node group"
+}
+
+variable "tags" {
+  description = "A map of tags to add to all resources"
+  type        = map(string)
+  default     = {}
+}
+
+variable "path" {
+  type        = string
+  default     = "/"
+  description = "IAM resource path, e.g. /dev/"
+}
+
+variable "http_endpoint" {
+  type        = string
+  default     = "enabled"
+  description = "Whether the Instance Metadata Service (IMDS) is available. Supported values: enabled, disabled"
+}
+
+variable "http_tokens" {
+  type        = string
+  default     = "optional"
+  description = "If enabled, will use Instance Metadata Service Version 2 (IMDSv2). Supported values: optional, required."
+}
+
+variable "http_put_response_hop_limit" {
+  type        = number
+  default     = 1
+  description = "HTTP PUT response hop limit for instance metadata requests. Supported values: 1-64."
+}
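+
+# Hardening note (optional, not the default here): to enforce IMDSv2 on the worker
+# nodes, pass http_tokens = "required" when instantiating this module.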
diff --git a/outputs.tf b/outputs.tf
index 7ab3dc2561cc66851d6a9d14d73f305e9106de06..81250dc7edf1837c6a3e741c5f2b893c15940257 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -45,9 +45,19 @@ output "amp_work_arn" {
   value = var.prometheus_enable ? module.aws_managed_prometheus[0].service_account_amp_ingest_role_arn : "AMP not enabled"
 }
 
-output "node_groups" {
+output "self_managed_node_group_iam_role_arns" {
+  description = "IAM role arn's of self managed node groups"
+  value       = var.create_eks && var.enable_self_managed_nodegroups ? { for nodes in sort(keys(var.self_managed_node_groups)) : nodes => module.aws-eks-self-managed-node-groups[nodes].self_managed_node_group_iam_role_arns } : null
+}
+
+output "managed_node_group_iam_role_arns" {
+  description = "IAM role arn's of self managed node groups"
+  value       = var.create_eks && var.enable_managed_nodegroups ? { for nodes in sort(keys(var.managed_node_groups)) : nodes => module.managed-node-groups[nodes].manage_ng_iam_role_arn } : null
+}
+
+output "managed_node_groups" {
   description = "Outputs from EKS node groups "
-  value       = var.create_eks ? module.managed-node-groups.* : []
+  value       = var.create_eks && var.enable_managed_nodegroups ? module.managed-node-groups.* : []
 }
 
 output "fargate_profiles" {
diff --git a/self-managed-nodegroups.tf b/self-managed-nodegroups.tf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..dea6a6d55c1453eb644cf34d86535f58be2bfdd1 100644
--- a/self-managed-nodegroups.tf
+++ b/self-managed-nodegroups.tf
@@ -0,0 +1,25 @@
+module "aws-eks-self-managed-node-groups" {
+  for_each = length(var.self_managed_node_groups) > 0 && var.enable_self_managed_nodegroups ? var.self_managed_node_groups : {}
+
+  source          = "./modules/aws-eks-self-managed-node-groups"
+  self_managed_ng = each.value
+
+  eks_cluster_name  = module.eks.cluster_id
+  cluster_endpoint  = module.eks.cluster_endpoint
+  cluster_ca_base64 = module.eks.cluster_certificate_authority_data
+  cluster_version   = var.kubernetes_version
+  tags              = module.eks-label.tags
+
+  vpc_id             = var.create_vpc == false ? var.vpc_id : module.vpc.vpc_id
+  private_subnet_ids = var.create_vpc == false ? var.private_subnet_ids : module.vpc.private_subnets
+  public_subnet_ids  = var.create_vpc == false ? var.public_subnet_ids : module.vpc.public_subnets
+
+  default_worker_security_group_id  = module.eks.worker_security_group_id
+  cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id
+
+  depends_on = [module.eks]
+  # Ensure the cluster is fully created before trying to add the node group
+  //  module_depends_on = [module.eks.kubernetes_config_map_id]
+
+}
\ No newline at end of file
diff --git a/variables.tf b/variables.tf
index 5f114d5a8c2cd457555fe7e05e0a47e3a770416e..e539716784c932189b719757ab4f878ccb547f61 100644
--- a/variables.tf
+++ b/variables.tf
@@ -137,6 +137,11 @@ variable "enable_irsa" {
 #----------------------------------------------------------
 // EKS CONTROL PLANE
 #----------------------------------------------------------
+variable "create_eks" {
+  type    = bool
+  default = false
+
+}
 variable "kubernetes_version" {
   type        = string
   default     = "1.20"
@@ -184,62 +189,114 @@ variable "enable_kube_proxy_addon" {
 #----------------------------------------------------------
 // EKS WORKER NODES
 #----------------------------------------------------------
+variable "enable_managed_nodegroups" {
+  description = "Enable self-managed worker groups"
+  type        = bool
+  default     = false
+}
 
-# Self-managed NodeGroup (Worker Group)
+variable "managed_node_groups" {
+  type    = any
+  default = {}
+}
 variable "enable_self_managed_nodegroups" {
   description = "Enable self-managed worker groups"
   type        = bool
   default     = false
 }
+variable "self_managed_node_groups" {
+  type    = any
+  default = {}
+}
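+
+# Illustrative only: each entry of this map is passed to the
+# aws-eks-self-managed-node-groups submodule as its `self_managed_ng` input, e.g.
+#
+#   self_managed_node_groups = {
+#     linux = {
+#       node_group_name = "ng-linux"
+#       instance_type   = "m5.large"
+#       min_size        = 1
+#       max_size        = 3
+#     }
+#   }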
+variable "enable_fargate" {
+  default = false
+}
+variable "fargate_profiles" {
+  type    = any
+  default = {}
+}
+
 variable "enable_windows_support" {
-  description = "Enable Windows support in the cluster"
-  type        = bool
-  default     = false
+  description = "Enable Windows support in the cluster"
+  type        = bool
+  default     = false
 }
-variable "self_managed_nodegroup_name" {
-  type        = string
-  default     = "ng-linux"
-  description = "Self-managed worker node group name"
+
+#----------------------------------------------------------
+# CONFIG MAP AWS-AUTH
+#----------------------------------------------------------
+
+variable "map_accounts" {
+  description = "Additional AWS account numbers to add to the aws-auth configmap. "
+  type        = list(string)
+  default     = []
 }
-variable "self_managed_node_ami_id" {
-  type        = string
-  default     = ""
-  description = "Self-managed worker node custom AMI ID"
+
+variable "map_roles" {
+  description = "Additional IAM roles to add to the aws-auth configmap."
+  type = list(object({
+    rolearn  = string
+    username = string
+    groups   = list(string)
+  }))
+  default = []
+}
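+
+# Example entry (illustrative account ID, role name and group):
+#
+#   map_roles = [{
+#     rolearn  = "arn:aws:iam::123456789012:role/ops-admin"
+#     username = "ops-admin"
+#     groups   = ["system:masters"]
+#   }]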
+
+variable "map_users" {
+  description = "Additional IAM users to add to the aws-auth configmap. "
+  type = list(object({
+    userarn  = string
+    username = string
+    groups   = list(string)
+  }))
+  default = []
 }
-variable "self_managed_node_userdata_template_file" {
+variable "iam_path" {
+  description = "If provided, all IAM roles will be created on this path."
   type        = string
-  default     = ""
-  description = "Self-managed worker node custom userdata template file path"
+  default     = "/"
 }
-variable "self_managed_node_userdata_template_extra_params" {
-  type        = map(any)
+
+variable "manage_aws_auth" {
+  description = "Whether to apply the aws-auth configmap file."
+  default     = true
+}
+variable "aws_auth_additional_labels" {
+  description = "Additional kubernetes labels applied on aws-auth ConfigMap"
   default     = {}
-  description = "Self-managed worker node custom userdata template extra parameters"
+  type        = map(string)
 }
-variable "self_managed_node_volume_size" {
-  type        = number
-  default     = 50
-  description = "Volume size in GiB for worker nodes. Defaults to 50. Terraform will only perform drift detection if a configuration value is provided"
+
+variable "aws_auth_yaml_strip_quotes" {
+  type        = bool
+  default     = true
+  description = "If true, remove double quotes from the generated aws-auth ConfigMap YAML to reduce spurious diffs in plans"
 }
-variable "self_managed_node_instance_types" {
-  type        = list(string)
-  default     = ["m5.large", "m5a.large", "m5n.large"]
-  description = "Set of instance types associated with the EKS Node Group"
+
+variable "apply_config_map_aws_auth" {
+  type        = bool
+  default     = true
+  description = "Whether to apply the ConfigMap to allow worker nodes to join the EKS cluster and allow additional users, accounts and roles to acces the cluster"
 }
-variable "self_managed_node_desired_size" {
-  type        = number
-  default     = 3
-  description = "Desired number of worker nodes"
+
+variable "local_exec_interpreter" {
+  type        = list(string)
+  default     = ["/bin/sh", "-c"]
+  description = "shell to use for local_exec"
 }
-variable "self_managed_node_max_size" {
-  type        = number
-  default     = 3
-  description = "The maximum size of the AutoScaling Group"
+
+variable "wait_for_cluster_command" {
+  type        = string
+  default     = "curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz"
+  description = "`local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint are available as environment variable `ENDPOINT`"
 }
-variable "self_managed_node_min_size" {
-  type        = number
-  default     = 3
-  description = "The minimum size of the AutoScaling Group"
+
+#----------------------------------------------------------
+# HELM CHART VARIABLES
+#----------------------------------------------------------
+variable "public_docker_repo" {
+  type        = bool
+  default     = true
+  description = "public docker repo access"
 }
 
 variable "metrics_server_enable" {
@@ -287,11 +344,7 @@ variable "ekslog_retention_in_days" {
   description = "Number of days to retain log events. Default retention - 90 days."
   type        = number
 }
-variable "public_docker_repo" {
-  type        = bool
-  default     = true
-  description = "public docker repo access"
-}
+
 variable "agones_enable" {
   type        = bool
   default     = false
@@ -385,118 +438,4 @@ variable "aws_for_fluent_bit_helm_chart_version" {
   description = "Helm chart version for aws_for_fluent_bit"
 }
 
-variable "managed_node_groups" {
-  type    = any
-  default = {}
-}
-
-variable "create_eks" {
-  type    = bool
-  default = false
-
-}
-
-variable "map_accounts" {
-  description = "Additional AWS account numbers to add to the aws-auth configmap. "
-  type        = list(string)
-  default     = []
-}
-
-variable "map_roles" {
-  description = "Additional IAM roles to add to the aws-auth configmap."
-  type = list(object({
-    rolearn  = string
-    username = string
-    groups   = list(string)
-  }))
-  default = []
-}
-
-variable "map_users" {
-  description = "Additional IAM users to add to the aws-auth configmap. "
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-  default = []
-}
-variable "iam_path" {
-  description = "If provided, all IAM roles will be created on this path."
-  type        = string
-  default     = "/"
-}
-
-variable "manage_aws_auth" {
-  description = "Whether to apply the aws-auth configmap file."
-  default     = true
-}
-variable "aws_auth_additional_labels" {
-  description = "Additional kubernetes labels applied on aws-auth ConfigMap"
-  default     = {}
-  type        = map(string)
-}
-
-variable "enable_fargate" {
-  default = false
-}
-
-variable "fargate_profiles" {
-  type    = any
-  default = {}
-}
-
-# CONFIG MAP AWS-AUTH
-variable "aws_auth_yaml_strip_quotes" {
-  type        = bool
-  default     = true
-  description = "If true, remove double quotes from the generated aws-auth ConfigMap YAML to reduce spurious diffs in plans"
-}
-
-variable "apply_config_map_aws_auth" {
-  type        = bool
-  default     = true
-  description = "Whether to apply the ConfigMap to allow worker nodes to join the EKS cluster and allow additional users, accounts and roles to acces the cluster"
-}
-
-variable "local_exec_interpreter" {
-  type        = list(string)
-  default     = ["/bin/sh", "-c"]
-  description = "shell to use for local_exec"
-}
-
-variable "wait_for_cluster_command" {
-  type        = string
-  default     = "curl --silent --fail --retry 60 --retry-delay 5 --retry-connrefused --insecure --output /dev/null $ENDPOINT/healthz"
-  description = "`local-exec` command to execute to determine if the EKS cluster is healthy. Cluster endpoint are available as environment variable `ENDPOINT`"
-}
-
-variable "map_additional_iam_roles" {
-  description = "Additional IAM roles to add to `config-map-aws-auth` ConfigMap"
-
-  type = list(object({
-    rolearn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = []
-}
 
-variable "map_additional_iam_users" {
-  description = "Additional IAM users to add to `config-map-aws-auth` ConfigMap"
-
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = []
-}
-
-variable "map_additional_aws_accounts" {
-  description = "Additional AWS account numbers to add to `config-map-aws-auth` ConfigMap"
-  type        = list(string)
-  default     = []
-}