Commit 634c8579 authored by Vara Bonthu

EMR on EKS feature added

parent 6c7863dc
SHELL := /usr/bin/env bash
# HOW TO EXECUTE
# Executing Terraform PLAN
# $ make tf-plan-eks env=<env> region=<region> account=<account> subenv=<subenv>
# e.g.,
# make tf-plan-eks env=preprod region=eu-west-1 account=application subenv=dev
# Executing Terraform APPLY
# $ make tf-apply-eks env=<env> region=<region> account=<account> subenv=<subenv>
# Executing Terraform DESTROY
# $ make tf-destroy-eks env=<env> region=<region> account=<account> subenv=<subenv>
# Example for running the dev config in preprod -> live/preprod/eu-west-1/application/dev/test-eks.tfvars
# $ make tf-plan-eks env=preprod region=eu-west-1 account=application subenv=dev
# Example for running the dev config in preprod -> live/preprod/eu-west-1/gaming/test/test-eks.tfvars
# $ make tf-plan-eks env=preprod region=eu-west-1 account=gaming subenv=test
all-test: clean tf-plan-eks

.PHONY: clean
clean:
	rm -rf .terraform .terraform.lock.hcl

.PHONY: tf-plan-eks
tf-plan-eks:
	export AWS_REGION=${region} && terraform init -backend-config ./deploy/live/${env}/${region}/${account}/${subenv}/backend.conf -reconfigure && terraform validate && terraform plan -var-file ./deploy/live/${env}/${region}/${account}/${subenv}/${subenv}.tfvars -refresh=false

.PHONY: tf-apply-eks
tf-apply-eks:
	export AWS_REGION=${region} && terraform init -backend-config ./deploy/live/${env}/${region}/${account}/${subenv}/backend.conf -reconfigure && terraform validate && terraform apply -var-file ./deploy/live/${env}/${region}/${account}/${subenv}/${subenv}.tfvars -auto-approve

.PHONY: tf-destroy-eks
tf-destroy-eks:
	export AWS_REGION=${region} && terraform init -backend-config ./deploy/live/${env}/${region}/${account}/${subenv}/backend.conf -reconfigure && terraform validate && terraform destroy -var-file ./deploy/live/${env}/${region}/${account}/${subenv}/${subenv}.tfvars -auto-approve
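# The tf-* targets above point terraform init at a per-environment backend.conf.
# A minimal sketch of such a file for the S3 backend follows; the bucket, key and
# lock-table names are illustrative assumptions, not values from this commit.
# deploy/live/preprod/eu-west-1/application/dev/backend.conf (hypothetical)
bucket         = "example-terraform-state"
key            = "eks/preprod/application/dev/terraform.tfstate"
region         = "eu-west-1"
dynamodb_table = "example-terraform-state-lock"
encrypt        = true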
@@ -38,6 +38,7 @@ resource "kubernetes_config_map" "aws_auth" {
local.self_managed_node_group_aws_auth_config_map,
local.windows_node_group_aws_auth_config_map,
local.fargate_profiles_aws_auth_config_map,
local.emr_on_eks_config_map,
var.map_roles,
))
)
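# local.emr_on_eks_config_map itself sits outside this hunk. For EMR on EKS, the
# aws-auth ConfigMap normally maps the AWSServiceRoleForAmazonEMRContainers
# service-linked role to the "emr-containers" user; a sketch of what that local
# presumably resembles (an assumption, not this module's actual definition):
locals {
  emr_on_eks_config_map = var.enable_emr_on_eks ? [
    {
      rolearn  = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/AWSServiceRoleForAmazonEMRContainers"
      username = "emr-containers"
      groups   = []
    }
  ] : []
}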
data "aws_region" "current" {}
resource "kubernetes_namespace" "spark" {
metadata {
@@ -80,8 +81,6 @@ resource "kubernetes_role_binding" "emr_containers" {
}
}
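# The Role that this binding references is collapsed out of the diff. In the EMR on
# EKS setup flow, the "emr-containers" user is usually granted a namespaced Role of
# roughly the following shape; names and rules here are assumptions for illustration,
# not the module's actual resource.
resource "kubernetes_role" "emr_containers_example" {
  metadata {
    name      = "emr-containers"
    namespace = kubernetes_namespace.spark.id
  }

  rule {
    api_groups = [""]
    resources  = ["namespaces", "pods", "configmaps", "services", "secrets", "persistentvolumeclaims"]
    verbs      = ["get", "list", "watch", "create", "delete", "patch", "update"]
  }

  rule {
    api_groups = ["batch", "extensions"]
    resources  = ["jobs"]
    verbs      = ["get", "list", "watch", "create", "delete", "patch", "update"]
  }
}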
data "aws_caller_identity" "current" {}
# EMR jobs will assume this IAM role when they run on EKS
resource "aws_iam_role" "emr_on_eks_execution" {
@@ -141,11 +140,10 @@ resource "aws_iam_role_policy_attachment" "emr_on_eks_execution" {
policy_arn = aws_iam_policy.emr_on_eks_execution.arn
}
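# The policy document behind aws_iam_policy.emr_on_eks_execution is not shown in
# this hunk. A typical EMR on EKS job execution policy covers S3 access for job
# artifacts and logs plus CloudWatch Logs; a minimal sketch, with a placeholder
# bucket name that is not part of this commit:
data "aws_iam_policy_document" "emr_on_eks_execution_example" {
  statement {
    actions   = ["s3:PutObject", "s3:GetObject", "s3:ListBucket"]
    resources = ["arn:aws:s3:::example-emr-artifacts", "arn:aws:s3:::example-emr-artifacts/*"]
  }

  statement {
    actions = [
      "logs:PutLogEvents",
      "logs:CreateLogStream",
      "logs:DescribeLogGroups",
      "logs:DescribeLogStreams",
    ]
    resources = ["arn:aws:logs:*:*:*"]
  }
}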
data "aws_region" "current" {}
# Update trust relationship for job execution role
#$(aws sts assume-role --role-arn ${local.role} --role-session-name terraform_run_instance_refresh --query 'Credentials.[`export#AWS_ACCESS_KEY_ID=`,AccessKeyId,`#AWS_SECRET_ACCESS_KEY=`,SecretAccessKey,`#AWS_SESSION_TOKEN=`,SessionToken]' --output text | sed $'s/\t//g' | sed 's/#/ /g')
# Use the below command in shell script to assume a different role
# $(aws sts assume-role --role-arn ${local.pass_local_deployment_role} --role-session-name terraform_run_instance_refresh --query 'Credentials.[`export#AWS_ACCESS_KEY_ID=`,AccessKeyId,`#AWS_SECRET_ACCESS_KEY=`,SecretAccessKey,`#AWS_SESSION_TOKEN=`,SessionToken]' --output text | sed $'s/\t//g' | sed 's/#/ /g')
# TODO Replace this resource once the provider is available for aws emr-containers
resource "null_resource" "update_trust_policy" {
provisioner "local-exec" {
interpreter = ["/bin/sh", "-c"]
@@ -157,21 +155,13 @@ set -e
aws emr-containers update-role-trust-policy \
--cluster-name ${var.eks_cluster_id} \
--namespace ${kubernetes_namespace.spark.id} \
--role-name ${aws_iam_role.emr_on_eks_execution.id}
aws emr-containers create-virtual-cluster \
--name ${var.eks_cluster_id} \
--container-provider '{
"id": ${var.eks_cluster_id},
"type": "EKS",
"info": {
"eksInfo": {
"namespace": ${var.emr_on_eks_namespace}
}
}
}'
EOF
}
// triggers = {
// always_run = timestamp()
// }
depends_on = [kubernetes_namespace.spark, aws_iam_role.emr_on_eks_execution]
}
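# The TODO above flags the local-exec workaround. Newer AWS provider releases ship a
# native aws_emrcontainers_virtual_cluster resource, so the create-virtual-cluster
# CLI call could eventually be replaced by something along these lines (a sketch,
# not part of this commit):
resource "aws_emrcontainers_virtual_cluster" "this" {
  name = var.eks_cluster_id

  container_provider {
    id   = var.eks_cluster_id
    type = "EKS"

    info {
      eks_info {
        namespace = kubernetes_namespace.spark.id
      }
    }
  }

  depends_on = [null_resource.update_trust_policy]
}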
output "emr_on_eks_role_arn" {
description = "IAM execution role ARN for EMR on EKS"
value = aws_iam_role.emr_on_eks_execution.arn
}
output "emr_on_eks_role_id" {
description = "IAM execution role ID for EMR on EKS"
value = aws_iam_role.emr_on_eks_execution.id
}
@@ -110,3 +110,13 @@ output "fargate_profiles_aws_auth_config_map" {
description = "Fargate profiles AWS auth map"
value = local.fargate_profiles_aws_auth_config_map.*
}
output "emr_on_eks_role_arn" {
description = "IAM execution role ARN for EMR on EKS"
value = var.create_eks && var.enable_emr_on_eks ? module.emr_on_eks[0].emr_on_eks_role_arn : "EMR on EKS not enabled"
}
output "emr_on_eks_role_id" {
description = "IAM execution role ID for EMR on EKS"
value = var.create_eks && var.enable_emr_on_eks ? module.emr_on_eks[0].emr_on_eks_role_id : "EMR on EKS not enabled"
}
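# These outputs index module.emr_on_eks[0], which implies the sub-module is created
# behind a count guard roughly like the sketch below; the source path and the
# cluster-id reference are assumptions, not shown in this diff.
module "emr_on_eks" {
  count  = var.create_eks && var.enable_emr_on_eks ? 1 : 0
  source = "./modules/emr-on-eks" # hypothetical path

  eks_cluster_id       = module.eks.cluster_id # hypothetical reference
  emr_on_eks_namespace = var.emr_on_eks_namespace
}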