I have a question regarding AWS API Gateway, which forwards traffic to an internal-facing ALB. This is configured with Terraform:
resource "aws_lb" "foo-load-balancer" {
name = "foo-dev"
load_balancer_type = "application"
internal = true
subnets = local.private_subnet_ids
security_groups = [aws_security_group.lb_security_group.id]
}
resource "aws_security_group" "lb_security_group" {
name = "${local.name}-lb-security-group"
vpc_id = local.vpc_id
ingress {
from_port = 8080
to_port = 8383
protocol = "tcp"
cidr_blocks = local.private_subnet_cidrs
}
ingress {
from_port = 5432
to_port = 5432
protocol = "tcp"
cidr_blocks = local.private_subnet_cidrs
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = local.private_subnet_cidrs
}
egress {
# from_port/to_port of 0 with protocol "-1" allows any port and any protocol
from_port = 0
to_port = 0
protocol = "-1"
# allow traffic out to all IP addresses
cidr_blocks = ["0.0.0.0/0"]
}
tags = local.tags
}
resource "aws_apigatewayv2_vpc_link" "alb-vpc-link" {
name = local.name
security_group_ids = [
aws_security_group.lb_security_group.id]
subnet_ids = local.private_subnet_ids
tags = local.tags
}
resource "aws_apigatewayv2_api" "foo" {
name = "${local.name}-foo"
protocol_type = "HTTP"
}
resource "aws_apigatewayv2_integration" "foo" {
api_id = aws_apigatewayv2_api.foo.id
integration_type = "HTTP_PROXY"
integration_method = "ANY"
connection_type = "VPC_LINK"
connection_id = aws_apigatewayv2_vpc_link.alb-vpc-link.id
integration_uri = module.foo-backend-service.aws_lb_listener.arn
depends_on = [
module.foo-backend-service]
}
resource "aws_apigatewayv2_route" "foo" {
api_id = aws_apigatewayv2_api.foo.id
route_key = "ANY /{proxy+}"
target = "integrations/${aws_apigatewayv2_integration.foo.id}"
}
resource "aws_apigatewayv2_deployment" "foo" {
api_id = aws_apigatewayv2_api.foo.id
depends_on = [
aws_apigatewayv2_route.foo]
}
resource "aws_apigatewayv2_stage" "foo" {
name = "$default"
deployment_id = aws_apigatewayv2_deployment.foo.id
api_id = aws_apigatewayv2_api.foo.id
}
resource "aws_apigatewayv2_domain_name" "foo" {
domain_name = aws_acm_certificate.foo.domain_name
domain_name_configuration {
certificate_arn = aws_acm_certificate.foo.arn
endpoint_type = "REGIONAL"
security_policy = "TLS_1_2"
}
depends_on = [
aws_acm_certificate_validation.foo]
}
resource "aws_apigatewayv2_api_mapping" "foo" {
api_id = aws_apigatewayv2_api.foo.id
domain_name = aws_apigatewayv2_domain_name.foo.id
stage = aws_apigatewayv2_stage.foo.id
}
resource "aws_ecs_task_definition" "foo-service-task" {
family = "${var.application_name}-${var.name}-service-task"
container_definitions = jsonencode(
[
{
name: "${var.application_name}-${var.name}-service-task",
image: var.image,
essential: true,
portMappings: [
{
containerPort: 8282,
hostPort: var.port
}
],
memory: var.memory,
cpu: var.cpu,
environment: var.environment,
logConfiguration: {
logDriver: "awslogs",
options: {
awslogs-group: var.cloudwatch_group,
awslogs-region: var.aws_region,
awslogs-stream-prefix: var.name
}
}
}
])
network_mode = "awsvpc"
tags = merge(var.tags, {
service = var.name
})
depends_on = [var.depends_on_relation]
}
resource "aws_ecs_service" "foo-service" {
name = "${var.application_name}-${var.name}"
cluster = data.aws_ecs_cluster.ecs.id
task_definition = aws_ecs_task_definition.foo-service-task.arn
desired_count = var.desired_count
load_balancer {
target_group_arn = aws_lb_target_group.target-group.arn
container_name = aws_ecs_task_definition.foo-service-task.family
container_port = 8282
}
network_configuration {
subnets = var.subnet_ids
security_groups = [aws_security_group.securitygroup.id]
}
depends_on = [aws_lb_listener.listener]
}
resource "aws_security_group" "securitygroup" {
name = "${var.application_name}-${var.name}"
vpc_id = var.vpc_id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = local.subnet_cidrs
}
egress {
# from_port/to_port of 0 with protocol "-1" allows any port and any protocol
from_port = 0
to_port = 0
protocol = "-1"
# allow traffic out to all IP addresses
cidr_blocks = ["0.0.0.0/0"]
}
tags = merge(var.tags, {
service = var.name
})
}
resource "aws_lb_target_group" "target-group" {
name = "${local.stage}-${var.name}"
port = var.port
protocol = var.protocol
target_type = "ip"
vpc_id = var.vpc_id
health_check {
enabled = var.health_check_enabled
matcher = var.protocol == "TCP" ? null : "200,301,302"
path = var.health_check_protocol == "TCP" ? null : var.health_check_path
port = var.health_check_port
protocol = var.health_check_protocol
interval = var.health_check_interval
timeout = var.protocol == "TCP" ? null : var.health_check_timeout
healthy_threshold = var.health_check_healthy_threshold
unhealthy_threshold = var.health_check_unhealthy_threshold
}
slow_start = var.protocol == "TCP" ? null : var.health_check_slow_start
tags = merge(var.tags, {
service = var.name
})
depends_on = [
data.aws_lb.loadbalancer]
}
resource "aws_lb_listener" "listener" {
load_balancer_arn = data.aws_lb.loadbalancer.arn
port = var.port
protocol = var.protocol
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.target-group.arn
}
depends_on = [
aws_lb_target_group.target-group]
}
resource "aws_route53_record" "foo" {
name = aws_apigatewayv2_domain_name.foo.domain_name
type = "A"
zone_id = data.aws_route53_zone.hosted_zone.zone_id
alias {
name = aws_apigatewayv2_domain_name.foo.domain_name_configuration[0].target_domain_name
zone_id = aws_apigatewayv2_domain_name.foo.domain_name_configuration[0].hosted_zone_id
evaluate_target_health = false
}
}
resource "aws_acm_certificate" "foo" {
domain_name = local.application_domain
validation_method = "DNS"
}
resource "aws_route53_record" "foo_acm" {
for_each = {
for dvo in aws_acm_certificate.foo.domain_validation_options : dvo.domain_name => {
name = dvo.resource_record_name
record = dvo.resource_record_value
type = dvo.resource_record_type
zone_id = data.aws_route53_zone.hosted_zone.zone_id
}
}
allow_overwrite = true
name = each.value.name
records = [each.value.record]
ttl = 60
type = each.value.type
zone_id = each.value.zone_id
}
resource "aws_acm_certificate_validation" "foo" {
certificate_arn = aws_acm_certificate.foo.arn
validation_record_fqdns = [for record in aws_route53_record.foo_acm : record.fqdn]
}
resource "aws_route53_record" "application_load_balancer" {
name = "alb.${local.application_domain}"
type = "CNAME"
zone_id = data.aws_route53_zone.hosted_zone.zone_id
ttl = "60"
records = [aws_lb.foo-load-balancer.dns_name]
}
The service/task I am starting is a simple Spring Boot app ("Greeting-Controller") with a health check enabled. The service is up and running and healthy.
The problem is that the ALB doesn't receive any traffic (I can't see any requests on the ALB in the AWS console), and when I try to access the registered Route53 DNS name I get the "Service unavailable" JSON response.
Any ideas what to change?
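For comparison, here is a minimal sketch of how an HTTP API private integration is typically pointed straight at an ALB listener. The resource addresses are illustrative: in the setup above the listener lives inside the foo-backend-service module, so its ARN would have to be exposed through a real module output rather than referenced as module.foo-backend-service.aws_lb_listener.arn. The integration_uri of an HTTP_PROXY integration over a VPC link must be the ARN of a listener on the load balancer, and the ALB's security group must allow inbound traffic from the VPC link's ENIs on that listener's port:
# Illustrative listener on the internal ALB (assumes the service is exposed on port 80)
resource "aws_lb_listener" "http" {
  load_balancer_arn = aws_lb.foo-load-balancer.arn
  port              = 80
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.target-group.arn # hypothetical target group at this scope
  }
}

resource "aws_apigatewayv2_integration" "example" {
  api_id             = aws_apigatewayv2_api.foo.id
  integration_type   = "HTTP_PROXY"
  integration_method = "ANY"
  connection_type    = "VPC_LINK"
  connection_id      = aws_apigatewayv2_vpc_link.alb-vpc-link.id
  integration_uri    = aws_lb_listener.http.arn # must be a listener ARN, not the ALB ARN
}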
Related
I have one Deployment and one Service configuration:
Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: dashboard-backend-deployment
spec:
replicas: 2
selector:
matchLabels:
app: dashboard-backend
template:
metadata:
labels:
app: dashboard-backend
spec:
containers:
- name: dashboard-backend
image: $BACKEND_IMAGE
imagePullPolicy: Always
env:
- name: NODE_ENV
value: $NODE_ENV
- name: PORT
value: '3000'
- name: ACCESS_TOKEN_JWT_KEY
value: $ACCESS_TOKEN_JWT_KEY
- name: REFRESH_TOKEN_JWT_KEY
value: $REFRESH_TOKEN_JWT_KEY
- name: GOOGLE_OAUTH_CLIENT_ID
value: $GOOGLE_OAUTH_CLIENT_ID
- name: GOOGLE_OAUTH_CLIENT_SECRET
value: $GOOGLE_OAUTH_CLIENT_SECRET
- name: GOOGLE_OAUTH_REDIRECT_URI
value: $GOOGLE_OAUTH_REDIRECT_URI
- name: GH_OAUTH_CLIENT_ID
value: $GH_OAUTH_CLIENT_ID
- name: GH_OAUTH_CLIENT_SECRET
value: $GH_OAUTH_CLIENT_SECRET
- name: GITHUB_OAUTH_REDIRECT_URI
value: $GITHUB_OAUTH_REDIRECT_URI
- name: MIXPANEL_TOKEN
value: $MIXPANEL_TOKEN
- name: FRONTEND_URL
value: $FRONTEND_URL
- name: CLI_TOKEN_JWT_KEY
value: $CLI_TOKEN_JWT_KEY
- name: DATABASE_URL
value: $DATABASE_URL
Service
apiVersion: v1
kind: Service
metadata:
name: backend-service
annotations:
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: $SSL_CERTIFICATE_ARN
service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https
spec:
selector:
app: dashboard-backend
type: LoadBalancer
ports:
- name: https
protocol: TCP
port: 3000
targetPort: 3000
I have an AWS EKS cluster configured. I run this command (with kubectl, of course, pointed at my EKS cluster):
kubectl apply -f=./k8s/backend-deployment.yaml -f=./k8s/backend-service.yaml
Output of command:
Using kubectl version: Client Version: v1.26.0
Kustomize Version: v4.5.7
Using aws-iam-authenticator version: {"Version":"0.6.2","Commit":"..."}
deployment.apps/dashboard-backend-deployment unchanged
service/backend-service unchanged
When I open the Load Balancers section of the EC2 console, there are no load balancers at all. Why?
These are the Terraform files I used to deploy my cluster:
eks-cluster:
data "aws_iam_policy_document" "eks_cluster_policy" {
version = "2012-10-17"
statement {
actions = ["sts:AssumeRole"]
effect = "Allow"
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}
resource "aws_iam_role" "cluster" {
name = "${var.project}-Cluster-Role"
assume_role_policy = data.aws_iam_policy_document.eks_cluster_policy.json
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-cluster-iam-role",
}
)
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.cluster.name
}
resource "aws_eks_cluster" "main" {
name = "${var.project}-cluster"
role_arn = aws_iam_role.cluster.arn
version = "1.24"
vpc_config {
subnet_ids = flatten([aws_subnet.public[*].id, aws_subnet.private[*].id])
endpoint_private_access = true
endpoint_public_access = true
public_access_cidrs = ["0.0.0.0/0"]
}
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-cluster",
}
)
depends_on = [
aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy
]
}
resource "aws_security_group" "eks_cluster" {
name = "${var.project}-cluster-sg"
description = "Cluster communication with worker nodes"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-cluster-sg"
}
)
}
resource "aws_security_group_rule" "cluster_inbound" {
description = "Allow worker nodes to communicate with the cluster API Server"
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster.id
source_security_group_id = aws_security_group.eks_nodes.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "cluster_outbound" {
description = "Allow cluster API Server to communicate with the worker nodes"
from_port = 1024
protocol = "tcp"
security_group_id = aws_security_group.eks_cluster.id
source_security_group_id = aws_security_group.eks_nodes.id
to_port = 65535
type = "egress"
}
EKS WORKER NODES
data "aws_iam_policy_document" "eks_node_policy" {
version = "2012-10-17"
statement {
actions = ["sts:AssumeRole"]
effect = "Allow"
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role" "node" {
name = "${var.project}-Worker-Role"
assume_role_policy = data.aws_iam_policy_document.eks_node_policy.json
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-node-iam-role",
}
)
}
resource "aws_iam_role_policy_attachment" "node_AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.node.name
}
resource "aws_iam_role_policy_attachment" "node_AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.node.name
}
resource "aws_iam_role_policy_attachment" "node_AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.node.name
}
resource "aws_eks_node_group" "main" {
cluster_name = aws_eks_cluster.main.name
node_group_name = var.project
node_role_arn = aws_iam_role.node.arn
subnet_ids = aws_subnet.private[*].id
scaling_config {
desired_size = 1
max_size = 2
min_size = 1
}
ami_type = "AL2_x86_64"
capacity_type = "ON_DEMAND"
disk_size = 20
instance_types = ["t3.small"]
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-eks-node-group",
}
)
depends_on = [
aws_iam_role_policy_attachment.node_AmazonEKSWorkerNodePolicy,
aws_iam_role_policy_attachment.node_AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.node_AmazonEC2ContainerRegistryReadOnly,
]
}
resource "aws_security_group" "eks_nodes" {
name = "${var.project}-node-sg"
description = "Security group for all nodes in the cluster"
vpc_id = aws_vpc.main.id
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = merge(
var.tags,
{
Stack = "backend"
Name = "${var.project}-node-sg"
"kubernetes.io/cluster/${var.project}-cluster" = "owned"
}
)
}
resource "aws_security_group_rule" "nodes_internal" {
description = "Allow nodes to communicate with each other"
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.eks_nodes.id
source_security_group_id = aws_security_group.eks_nodes.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "nodes_cluster_inbound" {
description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
from_port = 1025
protocol = "tcp"
security_group_id = aws_security_group.eks_nodes.id
source_security_group_id = aws_security_group.eks_cluster.id
to_port = 65535
type = "ingress"
}
VPC
resource "aws_vpc" "main" {
cidr_block = var.vpc_cidr
enable_dns_hostnames = true
enable_dns_support = true
tags = merge(
var.tags,
{
Name = "${var.project}-vpc",
"kubernetes.io/cluster/${var.project}-cluster" = "shared"
}
)
}
resource "aws_subnet" "public" {
count = var.availability_zones_count
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, count.index)
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = merge(
var.tags,
{
Name = "${var.project}-public-subnet",
"kubernetes.io/cluster/${var.project}-cluster" = "shared"
"kubernetes.io/role/elb" = 1
}
)
map_public_ip_on_launch = true
}
resource "aws_subnet" "private" {
count = var.availability_zones_count
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, count.index + var.availability_zones_count)
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = merge(
var.tags,
{
Name = "${var.project}-private-sg"
"kubernetes.io/cluster/${var.project}-cluster" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
)
}
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-igw",
}
)
depends_on = [aws_vpc.main]
}
resource "aws_route_table" "primary" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw.id
}
tags = merge(
var.tags,
{
Name = "${var.project}-primary-route-table",
}
)
}
resource "aws_route_table_association" "internet_access" {
count = var.availability_zones_count
subnet_id = aws_subnet.public[count.index].id
route_table_id = aws_route_table.primary.id
}
resource "aws_eip" "main" {
vpc = true
tags = merge(
var.tags,
{
Name = "${var.project}-ngw-ip"
}
)
}
resource "aws_nat_gateway" "main" {
allocation_id = aws_eip.main.id
subnet_id = aws_subnet.public[0].id
tags = merge(
var.tags,
{
Name = "${var.project}-ngw"
}
)
}
resource "aws_route" "main" {
route_table_id = aws_vpc.main.default_route_table_id
nat_gateway_id = aws_nat_gateway.main.id
destination_cidr_block = "0.0.0.0/0"
}
resource "aws_security_group" "public_sg" {
name = "${var.project}-Public-sg"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-Public-sg",
}
)
}
resource "aws_security_group_rule" "sg_ingress_public_443" {
security_group_id = aws_security_group.public_sg.id
type = "ingress"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "sg_ingress_public_80" {
security_group_id = aws_security_group.public_sg.id
type = "ingress"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "sg_egress_public" {
security_group_id = aws_security_group.public_sg.id
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group" "data_plane_sg" {
name = "${var.project}-Worker-sg"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-Worker-sg",
}
)
}
resource "aws_security_group_rule" "nodes" {
description = "Allow nodes to communicate with each other"
security_group_id = aws_security_group.data_plane_sg.id
type = "ingress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = flatten([cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 0), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 1), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 2), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 3)])
}
resource "aws_security_group_rule" "nodes_inbound" {
description = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
security_group_id = aws_security_group.data_plane_sg.id
type = "ingress"
from_port = 1025
to_port = 65535
protocol = "tcp"
cidr_blocks = flatten([cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 2), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 3)])
}
resource "aws_security_group_rule" "node_outbound" {
security_group_id = aws_security_group.data_plane_sg.id
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group" "control_plane_sg" {
name = "${var.project}-ControlPlane-sg"
vpc_id = aws_vpc.main.id
tags = merge(
var.tags,
{
Name = "${var.project}-ControlPlane-sg",
}
)
}
resource "aws_security_group_rule" "control_plane_inbound" {
security_group_id = aws_security_group.control_plane_sg.id
type = "ingress"
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = flatten([cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 0), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 1), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 2), cidrsubnet(var.vpc_cidr, var.subnet_cidr_bits, 3)])
}
resource "aws_security_group_rule" "control_plane_outbound" {
security_group_id = aws_security_group.control_plane_sg.id
type = "egress"
from_port = 0
to_port = 65535
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
There are more files, of course, but I'm not sure they are relevant.
In order to create load balancers automatically from Service and Ingress resources, you need to deploy the aws-load-balancer-controller in your EKS cluster.
AWS also has official documentation here.
Disclaimer: I created the repo mentioned below.
You can also find Terraform code to deploy and use the aws-load-balancer-controller, along with an EKS module, in the ishuar/terraform-eks GitHub repo for practical reference.
You need to install the aws-load-balancer-controller.
Check here and here.
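As a rough sketch of what that can look like from Terraform (assuming the Helm provider is already configured against the cluster, and that an IAM role for the controller's service account has been set up via IRSA; the aws_iam_role.lb_controller reference below is hypothetical):
resource "helm_release" "aws_load_balancer_controller" {
  name       = "aws-load-balancer-controller"
  repository = "https://aws.github.io/eks-charts"
  chart      = "aws-load-balancer-controller"
  namespace  = "kube-system"

  set {
    name  = "clusterName"
    value = aws_eks_cluster.main.name
  }

  set {
    name  = "serviceAccount.create"
    value = "true"
  }

  # IRSA role for the controller; aws_iam_role.lb_controller is assumed to exist elsewhere
  set {
    name  = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
    value = aws_iam_role.lb_controller.arn
  }
}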
I'm trying to create both services (a Lambda function and an RDS instance) within the same VPC and give them appropriate security groups, but I can't make it work.
variable "vpc_cidr_block" {
default = "10.1.0.0/16"
}
variable "cidr_block_subnet_public" {
default = "10.1.1.0/24"
}
variable "cidr_block_subnets_private" {
default = ["10.1.2.0/24", "10.1.3.0/24", "10.1.4.0/24"]
}
data "aws_availability_zones" "available" {
state = "available"
}
resource "aws_vpc" "vpc" {
cidr_block = var.vpc_cidr_block
}
resource "aws_subnet" "private" {
count = length(var.cidr_block_subnets_private)
cidr_block = var.cidr_block_subnets_private[count.index]
vpc_id = aws_vpc.vpc.id
availability_zone = data.aws_availability_zones.available.names[count.index]
}
resource "aws_security_group" "lambda" {
vpc_id = aws_vpc.vpc.id
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "rds" {
vpc_id = aws_vpc.vpc.id
ingress {
description = "PostgreSQL"
from_port = 5432
protocol = "tcp"
to_port = 5432
// security_groups = [aws_security_group.lambda.id]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_lambda_function" "event" {
function_name = "ServerlessExampleEvent"
timeout = 30
s3_bucket = "mn-lambda"
s3_key = "mn/v1.0.0/lambda-1.0.0-all.jar"
handler = "dk.fitfit.handler.EventRequestHandler"
runtime = "java11"
memory_size = 256
role = aws_iam_role.event.arn
vpc_config {
security_group_ids = [aws_security_group.lambda.id]
subnet_ids = [for s in aws_subnet.private: s.id]
}
environment {
variables = {
JDBC_DATABASE_URL = "jdbc:postgresql://${aws_db_instance.rds.address}:${aws_db_instance.rds.port}/${aws_db_instance.rds.identifier}"
DATABASE_USERNAME = aws_db_instance.rds.username
DATABASE_PASSWORD = aws_db_instance.rds.password
}
}
}
resource "aws_db_subnet_group" "db" {
subnet_ids = aws_subnet.private.*.id
}
resource "aws_db_instance" "rds" {
allocated_storage = 10
engine = "postgres"
engine_version = "11.5"
instance_class = "db.t2.micro"
username = "postgres"
password = random_password.password.result
skip_final_snapshot = true
apply_immediately = true
vpc_security_group_ids = [aws_security_group.rds.id]
db_subnet_group_name = aws_db_subnet_group.db.name
}
resource "random_password" "password" {
length = 32
special = false
}
To avoid cluttering the question, I've only posted the relevant parts of my HCL. Please let me know if I missed anything important.
The biggest issue is the commented-out security_groups parameter in the ingress block of the rds security group. Uncommenting it allows PostgreSQL traffic from the lambda security group:
resource "aws_security_group" "rds" {
vpc_id = aws_vpc.vpc.id
ingress {
description = "PostgreSQL"
from_port = 5432
protocol = "tcp"
to_port = 5432
security_groups = [aws_security_group.lambda.id]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
On top of that, your JDBC string resolves to something like jdbc:postgresql://terraform-20091110230000000000000001.xxxx.us-east-1.rds.amazonaws.com:5432/terraform-20091110230000000000000001, because you aren't specifying an identifier for the RDS instance, so Terraform generates one prefixed with terraform- plus a timestamp and a counter. The important thing to note is that your RDS instance doesn't contain a database named terraform-20091110230000000000000001 for your application to connect to, because you haven't specified one.
You can have RDS create a database on the RDS instance by using the name parameter. You can then update your JDBC connection string to specify the database name as well:
resource "aws_db_instance" "rds" {
allocated_storage = 10
engine = "postgres"
engine_version = "11.5"
instance_class = "db.t2.micro"
username = "postgres"
password = random_password.password.result
skip_final_snapshot = true
apply_immediately = true
name = "foo"
vpc_security_group_ids = [aws_security_group.rds.id]
db_subnet_group_name = aws_db_subnet_group.db.name
}
resource "aws_lambda_function" "event" {
function_name = "ServerlessExampleEvent"
timeout = 30
s3_bucket = "mn-lambda"
s3_key = "mn/v1.0.0/lambda-1.0.0-all.jar"
handler = "dk.fitfit.handler.EventRequestHandler"
runtime = "java11"
memory_size = 256
role = aws_iam_role.event.arn
vpc_config {
security_group_ids = [aws_security_group.lambda.id]
subnet_ids = [for s in aws_subnet.private : s.id]
}
environment {
variables = {
JDBC_DATABASE_URL = "jdbc:postgresql://${aws_db_instance.rds.address}:${aws_db_instance.rds.port}/${aws_db_instance.rds.name}"
DATABASE_USERNAME = aws_db_instance.rds.username
DATABASE_PASSWORD = aws_db_instance.rds.password
}
}
}
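One follow-up worth noting, if you are on a newer AWS provider: the name argument of aws_db_instance was deprecated in the 4.x releases and removed in 5.0 in favor of db_name, so on those versions the instance block would look like the sketch below, and the JDBC string would reference aws_db_instance.rds.db_name instead:
resource "aws_db_instance" "rds" {
  allocated_storage      = 10
  engine                 = "postgres"
  engine_version         = "11.5"
  instance_class         = "db.t2.micro"
  username               = "postgres"
  password               = random_password.password.result
  skip_final_snapshot    = true
  apply_immediately      = true
  db_name                = "foo" # "name" on older provider versions
  vpc_security_group_ids = [aws_security_group.rds.id]
  db_subnet_group_name   = aws_db_subnet_group.db.name
}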
I would like to use an AWS Auto Scaling Group (ASG). How do I correct this error?
Error: Error creating AutoScaling Group: ValidationError: The subnet ID 'aws_subnet.firstsubnet.id' does not exist
status code: 400, request id: 06571fdb-585b-486e-ae9c-19d3acb14d9e
My code is below:
resource "aws_vpc" "myvpc"{
cidr_block = "192.168.0.0/16"
instance_tenancy = "default"
enable_dns_hostnames = true
tags = {
Name = "newvpc"
}
}
resource "aws_subnet" "firstsubnet"{
vpc_id = aws_vpc.myvpc.id
cidr_block = "192.168.1.0/24"
availability_zone = "${var.availability_zone1}"
map_public_ip_on_launch = true
tags = {
Name = "public_subnet"
}
}
resource "aws_subnet" "secondsubnet"{
vpc_id = aws_vpc.myvpc.id
cidr_block = "192.168.2.0/24"
availability_zone = "${var.availability_zone2}"
tags = {
Name = "private_subnet"
}
}
resource "aws_internet_gateway" "my_igv" {
vpc_id = aws_vpc.myvpc.id
tags = {
Name = "my_igv"
}
}
resource "aws_route_table" "my_route" {
vpc_id = aws_vpc.myvpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.my_igv.id
}
tags = {
Name = "my_route"
}
}
resource "aws_route_table_association" "subnet_assosiate" {
subnet_id = aws_subnet.firstsubnet.id
route_table_id = aws_route_table.my_route.id
}
resource "aws_security_group" "mysc1" {
name = "mysc1-http"
description = "Allow inbound traffic"
vpc_id = aws_vpc.myvpc.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "allow_http"
}
}
resource "aws_instance" "wp" {
ami = "ami-07d9160fa81ccffb5"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.mysc1.id}"]
subnet_id = aws_subnet.firstsubnet.id
key_name = "MyKey"
tags = {
Name = "wordpress"
}
connection {
type = "ssh"
user = "ec2-user"
private_key = file("./MyKey.pem")
host = aws_instance.wp.public_ip
}
}
resource "aws_security_group" "mysc2" {
name = "mysc2-db"
description = "Allow inbound traffic"
vpc_id = aws_vpc.myvpc.id
ingress {
from_port = 3306
to_port = 3306
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "allow_db"
}
}
resource "aws_instance" "db" {
ami = "ami-07d9160fa81ccffb5"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.mysc2.id}"]
subnet_id = aws_subnet.secondsubnet.id
key_name = "MyKey"
tags = {
Name = "mysql"
}
}
output "instance_ip_addr" {
value = aws_instance.db.private_ip
}
module "asg" {
source = "terraform-aws-modules/autoscaling/aws"
version = "~> 3.0"
name = "service"
# Launch configuration
lc_name = "example-lc"
image_id = "ami-ebd02392"
instance_type = "t2.micro"
security_groups = ["${aws_security_group.mysc1.id}"]
ebs_block_device = [
{
device_name = "/dev/xvdz"
volume_type = "gp2"
volume_size = "50"
delete_on_termination = true
},
]
root_block_device = [
{
volume_size = "50"
volume_type = "gp2"
},
]
# Auto scaling group
asg_name = "example-asg"
vpc_zone_identifier = ["aws_subnet.firstsubnet.id", "aws_subnet.secondsubnet.id"]
health_check_type = "EC2"
min_size = 0
max_size = 4
desired_capacity = 4
wait_for_capacity_timeout = 0
tags = [
{
key = "Environment"
value = "dev"
propagate_at_launch = true
},
{
key = "Project"
value = "megasecret"
propagate_at_launch = true
},
]
tags_as_map = {
extra_tag1 = "extra_value1"
extra_tag2 = "extra_value2"
}
}
module "elb_http" {
source = "terraform-aws-modules/elb/aws"
version = "~> 2.0"
name = "elb-example"
subnets = ["${aws_subnet.firstsubnet.id}", "${aws_subnet.secondsubnet.id}"]
security_groups = ["${aws_security_group.mysc1.id}"]
internal = false
listener = [
{
instance_port = "80"
instance_protocol = "HTTP"
lb_port = "80"
lb_protocol = "HTTP"
},
{
instance_port = "8080"
instance_protocol = "http"
lb_port = "8080"
lb_protocol = "http"
#ssl_certificate_id = "arn:aws:acm:eu-west-1:235367859451:certificate/6c270328-2cd5-4b2d-8dfd-ae8d0004ad31"
},
]
health_check = {
target = "HTTP:80/"
interval = 30
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 5
}
access_logs = {
bucket = "my-access-logs-bucket"
}
// ELB attachments
number_of_instances = 2
instances = ["i-06ff41a77dfb5349d", "i-4906ff41a77dfb53d"]
tags = {
Owner = "user"
Environment = "dev"
}
}
The reason for the error is that you are passing literal strings instead of actual subnet IDs.
Thus, instead of:
vpc_zone_identifier = ["aws_subnet.firstsubnet.id", "aws_subnet.secondsubnet.id"]
there should be:
vpc_zone_identifier = [aws_subnet.firstsubnet.id, aws_subnet.secondsubnet.id]
or
vpc_zone_identifier = ["${aws_subnet.firstsubnet.id}", "${aws_subnet.secondsubnet.id}"]
Is there a good, definitive reference or course for managing an ECS service using Terraform? I have followed this one, which creates the ECS service, but I can't get to a state where my task runs on that cluster.
Here is what I have for now:
# create the VPC
resource "aws_vpc" "vpc" {
cidr_block = var.cidr_vpc
instance_tenancy = var.instanceTenancy
enable_dns_support = var.dnsSupport
enable_dns_hostnames = var.dnsHostNames
tags = {
Name = "tdemo"
}
}
# Create the Internet Gateway
resource "aws_internet_gateway" "igw" {
vpc_id = "${aws_vpc.vpc.id}"
tags = {
Name = "tdemo"
}
}
# Create the Public subnet
resource "aws_subnet" "subnet_public1" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = var.cidr_pubsubnet1
map_public_ip_on_launch = "true"
availability_zone = var.availability_zone1
tags = {
Name = "tdemo"
}
}
resource "aws_subnet" "subnet_public2" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = var.cidr_pubsubnet2
map_public_ip_on_launch = "true"
availability_zone = var.availability_zone2
tags = {
Name = "tdemo"
}
}
# Route table to connect to Internet Gateway
resource "aws_route_table" "rta_public" {
vpc_id = "${aws_vpc.vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.igw.id}"
}
tags = {
Name = "tdemo"
}
}
# Create Route Table Association to make the subet public over internet
resource "aws_route_table_association" "rta_subnet_public" {
subnet_id = "${aws_subnet.subnet_public1.id}"
route_table_id = "${aws_route_table.rta_public.id}"
}
# Configure Security Group inbound and outbound rules
resource "aws_security_group" "sg_22" {
name = "sg_22"
vpc_id = "${aws_vpc.vpc.id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "tdemo"
}
}
###############################################################################
resource "aws_iam_role" "ecs-service-role" {
name = "tdemo-ecs-service-role"
path = "/"
assume_role_policy = "${data.aws_iam_policy_document.ecs-service-policy.json}"
}
resource "aws_iam_role_policy_attachment" "ecs-service-role-attachment" {
role = "${aws_iam_role.ecs-service-role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole"
}
data "aws_iam_policy_document" "ecs-service-policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ecs.amazonaws.com"]
}
}
}
resource "aws_iam_role" "ecs-instance-role" {
name = "tdemo-ecs-instance-role"
path = "/"
assume_role_policy = "${data.aws_iam_policy_document.ecs-instance-policy.json}"
}
data "aws_iam_policy_document" "ecs-instance-policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role_policy_attachment" "ecs-instance-role-attachment" {
role = "${aws_iam_role.ecs-instance-role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs-instance-profile" {
name = "tdemo-ecs-instance-profile"
path = "/"
roles = ["${aws_iam_role.ecs-instance-role.id}"]
provisioner "local-exec" {
command = "ping 127.0.0.1 -n 11 > nul"
}
}
resource "aws_launch_configuration" "ecs-launch-configuration" {
name = "tdemo-ecs-launch-configuration"
image_id = var.amiid
instance_type = "t2.xlarge"
iam_instance_profile = "${aws_iam_instance_profile.ecs-instance-profile.id}"
root_block_device {
volume_type = "standard"
volume_size = 100
delete_on_termination = true
}
lifecycle {
create_before_destroy = true
}
security_groups = ["${aws_security_group.sg_22.id}"]
associate_public_ip_address = "true"
key_name = "${var.ecs_public_keyname}"
user_data = <<-EOF
#! /bin/bash
echo ECS_CLUSTER=your_cluster_name >> /etc/ecs/ecs.config
sudo sysctl -w vm.max_map_count=524288
sudo apt-get update
sudo apt-get install -y apache2
sudo systemctl start apache2
sudo systemctl enable apache2
echo "<h1>Deployed via Terraform</h1>" | sudo tee /var/www/html/index.html
EOF
}
resource "aws_ecs_cluster" "ecs-cluster" {
name = var.ecs_cluster
}
###############################################################################
data "aws_ecs_task_definition" "ecs_task_definition" {
task_definition = "${aws_ecs_task_definition.ecs_task_definition.family}"
}
resource "aws_ecs_task_definition" "ecs_task_definition" {
family = "hello_world"
container_definitions = <<DEFINITION
[
{
"name": "hello-world",
"image": "nginx:latest",
"essential": true,
"portMappings": [
{
"containerPort": 80,
"hostPort": 80
}
],
"memory": 500,
"cpu": 10
}
]
DEFINITION
}
resource "aws_alb" "ecs-load-balancer" {
name = "ecs-load-balancer"
security_groups = ["${aws_security_group.sg_22.id}"]
subnets = ["${aws_subnet.subnet_public1.id}", "${aws_subnet.subnet_public2.id}"]
tags = {
Name = "ecs-load-balancer"
}
}
resource "aws_alb_target_group" "ecs-target-group" {
name = "ecs-target-group"
port = "80"
protocol = "HTTP"
vpc_id = "${aws_vpc.vpc.id}"
health_check {
healthy_threshold = "5"
unhealthy_threshold = "2"
interval = "30"
matcher = "200"
path = "/"
port = "traffic-port"
protocol = "HTTP"
timeout = "5"
}
tags = {
Name = "ecs-target-group"
}
}
resource "aws_alb_listener" "alb-listener" {
load_balancer_arn = "${aws_alb.ecs-load-balancer.arn}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.ecs-target-group.arn}"
type = "forward"
}
}
resource "aws_autoscaling_group" "ecs-autoscaling-group" {
name = "ecs-autoscaling-group"
max_size = "${var.max_instance_size}"
min_size = "${var.min_instance_size}"
desired_capacity = "${var.desired_capacity}"
vpc_zone_identifier = ["${aws_subnet.subnet_public1.id}", "${aws_subnet.subnet_public2.id}"]
launch_configuration = "${aws_launch_configuration.ecs-launch-configuration.name}"
health_check_type = "ELB"
}
resource "aws_ecs_service" "ecs-service" {
name = "tdemo-ecs-service"
iam_role = "${aws_iam_role.ecs-service-role.name}"
cluster = "${aws_ecs_cluster.ecs-cluster.id}"
task_definition = "${aws_ecs_task_definition.ecs_task_definition.family}:${max("${aws_ecs_task_definition.ecs_task_definition.revision}", "${data.aws_ecs_task_definition.ecs_task_definition.revision}")}"
desired_count = 1
load_balancer {
target_group_arn = "${aws_alb_target_group.ecs-target-group.arn}"
container_port = 80
container_name = "hello-world"
}
}
Thanks,
One thing that is apparent, and that may be the source of the issue (or at least one of them), is this line:
echo ECS_CLUSTER=your_cluster_name >> /etc/ecs/ecs.config
However, your cluster name is var.ecs_cluster. Thus the above line should be:
echo ECS_CLUSTER=${var.ecs_cluster} >> /etc/ecs/ecs.config
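For clarity, here is a sketch of the launch configuration from the question with just that line corrected (the apache2 install and block-device settings are left out for brevity; they can stay as they were):
resource "aws_launch_configuration" "ecs-launch-configuration" {
  name                        = "tdemo-ecs-launch-configuration"
  image_id                    = var.amiid
  instance_type               = "t2.xlarge"
  iam_instance_profile        = aws_iam_instance_profile.ecs-instance-profile.id
  security_groups             = [aws_security_group.sg_22.id]
  associate_public_ip_address = true
  key_name                    = var.ecs_public_keyname

  # Register the instance with the cluster this configuration actually creates
  user_data = <<-EOF
    #!/bin/bash
    echo ECS_CLUSTER=${var.ecs_cluster} >> /etc/ecs/ecs.config
  EOF

  lifecycle {
    create_before_destroy = true
  }
}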
Please note that there could be other issues that are not easy to spot without actually deploying your Terraform configuration.
I deploy ECS using Terraform. When I run terraform apply everything is okay, but when I browse to the ECS service's Events tab I see this error:
service nginx-ecs-service was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster.
How do I fix that? What is missing in my Terraform file?
locals {
name = "myapp"
environment = "prod"
# This is the convention we use to know what belongs to each other
ec2_resources_name = "${local.name}-${local.environment}"
}
resource "aws_iam_server_certificate" "lb_cert" {
name = "lb_cert"
certificate_body = "${file("./www.example.com/cert.pem")}"
private_key = "${file("./www.example.com/privkey.pem")}"
certificate_chain = "${file("./www.example.com/chain.pem")}"
}
resource "aws_security_group" "bastion-sg" {
name = "bastion-security-group"
vpc_id = "${module.vpc.vpc_id}"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["0.0.0.0/0"]
}
egress {
protocol = -1
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "bastion" {
depends_on = ["aws_security_group.bastion-sg"]
ami = "ami-0d5d9d301c853a04a"
key_name = "myapp"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.bastion-sg.id}"]
associate_public_ip_address = true
subnet_id = "${element(module.vpc.public_subnets, 0)}"
tags = {
Name = "bastion"
}
}
# VPC Definition
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 2.0"
name = "my-vpc"
cidr = "10.1.0.0/16"
azs = ["us-east-2a", "us-east-2b", "us-east-2c"]
private_subnets = ["10.1.1.0/24", "10.1.2.0/24", "10.1.3.0/24"]
public_subnets = ["10.1.101.0/24", "10.1.102.0/24", "10.1.103.0/24"]
single_nat_gateway = true
enable_nat_gateway = true
enable_vpn_gateway = false
enable_dns_hostnames = true
public_subnet_tags = {
Name = "public"
}
private_subnet_tags = {
Name = "private"
}
public_route_table_tags = {
Name = "public-RT"
}
private_route_table_tags = {
Name = "private-RT"
}
tags = {
Environment = local.environment
Name = local.name
}
}
# ------------
resource "aws_ecs_cluster" "public-ecs-cluster" {
name = "myapp-${local.environment}"
lifecycle {
create_before_destroy = true
}
}
resource "aws_security_group" "ecs-vpc-secgroup" {
name = "ecs-vpc-secgroup"
description = "ecs-vpc-secgroup"
# vpc_id = "vpc-b8daecde"
vpc_id = "${module.vpc.vpc_id}"
ingress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "ecs-security-group"
}
}
resource "aws_lb" "nginx-ecs-alb" {
name = "nginx-ecs-alb"
internal = false
load_balancer_type = "application"
subnets = module.vpc.public_subnets
security_groups = ["${aws_security_group.ecs-vpc-secgroup.id}"]
}
resource "aws_alb_target_group" "nginx-ecs-tg" {
name = "nginx-ecs-tg"
port = "80"
protocol = "HTTP"
vpc_id = "${module.vpc.vpc_id}"
health_check {
healthy_threshold = 3
unhealthy_threshold = 10
timeout = 5
interval = 10
path = "/"
}
depends_on = ["aws_lb.nginx-ecs-alb"]
}
resource "aws_alb_listener" "alb_listener" {
load_balancer_arn = "${aws_lb.nginx-ecs-alb.arn}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
type = "forward"
}
}
resource "aws_ecs_task_definition" "nginx-image" {
family = "nginx-server"
network_mode = "bridge"
container_definitions = <<DEFINITION
[
{
"name": "nginx-web",
"image": "nginx:latest",
"essential": true,
"portMappings": [
{
"containerPort": 80,
"hostPort": 0,
"protocol": "tcp"
}
],
"memory": 128,
"cpu": 10
}
]
DEFINITION
}
data "aws_ecs_task_definition" "nginx-image" {
depends_on = ["aws_ecs_task_definition.nginx-image"]
task_definition = "${aws_ecs_task_definition.nginx-image.family}"
}
resource "aws_launch_configuration" "ecs-launch-configuration" {
name = "ecs-launch-configuration"
image_id = "ami-0d5d9d301c853a04a"
instance_type = "t2.micro"
iam_instance_profile = "ecsInstanceRole"
root_block_device {
volume_type = "standard"
volume_size = 35
delete_on_termination = true
}
security_groups = ["${aws_security_group.ecs-vpc-secgroup.id}"]
associate_public_ip_address = "true"
key_name = "myapp"
user_data = <<-EOF
#!/bin/bash
echo ECS_CLUSTER=${aws_ecs_cluster.public-ecs-cluster.name} >> /etc/ecs/ecs.config
EOF
}
resource "aws_autoscaling_group" "ecs-autoscaling-group" {
name = "ecs-autoscaling-group"
max_size = "1"
min_size = "1"
desired_capacity = "1"
# vpc_zone_identifier = ["subnet-5c66053a", "subnet-9cd1a2d4"]
vpc_zone_identifier = module.vpc.public_subnets
launch_configuration = "${aws_launch_configuration.ecs-launch-configuration.name}"
health_check_type = "EC2"
default_cooldown = "300"
lifecycle {
create_before_destroy = true
}
tag {
key = "Name"
value = "wizardet972_ecs-instance"
propagate_at_launch = true
}
tag {
key = "Owner"
value = "Wizardnet972"
propagate_at_launch = true
}
}
resource "aws_autoscaling_policy" "ecs-scale" {
name = "ecs-scale-policy"
policy_type = "TargetTrackingScaling"
autoscaling_group_name = "${aws_autoscaling_group.ecs-autoscaling-group.name}"
estimated_instance_warmup = 60
target_tracking_configuration {
predefined_metric_specification {
predefined_metric_type = "ASGAverageCPUUtilization"
}
target_value = "70"
}
}
resource "aws_ecs_service" "nginx-ecs-service" {
name = "nginx-ecs-service"
cluster = "${aws_ecs_cluster.public-ecs-cluster.id}"
task_definition = "${aws_ecs_task_definition.nginx-image.family}:${max("${aws_ecs_task_definition.nginx-image.revision}", "${aws_ecs_task_definition.nginx-image.revision}")}"
launch_type = "EC2"
desired_count = 1
load_balancer {
target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
container_name = "nginx-web"
container_port = 80
}
depends_on = ["aws_ecs_task_definition.nginx-image"]
}
Update:
I tried to create the Terraform stack you shared with me and was able to reproduce the issue.
The problem was that the EC2 instance was unhealthy, so the Auto Scaling group kept terminating the instance and launching a new one.
The solution was to remove the following configuration; I think the standard volume_type was causing the trouble.
root_block_device {
volume_type = "standard"
volume_size = 100
delete_on_termination = true
}
Check whether you have done the basic steps to prepare the EC2 instance: use an ECS-optimized AMI to create the instance, and attach the AmazonEC2ContainerServiceforEC2Role policy to the instance's IAM role.
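A rough sketch of those two steps in Terraform, assuming the ecsInstanceRole role and instance profile already exist in the account (as the launch configuration above implies):
# Look up the current ECS-optimized Amazon Linux 2 AMI via the public SSM parameter
data "aws_ssm_parameter" "ecs_ami" {
  name = "/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id"
}

# Make sure the role behind the "ecsInstanceRole" profile can talk to ECS
resource "aws_iam_role_policy_attachment" "ecs_instance_role" {
  role       = "ecsInstanceRole" # assumed to exist already
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}

# Launch configuration adjusted to use the ECS-optimized AMI
resource "aws_launch_configuration" "ecs-launch-configuration" {
  name                        = "ecs-launch-configuration"
  image_id                    = data.aws_ssm_parameter.ecs_ami.value
  instance_type               = "t2.micro"
  iam_instance_profile        = "ecsInstanceRole"
  security_groups             = [aws_security_group.ecs-vpc-secgroup.id]
  associate_public_ip_address = true
  key_name                    = "myapp"

  user_data = <<-EOF
    #!/bin/bash
    echo ECS_CLUSTER=${aws_ecs_cluster.public-ecs-cluster.name} >> /etc/ecs/ecs.config
  EOF
}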
Reference:
AWS ECS Error when running task: No Container Instances were found in your cluster
setup instance role