ECS with Terraform - amazon-web-services

Is there a good / definitive reference or course for managing a ECS service using Terraform. I have referred this which creates the ECS Service, but I can't get to a state where my task runs on that cluster.
Here is what I have for now:
# create the VPC
resource "aws_vpc" "vpc" {
cidr_block = var.cidr_vpc
instance_tenancy = var.instanceTenancy
enable_dns_support = var.dnsSupport
enable_dns_hostnames = var.dnsHostNames
tags = {
Name = "tdemo"
}
}
# Create the Internet Gateway
resource "aws_internet_gateway" "igw" {
vpc_id = "${aws_vpc.vpc.id}"
tags = {
Name = "tdemo"
}
}
# Create the Public subnet
resource "aws_subnet" "subnet_public1" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = var.cidr_pubsubnet1
map_public_ip_on_launch = "true"
availability_zone = var.availability_zone1
tags = {
Name = "tdemo"
}
}
resource "aws_subnet" "subnet_public2" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = var.cidr_pubsubnet2
map_public_ip_on_launch = "true"
availability_zone = var.availability_zone2
tags = {
Name = "tdemo"
}
}
# Route table to connect to Internet Gateway
resource "aws_route_table" "rta_public" {
vpc_id = "${aws_vpc.vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.igw.id}"
}
tags = {
Name = "tdemo"
}
}
# Create Route Table Association to make the subet public over internet
resource "aws_route_table_association" "rta_subnet_public" {
subnet_id = "${aws_subnet.subnet_public1.id}"
route_table_id = "${aws_route_table.rta_public.id}"
}
# Configure Security Group inbound and outbound rules
resource "aws_security_group" "sg_22" {
name = "sg_22"
vpc_id = "${aws_vpc.vpc.id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "tdemo"
}
}
###############################################################################
resource "aws_iam_role" "ecs-service-role" {
name = "tdemo-ecs-service-role"
path = "/"
assume_role_policy = "${data.aws_iam_policy_document.ecs-service-policy.json}"
}
resource "aws_iam_role_policy_attachment" "ecs-service-role-attachment" {
role = "${aws_iam_role.ecs-service-role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole"
}
data "aws_iam_policy_document" "ecs-service-policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ecs.amazonaws.com"]
}
}
}
resource "aws_iam_role" "ecs-instance-role" {
name = "tdemo-ecs-instance-role"
path = "/"
assume_role_policy = "${data.aws_iam_policy_document.ecs-instance-policy.json}"
}
data "aws_iam_policy_document" "ecs-instance-policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role_policy_attachment" "ecs-instance-role-attachment" {
role = "${aws_iam_role.ecs-instance-role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs-instance-profile" {
name = "tdemo-ecs-instance-profile"
path = "/"
roles = ["${aws_iam_role.ecs-instance-role.id}"]
provisioner "local-exec" {
command = "ping 127.0.0.1 -n 11 > nul"
}
}
resource "aws_launch_configuration" "ecs-launch-configuration" {
name = "tdemo-ecs-launch-configuration"
image_id = var.amiid
instance_type = "t2.xlarge"
iam_instance_profile = "${aws_iam_instance_profile.ecs-instance-profile.id}"
root_block_device {
volume_type = "standard"
volume_size = 100
delete_on_termination = true
}
lifecycle {
create_before_destroy = true
}
security_groups = ["${aws_security_group.sg_22.id}"]
associate_public_ip_address = "true"
key_name = "${var.ecs_public_keyname}"
user_data = <<-EOF
#! /bin/bash
echo ECS_CLUSTER=your_cluster_name >> /etc/ecs/ecs.config
sudo sysctl -w vm.max_map_count=524288
sudo apt-get update
sudo apt-get install -y apache2
sudo systemctl start apache2
sudo systemctl enable apache2
echo "<h1>Deployed via Terraform</h1>" | sudo tee /var/www/html/index.html
EOF
}
resource "aws_ecs_cluster" "ecs-cluster" {
name = var.ecs_cluster
}
###############################################################################
data "aws_ecs_task_definition" "ecs_task_definition" {
task_definition = "${aws_ecs_task_definition.ecs_task_definition.family}"
}
resource "aws_ecs_task_definition" "ecs_task_definition" {
family = "hello_world"
container_definitions = <<DEFINITION
[
{
"name": "hello-world",
"image": "nginx:latest",
"essential": true,
"portMappings": [
{
"containerPort": 80,
"hostPort": 80
}
],
"memory": 500,
"cpu": 10
}
]
DEFINITION
}
resource "aws_alb" "ecs-load-balancer" {
name = "ecs-load-balancer"
security_groups = ["${aws_security_group.sg_22.id}"]
subnets = ["${aws_subnet.subnet_public1.id}", "${aws_subnet.subnet_public2.id}"]
tags = {
Name = "ecs-load-balancer"
}
}
resource "aws_alb_target_group" "ecs-target-group" {
name = "ecs-target-group"
port = "80"
protocol = "HTTP"
vpc_id = "${aws_vpc.vpc.id}"
health_check {
healthy_threshold = "5"
unhealthy_threshold = "2"
interval = "30"
matcher = "200"
path = "/"
port = "traffic-port"
protocol = "HTTP"
timeout = "5"
}
tags = {
Name = "ecs-target-group"
}
}
resource "aws_alb_listener" "alb-listener" {
load_balancer_arn = "${aws_alb.ecs-load-balancer.arn}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.ecs-target-group.arn}"
type = "forward"
}
}
resource "aws_autoscaling_group" "ecs-autoscaling-group" {
name = "ecs-autoscaling-group"
max_size = "${var.max_instance_size}"
min_size = "${var.min_instance_size}"
desired_capacity = "${var.desired_capacity}"
vpc_zone_identifier = ["${aws_subnet.subnet_public1.id}", "${aws_subnet.subnet_public2.id}"]
launch_configuration = "${aws_launch_configuration.ecs-launch-configuration.name}"
health_check_type = "ELB"
}
resource "aws_ecs_service" "ecs-service" {
name = "tdemo-ecs-service"
iam_role = "${aws_iam_role.ecs-service-role.name}"
cluster = "${aws_ecs_cluster.ecs-cluster.id}"
task_definition = "${aws_ecs_task_definition.ecs_task_definition.family}:${max("${aws_ecs_task_definition.ecs_task_definition.revision}", "${data.aws_ecs_task_definition.ecs_task_definition.revision}")}"
desired_count = 1
load_balancer {
target_group_arn = "${aws_alb_target_group.ecs-target-group.arn}"
container_port = 80
container_name = "hello-world"
}
}
Thanks,

One thing that is apparent and that may be the source of the issue (at least one of them) is:
echo ECS_CLUSTER=your_cluster_name >> /etc/ecs/ecs.config
However, your cluster name is var.ecs_cluster. Thus the above line should be:
echo ECS_CLUSTER=${var.ecs_cluster} >> /etc/ecs/ecs.config
Please note, that there could be many other issues, which are not that clear to spot without actually deploying your terraform script.

Related

use terraform to create an aws codedeploy ecs infrastructure

I tried to use terraform to setup aws codeploy ecs infrastructure, following aws documentation to understand aws deploy : https://docs.aws.amazon.com/AmazonECS/latest/developerguide/create-blue-green.html , reading this post to have an example (it uses EC2 instance) : https://hiveit.co.uk/techshop/terraform-aws-vpc-example/02-create-the-vpc/ and finally use reference into terraform documentation : https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codedeploy_deployment_group
The probleme is when I tried to make a deploy from aws codedeploy, the deployment is stuck in the install phase
Here is the terraform configuration I have done
# main.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
# defined in AWS_REGION env
# defined in AWS_ACCESS_KEY_ID env
# defined in AWS_SECRET_ACCESS_KEY env
}
# create repository to store docker image
resource "aws_ecr_repository" "repository" {
name = "test-repository"
}
# network.tf
resource "aws_vpc" "vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "terraform-example-vpc"
}
}
resource "aws_internet_gateway" "gateway" {
vpc_id = aws_vpc.vpc.id
tags = {
Name = "terraform-example-internet-gateway"
}
}
resource "aws_route" "route" {
route_table_id = aws_vpc.vpc.main_route_table_id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gateway.id
}
resource "aws_subnet" "main" {
count = length(data.aws_availability_zones.available.names)
vpc_id = aws_vpc.vpc.id
cidr_block = "10.0.${count.index}.0/24"
map_public_ip_on_launch = true
availability_zone = element(data.aws_availability_zones.available.names, count.index)
tags = {
Name = "public-subnet-${element(data.aws_availability_zones.available.names, count.index)}"
}
}
# loadbalancer.tf
resource "aws_security_group" "lb_security_group" {
name = "terraform_lb_security_group"
vpc_id = aws_vpc.vpc.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "terraform-example-lb-security-group"
}
}
resource "aws_lb" "lb" {
name = "terraform-example-lb"
security_groups = [aws_security_group.lb_security_group.id]
subnets = aws_subnet.main.*.id
tags = {
Name = "terraform-example-lb"
}
}
resource "aws_lb_target_group" "group1" {
name = "terraform-example-lb-target1"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.vpc.id
target_type = "ip"
}
resource "aws_lb_listener" "listener_http" {
load_balancer_arn = aws_lb.lb.arn
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = aws_lb_target_group.group1.arn
type = "forward"
}
}
# cluster.tf
resource "aws_ecs_cluster" "cluster" {
name = "terraform-example-cluster"
tags = {
Name = "terraform-example-cluster"
}
}
resource "aws_iam_role" "ecsTaskExecutionRole" {
name = "ecsTaskExecutionRole"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
"Sid" : "",
"Effect" : "Allow",
"Principal" : {
"Service" : "ecs-tasks.amazonaws.com"
},
"Action" : "sts:AssumeRole"
}
]
})
}
resource "aws_ecs_task_definition" "task_definition" {
family = "deployment-app"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = 256
memory = 512
execution_role_arn = aws_iam_role.ecsTaskExecutionRole.arn
container_definitions = jsonencode([
{
"name" : "app",
"image" : "httpd:2.4",
"portMappings" : [
{
"containerPort" : 80,
"hostPort" : 80,
"protocol" : "tcp"
}
],
"essential" : true
}
])
}
resource "aws_ecs_service" "service" {
cluster = aws_ecs_cluster.cluster.id
name = "terraform-example-service"
task_definition = "deployment-app"
launch_type = "FARGATE"
scheduling_strategy = "REPLICA"
platform_version = "LATEST"
desired_count = 1
load_balancer {
target_group_arn = aws_lb_target_group.group1.arn
container_name = "app"
container_port = 80
}
deployment_controller {
type = "CODE_DEPLOY"
}
network_configuration {
assign_public_ip = true
security_groups = [aws_security_group.lb_security_group.id]
subnets = aws_subnet.main.*.id
}
lifecycle {
ignore_changes = [desired_count, task_definition, platform_version]
}
}
# codedeploy.tf
resource "aws_codedeploy_app" "codedeploy_app" {
name = "example-codedeploy-app"
compute_platform = "ECS"
}
resource "aws_lb_target_group" "group2" {
name = "terraform-example-lb-target2"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.vpc.id
target_type = "ip"
}
resource "aws_codedeploy_deployment_group" "codedeploy_group" {
app_name = aws_codedeploy_app.codedeploy_app.name
deployment_group_name = "deployment_group_name"
service_role_arn = "###"
deployment_config_name = "CodeDeployDefault.ECSAllAtOnce"
auto_rollback_configuration {
enabled = true
events = ["DEPLOYMENT_FAILURE"]
}
blue_green_deployment_config {
deployment_ready_option {
action_on_timeout = "CONTINUE_DEPLOYMENT"
wait_time_in_minutes = 0
}
terminate_blue_instances_on_deployment_success {
action = "TERMINATE"
termination_wait_time_in_minutes = 1
}
}
deployment_style {
deployment_option = "WITH_TRAFFIC_CONTROL"
deployment_type = "BLUE_GREEN"
}
load_balancer_info {
target_group_pair_info {
target_group {
name = aws_lb_target_group.group1.name
}
target_group {
name = aws_lb_target_group.group2.name
}
prod_traffic_route {
listener_arns = [aws_lb_listener.listener_http.arn]
}
}
}
ecs_service {
cluster_name = aws_ecs_cluster.cluster.name
service_name = aws_ecs_service.service.name
}
}
# datasource.tf
data "aws_availability_zones" "available" {}
note: replace ### with the arn of the role AWSCodeDeployRoleForECS : https://docs.aws.amazon.com/AmazonECS/latest/developerguide/codedeploy_IAM_role.html I don't add it into terraform yet
after using
terraform plan
terraform apply
all the stack is set and i have access to the it works of httpd through the load balancer dns name
My probleme is when I push a new image to the repository, update the task definition and create a new deployment, this last one is stuck in the Step 1 without any error or whatever
For the example, I tried to push an nginx image instead of httpd
aws ecs register-task-definition \
--family=deployment-app \
--network-mode=awsvpc \
--cpu=256 \
--memory=512 \
--execution-role-arn=arn:aws:iam::__AWS_ACCOUNT__:role/ecsTaskExecutionRole \
--requires-compatibilities='["FARGATE"]' \
--container-definitions='[{"name": "app","image": "nginx:latest","portMappings": [{"containerPort": 80,"hostPort": 80,"protocol": "tcp"}],"essential": true}]'
I am using aws console to create deployment, with yaml appspec :
version: 0.0
Resources:
- TargetService:
Type: AWS::ECS::Service
Properties:
TaskDefinition: "arn:aws:ecs:eu-west-3:__AWS_ACCOUNT__:task-definition/deployment-app:9"
LoadBalancerInfo:
ContainerName: "app"
ContainerPort: 80
PlatformVersion: "LATEST"
Can anyone help me to understand my mistake ?
Thanks !
I didn't know where to find a log from codeploy to know what was the problem. Finally, I just needed to go to the service, and check the provisionning task, after that the task failed with error message.
The problem came from my ecsTaskExecutionRole because it didn't has enought ECR rights to pull the image I built

AWS EC2 instance not joining ECS cluster

I am quite desperate with an issue very similar to the one described into this thread.
https://github.com/OpenDroneMap/opendronemap-ecs/issues/14#issuecomment-432004023
When I attach the network interface to my EC2 instance, so that my custom VPC is used instead of the default one, the EC2 instance no longer joins the ECS cluster.
This is my terraform definition.
provider "aws" {}
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_support = true
enable_dns_hostnames = true
assign_generated_ipv6_cidr_block = true
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
}
resource "aws_subnet" "main" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.0.0/16"
availability_zone = "us-west-2a"
map_public_ip_on_launch = true
}
resource "aws_route_table" "main" {
vpc_id = aws_vpc.main.id
}
resource "aws_route_table_association" "rta1" {
subnet_id = aws_subnet.main.id
route_table_id = aws_route_table.main.id
}
resource "aws_route_table_association" "rta2" {
gateway_id = aws_internet_gateway.main.id
route_table_id = aws_route_table.main.id
}
resource "aws_security_group" "sg-jenkins" {
name = "sg_jenkins"
description = "Allow inbound traffic for Jenkins instance"
vpc_id = aws_vpc.main.id
ingress = [
{
description = "inbound all"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
self = null
prefix_list_ids = null
security_groups = null
}
]
egress = [
{
description = "outbound all"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
self = null
prefix_list_ids = null
security_groups = null
}
]
}
resource "aws_network_interface" "main" {
subnet_id = aws_subnet.main.id
security_groups = [aws_security_group.sg-jenkins.id]
}
resource "aws_instance" "ec2_instance" {
ami = "ami-07764a7d8502d36a2"
instance_type = "t2.micro"
iam_instance_profile = "ecsInstanceRole"
key_name = "fran"
network_interface {
device_index = 0
network_interface_id = aws_network_interface.main.id
}
user_data = <<EOF
#!/bin/bash
echo ECS_CLUSTER=cluster >> /etc/ecs/ecs.config
EOF
depends_on = [aws_internet_gateway.main]
}
### Task definition
resource "aws_ecs_task_definition" "jenkins-task" {
family = "namespace"
container_definitions = jsonencode([
{
name = "jenkins"
image = "cnservices/jenkins-master"
cpu = 10
memory = 512
essential = true
portMappings = [
{
containerPort = 8080
hostPort = 8080
}
]
}
])
# network_mode = "awsvpc"
volume {
name = "service-storage"
host_path = "/ecs/service-storage"
}
placement_constraints {
type = "memberOf"
expression = "attribute:ecs.availability-zone in [us-west-2a]"
}
}
### Cluster
resource "aws_ecs_cluster" "cluster" {
name = "cluster"
setting {
name = "containerInsights"
value = "enabled"
}
}
### Service
resource "aws_ecs_service" "jenkins-service" {
name = "jenkins-service"
cluster = aws_ecs_cluster.cluster.id
task_definition = aws_ecs_task_definition.jenkins-task.arn
desired_count = 1
# iam_role = aws_iam_role.foo.arn
# depends_on = [aws_iam_role_policy.foo]
# network_configuration {
# security_groups = [aws_security_group.sg-jenkins.id]
# subnets = [aws_subnet.main.id]
# }
ordered_placement_strategy {
type = "binpack"
field = "cpu"
}
placement_constraints {
type = "memberOf"
expression = "attribute:ecs.availability-zone in [us-west-2a]"
}
}
You haven't created a route to your IGW. Thus your instance can't connect to the ECS service to register with your cluster. So remove rta2 and add a route:
# not needed. to be removed.
# resource "aws_route_table_association" "rta2" {
# gateway_id = aws_internet_gateway.main.id
# route_table_id = aws_route_table.main.id
# }
# add a missing route to the IGW
resource "aws_route" "r" {
route_table_id = aws_route_table.main.id
gateway_id = aws_internet_gateway.main.id
destination_cidr_block = "0.0.0.0/0"
}

Allowing load-balanced autoscaled instances to connect to the internet - AWS / Terraform

I'm using Terraform and I'm having a tricky time with connecting my autoscaled AWS EC2 instances to the internet. I can launch a standalone EC2 that connects with no difficulty, but when I visit the public IP addresses of my instances created with an autoscaling group I get "This site can’t be reached xxx.xx.xxx.xxx unexpectedly closed the connection."
The main difference I'm seeing is that I can specify a network interface with an EC2, but I'm not sure how this would work with my launch template. My instances launch into different subnets in different availability zones, and the template is as follows:
provider "aws" {
region = "us-east-1"
access_key = "xxxxx"
secret_key = "xxxxx"
}
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello, world! > var/www/html/index.html'
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTPS from VPC"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_tls"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
network_interfaces {
device_index = 0
associate_public_ip_address = true
}
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}
#Test standalone instance
resource "aws_network_interface" "web_server_1" {
subnet_id = aws_subnet.subnet_1.id
private_ips = ["10.0.1.50"]
security_groups = [aws_security_group.allow_web.id]
}
resource "aws_instance" "ubuntu-1" {
ami = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
availability_zone = "us-east-1a" #hardcoded to ensure that subnet and instance are in same availability availability zone
network_interface {
device_index = 0
network_interface_id = aws_network_interface.web_server_1.id
}
user_data = <<-EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello! > var/www/html/index.html'
EOF
tags = {
Name = "web-server"
}
}
I modified a bit your template (user data, its indentation and aws_launch_template), and now it works now. It will work only over HTTP, as you don't have HTTPS setup, so don't need SG rules for HTTPS.
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
apt update -y
apt install apache2 -y
systemct1 start apache2
echo "hello, world!" > var/www/html/index.html
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_http"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
# DONT NEED THIS
# network_interfaces {
# device_index = 0
# associate_public_ip_address = true
# }
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}

AWS ECS Terraform: The requested configuration is currently not supported. Launching EC2 instance failed

I have been trying to spin up ECS using terraform. About two days ago it was working as expected, however today I tried to run terraform apply and I keep getting an error saying
"The requested configuration is currently not supported. Launching EC2 instance failed"
I have researched a lot about this issue, I tried hardcoding the VPC tenancy to default, I've tried changing the region, the instance type and nothing seems to fix the issue.
The is my terraform config:
provider "aws" {
region = var.region
}
data "aws_availability_zones" "available" {}
# Define a vpc
resource "aws_vpc" "motivy_vpc" {
cidr_block = var.motivy_network_cidr
tags = {
Name = var.motivy_vpc
}
enable_dns_support = "true"
instance_tenancy = "default"
enable_dns_hostnames = "true"
}
# Internet gateway for the public subnet
resource "aws_internet_gateway" "motivy_ig" {
vpc_id = aws_vpc.motivy_vpc.id
tags = {
Name = "motivy_ig"
}
}
# Public subnet 1
resource "aws_subnet" "motivy_public_sn_01" {
vpc_id = aws_vpc.motivy_vpc.id
cidr_block = var.motivy_public_01_cidr
availability_zone = data.aws_availability_zones.available.names[0]
tags = {
Name = "motivy_public_sn_01"
}
}
# Public subnet 2
resource "aws_subnet" "motivy_public_sn_02" {
vpc_id = aws_vpc.motivy_vpc.id
cidr_block = var.motivy_public_02_cidr
availability_zone = data.aws_availability_zones.available.names[1]
tags = {
Name = "motivy_public_sn_02"
}
}
# Routing table for public subnet 1
resource "aws_route_table" "motivy_public_sn_rt_01" {
vpc_id = aws_vpc.motivy_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.motivy_ig.id
}
tags = {
Name = "motivy_public_sn_rt_01"
}
}
# Routing table for public subnet 2
resource "aws_route_table" "motivy_public_sn_rt_02" {
vpc_id = aws_vpc.motivy_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.motivy_ig.id
}
tags = {
Name = "motivy_public_sn_rt_02"
}
}
# Associate the routing table to public subnet 1
resource "aws_route_table_association" "motivy_public_sn_rt_01_assn" {
subnet_id = aws_subnet.motivy_public_sn_01.id
route_table_id = aws_route_table.motivy_public_sn_rt_01.id
}
# Associate the routing table to public subnet 2
resource "aws_route_table_association" "motivy_public_sn_rt_02_assn" {
subnet_id = aws_subnet.motivy_public_sn_02.id
route_table_id = aws_route_table.motivy_public_sn_rt_02.id
}
# ECS Instance Security group
resource "aws_security_group" "motivy_public_sg" {
name = "motivys_public_sg"
description = "Test public access security group"
vpc_id = aws_vpc.motivy_vpc.id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = [
"0.0.0.0/0"]
}
ingress {
from_port = 5000
to_port = 5000
protocol = "tcp"
cidr_blocks = [
"0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = [
var.motivy_public_01_cidr,
var.motivy_public_02_cidr
]
}
egress {
# allow all traffic to private SN
from_port = "0"
to_port = "0"
protocol = "-1"
cidr_blocks = [
"0.0.0.0/0"]
}
tags = {
Name = "motivy_public_sg"
}
}
data "aws_ecs_task_definition" "motivy_server" {
task_definition = aws_ecs_task_definition.motivy_server.family
}
resource "aws_ecs_task_definition" "motivy_server" {
family = "motivy_server"
container_definitions = file("task-definitions/service.json")
}
data "aws_ami" "latest_ecs" {
most_recent = true # get the latest version
filter {
name = "name"
values = [
"amzn2-ami-ecs-*"] # ECS optimized image
}
owners = [
"amazon" # Only official images
]
}
resource "aws_launch_configuration" "ecs-launch-configuration" {
name = "ecs-launch-configuration"
image_id = data.aws_ami.latest_ecs.id
instance_type = "t2.micro"
iam_instance_profile = aws_iam_instance_profile.ecs-instance-profile.id
root_block_device {
volume_type = "standard"
volume_size = 100
delete_on_termination = true
}
enable_monitoring = true
lifecycle {
create_before_destroy = true
}
security_groups = [aws_security_group.motivy_public_sg.id]
associate_public_ip_address = "true"
key_name = var.ecs_key_pair_name
user_data = <<EOF
#!/bin/bash
echo ECS_CLUSTER=${var.ecs_cluster} >> /etc/ecs/ecs.config
EOF
}
resource "aws_appautoscaling_target" "ecs_motivy_server_target" {
max_capacity = 2
min_capacity = 1
resource_id = "service/${aws_ecs_cluster.motivy_ecs_cluster.name}/${aws_ecs_service.motivy_server_service.name}"
scalable_dimension = "ecs:service:DesiredCount"
service_namespace = "ecs"
depends_on = [ aws_ecs_service.motivy_server_service ]
}
resource "aws_iam_role" "ecs-instance-role" {
name = "ecs-instance-role"
path = "/"
assume_role_policy = data.aws_iam_policy_document.ecs-instance-policy.json
}
data "aws_iam_policy_document" "ecs-instance-policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role_policy_attachment" "ecs-instance-role-attachment" {
role = aws_iam_role.ecs-instance-role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs-instance-profile" {
name = "ecs-instance-profile"
path = "/"
role = aws_iam_role.ecs-instance-role.id
provisioner "local-exec" {
command = "sleep 10"
}
}
resource "aws_autoscaling_group" "motivy-server-autoscaling-group" {
name = "motivy-server-autoscaling-group"
termination_policies = [
"OldestInstance" # When a “scale down” event occurs, which instances to kill first?
]
default_cooldown = 30
health_check_grace_period = 30
max_size = var.max_instance_size
min_size = var.min_instance_size
desired_capacity = var.desired_capacity
# Use this launch configuration to define “how” the EC2 instances are to be launched
launch_configuration = aws_launch_configuration.ecs-launch-configuration.name
lifecycle {
create_before_destroy = true
}
# Refer to vpc.tf for more information
# You could use the private subnets here instead,
# if you want the EC2 instances to be hidden from the internet
vpc_zone_identifier = [aws_subnet.motivy_public_sn_01.id, aws_subnet.motivy_public_sn_02.id]
tags = [{
key = "Name",
value = var.ecs_cluster,
# Make sure EC2 instances are tagged with this tag as well
propagate_at_launch = true
}]
}
resource "aws_alb" "motivy_server_alb_load_balancer" {
name = "motivy-alb-load-balancer"
security_groups = [aws_security_group.motivy_public_sg.id]
subnets = [aws_subnet.motivy_public_sn_01.id, aws_subnet.motivy_public_sn_02.id]
tags = {
Name = "motivy_server_alb_load_balancer"
}
}
resource "aws_alb_target_group" "motivy_server_target_group" {
name = "motivy-server-target-group"
port = 5000
protocol = "HTTP"
vpc_id = aws_vpc.motivy_vpc.id
deregistration_delay = "10"
health_check {
healthy_threshold = "2"
unhealthy_threshold = "6"
interval = "30"
matcher = "200,301,302"
path = "/"
protocol = "HTTP"
timeout = "5"
}
stickiness {
type = "lb_cookie"
}
tags = {
Name = "motivy-server-target-group"
}
}
resource "aws_alb_listener" "alb-listener" {
load_balancer_arn = aws_alb.motivy_server_alb_load_balancer.arn
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.motivy_server_target_group.arn
type = "forward"
}
}
resource "aws_autoscaling_attachment" "asg_attachment_motivy_server" {
autoscaling_group_name = aws_autoscaling_group.motivy-server-autoscaling-group.id
alb_target_group_arn = aws_alb_target_group.motivy_server_target_group.arn
}
This is the exact error I get
Error: "motivy-server-autoscaling-group": Waiting up to 10m0s: Need at least 2 healthy instances in ASG, have 0. Most recent activity: {
ActivityId: "a775c531-9496-fdf9-5157-ab2448626293",
AutoScalingGroupName: "motivy-server-autoscaling-group",
Cause: "At 2020-04-05T22:10:28Z an instance was started in response to a difference between desired and actual capacity, increasing the capacity from 0 to 2.",
Description: "Launching a new EC2 instance. Status Reason: The requested configuration is currently not supported. Please check the documentation for supported configurations. Launching EC2 instance failed.",
Details: "{\"Subnet ID\":\"subnet-05de5fc0e994d05fe\",\"Availability Zone\":\"us-east-1a\"}",
EndTime: 2020-04-05 22:10:29 +0000 UTC,
Progress: 100,
StartTime: 2020-04-05 22:10:29.439 +0000 UTC,
StatusCode: "Failed",
StatusMessage: "The requested configuration is currently not supported. Please check the documentation for supported configurations. Launching EC2 instance failed."
}
I'm not sure why it worked two days ago.
But recent Amazon ECS-optimized AMIs' volume_type is gp2.
You should choose gp2 as root_block_device.volume_type.
resource "aws_launch_configuration" "ecs-launch-configuration" {
# ...
root_block_device {
volume_type = "gp2"
volume_size = 100
delete_on_termination = true
}
# ...
}
data "aws_ami" "latest_ecs" {
most_recent = true # get the latest version
filter {
name = "name"
values = ["amzn2-ami-ecs-hvm-*-x86_64-ebs"] # ECS optimized image
}
owners = [
"amazon" # Only official images
]
}
For me worked using t3 gen instead of t2

terraform - No Container Instances were found in your cluster

I deploy ecs using terraform.
When I run terraform apply everything is okay but when I browse to ecs service on events tab I have this error:
service nginx-ecs-service was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster.
How do I fix that? What is missing in my terraform file?
locals {
name = "myapp"
environment = "prod"
# This is the convention we use to know what belongs to each other
ec2_resources_name = "${local.name}-${local.environment}"
}
resource "aws_iam_server_certificate" "lb_cert" {
name = "lb_cert"
certificate_body = "${file("./www.example.com/cert.pem")}"
private_key = "${file("./www.example.com/privkey.pem")}"
certificate_chain = "${file("./www.example.com/chain.pem")}"
}
resource "aws_security_group" "bastion-sg" {
name = "bastion-security-group"
vpc_id = "${module.vpc.vpc_id}"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["0.0.0.0/0"]
}
egress {
protocol = -1
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "bastion" {
depends_on = ["aws_security_group.bastion-sg"]
ami = "ami-0d5d9d301c853a04a"
key_name = "myapp"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.bastion-sg.id}"]
associate_public_ip_address = true
subnet_id = "${element(module.vpc.public_subnets, 0)}"
tags = {
Name = "bastion"
}
}
# VPC Definition
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 2.0"
name = "my-vpc"
cidr = "10.1.0.0/16"
azs = ["us-east-2a", "us-east-2b", "us-east-2c"]
private_subnets = ["10.1.1.0/24", "10.1.2.0/24", "10.1.3.0/24"]
public_subnets = ["10.1.101.0/24", "10.1.102.0/24", "10.1.103.0/24"]
single_nat_gateway = true
enable_nat_gateway = true
enable_vpn_gateway = false
enable_dns_hostnames = true
public_subnet_tags = {
Name = "public"
}
private_subnet_tags = {
Name = "private"
}
public_route_table_tags = {
Name = "public-RT"
}
private_route_table_tags = {
Name = "private-RT"
}
tags = {
Environment = local.environment
Name = local.name
}
}
# ------------
resource "aws_ecs_cluster" "public-ecs-cluster" {
name = "myapp-${local.environment}"
lifecycle {
create_before_destroy = true
}
}
resource "aws_security_group" "ecs-vpc-secgroup" {
name = "ecs-vpc-secgroup"
description = "ecs-vpc-secgroup"
# vpc_id = "vpc-b8daecde"
vpc_id = "${module.vpc.vpc_id}"
ingress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "ecs-security-group"
}
}
resource "aws_lb" "nginx-ecs-alb" {
name = "nginx-ecs-alb"
internal = false
load_balancer_type = "application"
subnets = module.vpc.public_subnets
security_groups = ["${aws_security_group.ecs-vpc-secgroup.id}"]
}
resource "aws_alb_target_group" "nginx-ecs-tg" {
name = "nginx-ecs-tg"
port = "80"
protocol = "HTTP"
vpc_id = "${module.vpc.vpc_id}"
health_check {
healthy_threshold = 3
unhealthy_threshold = 10
timeout = 5
interval = 10
path = "/"
}
depends_on = ["aws_lb.nginx-ecs-alb"]
}
resource "aws_alb_listener" "alb_listener" {
load_balancer_arn = "${aws_lb.nginx-ecs-alb.arn}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
type = "forward"
}
}
resource "aws_ecs_task_definition" "nginx-image" {
family = "nginx-server"
network_mode = "bridge"
container_definitions = <<DEFINITION
[
{
"name": "nginx-web",
"image": "nginx:latest",
"essential": true,
"portMappings": [
{
"containerPort": 80,
"hostPort": 0,
"protocol": "tcp"
}
],
"memory": 128,
"cpu": 10
}
]
DEFINITION
}
data "aws_ecs_task_definition" "nginx-image" {
depends_on = ["aws_ecs_task_definition.nginx-image"]
task_definition = "${aws_ecs_task_definition.nginx-image.family}"
}
resource "aws_launch_configuration" "ecs-launch-configuration" {
name = "ecs-launch-configuration"
image_id = "ami-0d5d9d301c853a04a"
instance_type = "t2.micro"
iam_instance_profile = "ecsInstanceRole"
root_block_device {
volume_type = "standard"
volume_size = 35
delete_on_termination = true
}
security_groups = ["${aws_security_group.ecs-vpc-secgroup.id}"]
associate_public_ip_address = "true"
key_name = "myapp"
user_data = <<-EOF
#!/bin/bash
echo ECS_CLUSTER=${aws_ecs_cluster.public-ecs-cluster.name} >> /etc/ecs/ecs.config
EOF
}
resource "aws_autoscaling_group" "ecs-autoscaling-group" {
name = "ecs-autoscaling-group"
max_size = "1"
min_size = "1"
desired_capacity = "1"
# vpc_zone_identifier = ["subnet-5c66053a", "subnet-9cd1a2d4"]
vpc_zone_identifier = module.vpc.public_subnets
launch_configuration = "${aws_launch_configuration.ecs-launch-configuration.name}"
health_check_type = "EC2"
default_cooldown = "300"
lifecycle {
create_before_destroy = true
}
tag {
key = "Name"
value = "wizardet972_ecs-instance"
propagate_at_launch = true
}
tag {
key = "Owner"
value = "Wizardnet972"
propagate_at_launch = true
}
}
resource "aws_autoscaling_policy" "ecs-scale" {
name = "ecs-scale-policy"
policy_type = "TargetTrackingScaling"
autoscaling_group_name = "${aws_autoscaling_group.ecs-autoscaling-group.name}"
estimated_instance_warmup = 60
target_tracking_configuration {
predefined_metric_specification {
predefined_metric_type = "ASGAverageCPUUtilization"
}
target_value = "70"
}
}
resource "aws_ecs_service" "nginx-ecs-service" {
name = "nginx-ecs-service"
cluster = "${aws_ecs_cluster.public-ecs-cluster.id}"
task_definition = "${aws_ecs_task_definition.nginx-image.family}:${max("${aws_ecs_task_definition.nginx-image.revision}", "${aws_ecs_task_definition.nginx-image.revision}")}"
launch_type = "EC2"
desired_count = 1
load_balancer {
target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
container_name = "nginx-web"
container_port = 80
}
depends_on = ["aws_ecs_task_definition.nginx-image"]
}
Update:
I tried to create the terraform stack you shared with me, I was able to reproduce the issue.
The issue was, The ec2 instance was unhealthy and the autoscaling group was continuously terminating the instance and launch a new one.
the solution was to remove the following configuration.I think the volume_type standard was causing trouble.
root_block_device {
volume_type = "standard"
volume_size = 100
delete_on_termination = true
}
See if you have done the basic steps to prepare the ec2 instance. You should use an ecs-optimized ami to create the instance and then attach the AmazonEC2ContainerServiceforEC2Role permission to IAM role.
Reference:
AWS ECS Error when running task: No Container Instances were found in your cluster
setup instance role