I am quite desperate about an issue very similar to the one described in this thread:
https://github.com/OpenDroneMap/opendronemap-ecs/issues/14#issuecomment-432004023
When I attach the network interface to my EC2 instance, so that my custom VPC is used instead of the default one, the EC2 instance no longer joins the ECS cluster.
This is my Terraform definition:
provider "aws" {}
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_support = true
enable_dns_hostnames = true
assign_generated_ipv6_cidr_block = true
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
}
resource "aws_subnet" "main" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.0.0/16"
availability_zone = "us-west-2a"
map_public_ip_on_launch = true
}
resource "aws_route_table" "main" {
vpc_id = aws_vpc.main.id
}
resource "aws_route_table_association" "rta1" {
subnet_id = aws_subnet.main.id
route_table_id = aws_route_table.main.id
}
resource "aws_route_table_association" "rta2" {
gateway_id = aws_internet_gateway.main.id
route_table_id = aws_route_table.main.id
}
resource "aws_security_group" "sg-jenkins" {
name = "sg_jenkins"
description = "Allow inbound traffic for Jenkins instance"
vpc_id = aws_vpc.main.id
ingress = [
{
description = "inbound all"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
self = null
prefix_list_ids = null
security_groups = null
}
]
egress = [
{
description = "outbound all"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
self = null
prefix_list_ids = null
security_groups = null
}
]
}
resource "aws_network_interface" "main" {
subnet_id = aws_subnet.main.id
security_groups = [aws_security_group.sg-jenkins.id]
}
resource "aws_instance" "ec2_instance" {
ami = "ami-07764a7d8502d36a2"
instance_type = "t2.micro"
iam_instance_profile = "ecsInstanceRole"
key_name = "fran"
network_interface {
device_index = 0
network_interface_id = aws_network_interface.main.id
}
user_data = <<EOF
#!/bin/bash
echo ECS_CLUSTER=cluster >> /etc/ecs/ecs.config
EOF
depends_on = [aws_internet_gateway.main]
}
### Task definition
resource "aws_ecs_task_definition" "jenkins-task" {
family = "namespace"
container_definitions = jsonencode([
{
name = "jenkins"
image = "cnservices/jenkins-master"
cpu = 10
memory = 512
essential = true
portMappings = [
{
containerPort = 8080
hostPort = 8080
}
]
}
])
# network_mode = "awsvpc"
volume {
name = "service-storage"
host_path = "/ecs/service-storage"
}
placement_constraints {
type = "memberOf"
expression = "attribute:ecs.availability-zone in [us-west-2a]"
}
}
### Cluster
resource "aws_ecs_cluster" "cluster" {
name = "cluster"
setting {
name = "containerInsights"
value = "enabled"
}
}
### Service
resource "aws_ecs_service" "jenkins-service" {
name = "jenkins-service"
cluster = aws_ecs_cluster.cluster.id
task_definition = aws_ecs_task_definition.jenkins-task.arn
desired_count = 1
# iam_role = aws_iam_role.foo.arn
# depends_on = [aws_iam_role_policy.foo]
# network_configuration {
# security_groups = [aws_security_group.sg-jenkins.id]
# subnets = [aws_subnet.main.id]
# }
ordered_placement_strategy {
type = "binpack"
field = "cpu"
}
placement_constraints {
type = "memberOf"
expression = "attribute:ecs.availability-zone in [us-west-2a]"
}
}
You haven't created a route to your IGW, so your instance has no path to the internet and can't reach the ECS endpoint to register with your cluster. Also, rta2 doesn't do what you may expect: associating a route table with a gateway is for edge associations, not for adding routes. So remove rta2 and add a route:
# not needed. to be removed.
# resource "aws_route_table_association" "rta2" {
# gateway_id = aws_internet_gateway.main.id
# route_table_id = aws_route_table.main.id
# }
# add a missing route to the IGW
resource "aws_route" "r" {
route_table_id = aws_route_table.main.id
gateway_id = aws_internet_gateway.main.id
destination_cidr_block = "0.0.0.0/0"
}
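Equivalently, the default route can be declared inline on the route table itself; just don't mix inline route blocks with standalone aws_route resources on the same table, as Terraform treats them as conflicting. A minimal sketch:
resource "aws_route_table" "main" {
  vpc_id = aws_vpc.main.id

  # default route straight to the internet gateway
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.main.id
  }
}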
Related
Hi, I'm a beginner playing with VPCs on AWS and Terraform, and I'm stuck on an ALB health check issue.
I have two AZs, each with an EC2 instance running a webserver. My goal is to set up the load balancer so that I get redirected to one of my EC2 instances. The instances are in private subnets behind a NAT gateway with an elastic IP.
I set up a bastion host to check over SSH whether my EC2 instances could reach the internet, and the answer is yes.
This is my Terraform setup (maybe there is an obvious error that I haven't seen):
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
shared_credentials_file = "./aws/credentials"
region = "us-east-1"
}
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "my-vpc"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "my-internet-gateway"
}
}
resource "aws_subnet" "public_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "my-public-a-subnet"
}
}
resource "aws_subnet" "public_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "my-public-b-subnet"
}
}
resource "aws_subnet" "private_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1a"
tags = {
Name = "my-private-a-subnet"
}
}
resource "aws_subnet" "private_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.4.0/24"
availability_zone = "us-east-1b"
tags = {
Name = "my-private-b-subnet"
}
}
resource "aws_nat_gateway" "main" {
allocation_id = aws_eip.main.id
subnet_id = aws_subnet.public_a.id
}
resource "aws_eip" "main" {
vpc = true
tags = {
Name = "my-nat-gateway-eip"
}
}
resource "aws_security_group" "main" {
name = "my-security-group"
description = "Allow HTTP and SSH access"
vpc_id = aws_vpc.main.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "my-security-group"
}
}
resource "aws_instance" "ec2_a" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-a"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "ec2_b" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_b.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-b"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "bastion" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.public_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-bastion"
}
key_name = "vockey"
user_data = file("user_data_bastion.sh")
}
resource "aws_alb" "main" {
name = "my-alb"
internal = false
security_groups = [aws_security_group.main.id]
subnets = [aws_subnet.public_a.id, aws_subnet.public_b.id]
tags = {
Name = "my-alb"
}
}
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
tags = {
Name = "my-alb-target-group"
}
}
resource "aws_route_table" "private" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_nat_gateway.main.id
}
tags = {
Name = "my-private-route-table"
}
}
resource "aws_route_table_association" "private_a" {
subnet_id = aws_subnet.private_a.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table_association" "private_b" {
subnet_id = aws_subnet.private_b.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
tags = {
Name = "My Public Route Table"
}
}
resource "aws_route_table_association" "public_a" {
subnet_id = aws_subnet.public_a.id
route_table_id = aws_route_table.public.id
}
resource "aws_route_table_association" "public_b" {
subnet_id = aws_subnet.public_b.id
route_table_id = aws_route_table.public.id
}
resource "aws_alb_listener" "main" {
load_balancer_arn = aws_alb.main.arn
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.ec2.arn
type = "forward"
}
}
resource "aws_alb_target_group_attachment" "ec2_a" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_a.id
port = 80
}
resource "aws_alb_target_group_attachment" "ec2_b" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_b.id
port = 80
}
It looks like you don't have a health_check block on the aws_alb_target_group resource. Try adding something like this:
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
health_check {
path = "/"
matcher = "200"
}
tags = {
Name = "my-alb-target-group"
}
}
Also, make sure that the HTTP services on your EC2 instances are listening and accepting connections on port 80. You should be able to curl http://<ec2 ip address> (from the bastion host, since the instances are in private subnets) and get a 200 response.
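If the checks still fail after that, the timing and thresholds can be tuned explicitly. A sketch using the aws_alb_target_group health_check attributes:
health_check {
  path                = "/"
  matcher             = "200"   # HTTP codes counted as healthy
  interval            = 30      # seconds between checks
  timeout             = 5       # seconds before a check counts as failed
  healthy_threshold   = 2       # consecutive successes to become healthy
  unhealthy_threshold = 3       # consecutive failures to become unhealthy
}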
I got the following setup to create the networking requirements for a Fargate setup:
resource "aws_vpc" "main" {
cidr_block = var.cidr
tags = {
Environment = var.environment
DO_NOT_DELETE = true
CreatedBy = "terraform"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
data "aws_availability_zones" "region_azs" {
state = "available"
}
locals {
az_count = length(data.aws_availability_zones.region_azs.names)
}
resource "aws_subnet" "private" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index)
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
tags = {
Name = "public-subnet-${data.aws_availability_zones.region_azs.names[count.index]}"
AvailabilityZone = data.aws_availability_zones.region_azs.names[count.index]
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "private"
DO_NOT_DELETE = true
}
}
resource "aws_subnet" "public" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index + local.az_count )
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
map_public_ip_on_launch = true
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
DO_NOT_DELETE = true
Type = "public"
}
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "public"
}
}
resource "aws_route" "public" {
route_table_id = aws_route_table.public.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
resource "aws_route_table_association" "public" {
count = local.az_count
subnet_id = element(aws_subnet.public.*.id, count.index)
route_table_id = aws_route_table.public.id
}
resource "aws_nat_gateway" "main" {
count = local.az_count
allocation_id = element(aws_eip.nat.*.id, count.index)
subnet_id = element(aws_subnet.public.*.id, count.index)
depends_on = [aws_internet_gateway.main]
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_eip" "nat" {
count = local.az_count
vpc = true
tags = {
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_route_table" "private" {
count = local.az_count
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Type = "private"
Vpc = aws_vpc.main.id
}
}
resource "aws_route" "private" {
count = local.az_count
route_table_id = element(aws_route_table.private.*.id, count.index)
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.main.*.id, count.index)
}
resource "aws_route_table_association" "private" {
count = local.az_count
subnet_id = element(aws_subnet.private.*.id, count.index)
route_table_id = element(aws_route_table.private.*.id, count.index)
}
resource "aws_security_group" "alb" {
name = "${var.resources_name_prefix}-alb-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
ingress {
protocol = "tcp"
from_port = 443
to_port = 443
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_security_group" "ecs_tasks" {
name = "${var.resources_name_prefix}-ecs-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 3000
to_port = 3000
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
This has been working great for a couple of availability zones, but now that I'm dynamically creating subnets for running tasks in every AZ of the region, I'm hitting the limit of Elastic IPs per region.
So I'm getting this error while trying to create the stack:
Error creating EIP: AddressLimitExceeded: The maximum number of addresses has been reached.
status code: 400
I'm wondering whether the following part:
resource "aws_nat_gateway" "main" {
count = local.az_count
allocation_id = element(aws_eip.nat.*.id, count.index)
subnet_id = element(aws_subnet.public.*.id, count.index)
depends_on = [aws_internet_gateway.main]
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_eip" "nat" {
count = local.az_count
vpc = true
tags = {
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
could be restructured to use a single EIP with internal routing, if that makes sense.
I modified your code a bit, but it was a mess; for example, all the private subnets were named "public". It creates two NATs now, and because element() wraps around its list, every private route table still gets a default route to one of the two NATs no matter how many AZs you use. Obviously, if you have subnets in, let's say, 6 AZs, there will be some cross-AZ traffic to reach those NATs.
Alternatively, simply don't create VPCs spanning so many AZs. Typically only two or three AZs are used for a VPC; having more than that is rarely needed.
Finally, you can ask AWS support to raise your EIP limit if you want to preserve your original setup.
resource "aws_vpc" "main" {
cidr_block = var.cidr
tags = {
Environment = var.environment
DO_NOT_DELETE = true
CreatedBy = "terraform"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
data "aws_availability_zones" "region_azs" {
state = "available"
}
locals {
az_count = length(data.aws_availability_zones.region_azs.names)
}
resource "aws_subnet" "private" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index)
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
tags = {
Name = "private-subnet-${data.aws_availability_zones.region_azs.names[count.index]}"
AvailabilityZone = data.aws_availability_zones.region_azs.names[count.index]
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "private"
DO_NOT_DELETE = true
}
}
resource "aws_subnet" "public" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index + local.az_count )
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
map_public_ip_on_launch = true
tags = {
Name = "public-subnet-${data.aws_availability_zones.region_azs.names[count.index]}"
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
DO_NOT_DELETE = true
Type = "public"
}
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "public"
}
}
resource "aws_route" "public" {
route_table_id = aws_route_table.public.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
resource "aws_route_table_association" "public" {
count = local.az_count
subnet_id = element(aws_subnet.public.*.id, count.index)
route_table_id = aws_route_table.public.id
}
resource "aws_nat_gateway" "main" {
count = 2
allocation_id = element(aws_eip.nat.*.id, count.index)
subnet_id = element(aws_subnet.public.*.id, count.index)
depends_on = [aws_internet_gateway.main]
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_eip" "nat" {
count = 2
vpc = true
tags = {
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_route_table" "private" {
count = local.az_count
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Type = "private"
Vpc = aws_vpc.main.id
}
}
resource "aws_route" "private" {
count = local.az_count
route_table_id = element(aws_route_table.private.*.id, count.index)
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.main.*.id, count.index)
}
resource "aws_route_table_association" "private" {
count = local.az_count
subnet_id = element(aws_subnet.private.*.id, count.index)
route_table_id = element(aws_route_table.private.*.id, count.index)
}
resource "aws_security_group" "alb" {
name = "${var.resources_name_prefix}-alb-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
ingress {
protocol = "tcp"
from_port = 443
to_port = 443
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_security_group" "ecs_tasks" {
name = "${var.resources_name_prefix}-ecs-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 3000
to_port = 3000
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
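If cost matters more than AZ redundancy, the same pattern collapses to a single EIP and a single NAT with one shared private route table. A minimal sketch under those assumptions (every private subnet then egresses through one AZ, so that AZ becomes a single point of failure for private egress):
resource "aws_eip" "nat_single" {
  vpc = true
}

resource "aws_nat_gateway" "single" {
  allocation_id = aws_eip.nat_single.id
  subnet_id     = aws_subnet.public[0].id
  depends_on    = [aws_internet_gateway.main]
}

# one route table shared by all private subnets
resource "aws_route_table" "private_shared" {
  vpc_id = aws_vpc.main.id
}

resource "aws_route" "private_shared" {
  route_table_id         = aws_route_table.private_shared.id
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = aws_nat_gateway.single.id
}

resource "aws_route_table_association" "private_shared" {
  count          = local.az_count
  subnet_id      = aws_subnet.private[count.index].id
  route_table_id = aws_route_table.private_shared.id
}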
I'm using Terraform and I'm having a tricky time connecting my autoscaled AWS EC2 instances to the internet. I can launch a standalone EC2 instance that connects with no difficulty, but when I visit the public IP addresses of the instances created by my autoscaling group I get "This site can’t be reached xxx.xx.xxx.xxx unexpectedly closed the connection."
The main difference I'm seeing is that I can specify a network interface on a standalone EC2 instance, but I'm not sure how that would work with my launch template. My instances launch into different subnets in different availability zones, and the template is as follows:
provider "aws" {
region = "us-east-1"
access_key = "xxxxx"
secret_key = "xxxxx"
}
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello, world! > var/www/html/index.html'
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTPS from VPC"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_tls"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
network_interfaces {
device_index = 0
associate_public_ip_address = true
}
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}
#Test standalone instance
resource "aws_network_interface" "web_server_1" {
subnet_id = aws_subnet.subnet_1.id
private_ips = ["10.0.1.50"]
security_groups = [aws_security_group.allow_web.id]
}
resource "aws_instance" "ubuntu-1" {
ami = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
availability_zone = "us-east-1a" #hardcoded to ensure that subnet and instance are in same availability availability zone
network_interface {
device_index = 0
network_interface_id = aws_network_interface.web_server_1.id
}
user_data = <<-EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello! > var/www/html/index.html'
EOF
tags = {
Name = "web-server"
}
}
I modified your template a bit (the user data, its indentation, and the aws_launch_template), and it works now. It will only work over HTTP, as you don't have an HTTPS setup, so you don't need SG rules for HTTPS.
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
apt update -y
apt install apache2 -y
systemctl start apache2
echo "hello, world!" > /var/www/html/index.html
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_http"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
# DONT NEED THIS
# network_interfaces {
# device_index = 0
# associate_public_ip_address = true
# }
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}
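Side note: the template_file data source comes from the deprecated template provider; on Terraform 0.12+ the same user data can be passed inline. A sketch:
resource "aws_launch_template" "frontend" {
  name                   = "frontend"
  image_id               = "ami-0ee02acd56a52998e"
  instance_type          = "t2.micro"
  vpc_security_group_ids = [aws_security_group.allow_web.id]

  # heredoc rendered and base64-encoded directly, no template provider needed
  user_data = base64encode(<<-EOF
    #!/bin/bash
    apt update -y
    apt install -y apache2
    systemctl start apache2
    echo "hello, world!" > /var/www/html/index.html
  EOF
  )
}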
I would like to ask for assistance.
I wrote a Terraform script which creates 5 EC2 instances, but I am not able to ping or SSH into them.
Do you see any potential issue with this? I have opened ICMP and SSH, but when I check from other computers/sites, the ports show as closed.
When I create an EC2 instance manually it works: I am able to SSH and ping it from my computer, but not with this Terraform script.
provider "aws" {
version = "~> 3.0"
region = "us-east-1"
access_key = "AKxxxxxxxxxxx"
secret_key = "2CLBj/s9dC5r52Y"
}
# Create a VPC
resource "aws_vpc" "BrokenByteVPC" {
cidr_block = "192.168.100.0/28"
tags = {
Name = "BrokenByteVPC"
}
}
resource "aws_subnet" "BrokenbyteLB-subnet" {
vpc_id = aws_vpc.BrokenByteVPC.id
cidr_block = "192.168.100.0/28"
availability_zone = "us-east-1a"
tags = {
Name = "BrokenbyteLB-subnet"
}
}
resource "aws_internet_gateway" "BrokenByte-gateway" {
vpc_id = aws_vpc.BrokenByteVPC.id
tags = {
Name = "BrokenByte-gateway"
}
}
resource "aws_route_table" "BrokenByte-Route-table" {
vpc_id = aws_vpc.BrokenByteVPC.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.BrokenByte-gateway.id
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
route_table_id = aws_route_table.BrokenByte-Route-table.id
}
resource "aws_security_group" "allow_traffic" {
name = "allow_Traffic"
description = "Allow SSH,HTTP and HTTPS inbound traffic"
vpc_id = aws_vpc.BrokenByteVPC.id
ingress {
description = "Dozvoli SVEEEEEEEE"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "SSH traffic"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP traffic"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTPS traffic"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "Allow_ssh_http_https"
}
}
resource "aws_network_interface" "NginX-public" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
#private_ips = ["192.168.100.2"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "NginX-LB" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.10"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "www1" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.11"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "www2" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.12"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "www3" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.13"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_eip" "BrokenByte-PublicIP" {
vpc = true
network_interface = aws_network_interface.NginX-public.id
#associate_with_private_ip = "192.168.100.10"
depends_on = [aws_internet_gateway.BrokenByte-gateway, aws_instance.BrokenByteNginX]
}
resource "aws_instance" "BrokenByteNginX" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.NginX-LB.id
}
network_interface {
device_index=1
network_interface_id = aws_network_interface.NginX-public.id
}
tags = {
Name = "BrokenByteNginXLB"
}
user_data = <<-EOF
#!/bin/bash
sudo apt-get update -y
EOF
}
resource "aws_instance" "BrokenByteWWW1" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.www1.id
}
tags = {
Name = "BrokenByteWWW1"
}
}
resource "aws_instance" "BrokenByteWWW2" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.www2.id
}
tags = {
Name = "BrokenByteWWW2"
}
}
resource "aws_instance" "BrokenByteWWW3" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.www3.id
}
tags = {
Name = "BrokenByteWWW3"
}
}
None of your instances has a public IP address (except the one with aws_eip.BrokenByte-PublicIP), since your public subnet is missing map_public_ip_on_launch. You can rectify this as follows:
resource "aws_subnet" "BrokenbyteLB-subnet" {
vpc_id = aws_vpc.BrokenByteVPC.id
cidr_block = "192.168.100.0/28"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "BrokenbyteLB-subnet"
}
}
I was sure it was something related to the network interfaces, but I wasn't sure what.
Now it's fine: I can ping and SSH. I just swapped the public-IP interface to device index 0 and removed the code for the other interface.
@Marcin, your first reply showed me which direction to look in.
# network_interface {
# device_index=0
# network_interface_id = aws_network_interface.NginX-LB.id
# }
network_interface {
device_index=0
network_interface_id = aws_network_interface.NginX-public.id
}
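For reference, once the subnet assigns public IPs at launch, the hand-managed ENIs can go away entirely and AWS creates the primary interface itself. A minimal sketch for one of the webservers:
resource "aws_instance" "www1" {
  ami                    = "ami-0dba2cb6798deb6d8"
  instance_type          = "t2.micro"
  key_name               = "aws_test"
  # subnet placement implies the AZ; no explicit ENI needed
  subnet_id              = aws_subnet.BrokenbyteLB-subnet.id
  vpc_security_group_ids = [aws_security_group.allow_traffic.id]

  tags = {
    Name = "BrokenByteWWW1"
  }
}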
I deploy ECS using Terraform.
When I run terraform apply everything is okay, but when I browse to the ECS service's Events tab I see this error:
service nginx-ecs-service was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster.
How do I fix that? What is missing in my Terraform file?
locals {
name = "myapp"
environment = "prod"
# This is the convention we use to know what belongs to each other
ec2_resources_name = "${local.name}-${local.environment}"
}
resource "aws_iam_server_certificate" "lb_cert" {
name = "lb_cert"
certificate_body = "${file("./www.example.com/cert.pem")}"
private_key = "${file("./www.example.com/privkey.pem")}"
certificate_chain = "${file("./www.example.com/chain.pem")}"
}
resource "aws_security_group" "bastion-sg" {
name = "bastion-security-group"
vpc_id = "${module.vpc.vpc_id}"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["0.0.0.0/0"]
}
egress {
protocol = -1
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "bastion" {
depends_on = ["aws_security_group.bastion-sg"]
ami = "ami-0d5d9d301c853a04a"
key_name = "myapp"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.bastion-sg.id}"]
associate_public_ip_address = true
subnet_id = "${element(module.vpc.public_subnets, 0)}"
tags = {
Name = "bastion"
}
}
# VPC Definition
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 2.0"
name = "my-vpc"
cidr = "10.1.0.0/16"
azs = ["us-east-2a", "us-east-2b", "us-east-2c"]
private_subnets = ["10.1.1.0/24", "10.1.2.0/24", "10.1.3.0/24"]
public_subnets = ["10.1.101.0/24", "10.1.102.0/24", "10.1.103.0/24"]
single_nat_gateway = true
enable_nat_gateway = true
enable_vpn_gateway = false
enable_dns_hostnames = true
public_subnet_tags = {
Name = "public"
}
private_subnet_tags = {
Name = "private"
}
public_route_table_tags = {
Name = "public-RT"
}
private_route_table_tags = {
Name = "private-RT"
}
tags = {
Environment = local.environment
Name = local.name
}
}
# ------------
resource "aws_ecs_cluster" "public-ecs-cluster" {
name = "myapp-${local.environment}"
lifecycle {
create_before_destroy = true
}
}
resource "aws_security_group" "ecs-vpc-secgroup" {
name = "ecs-vpc-secgroup"
description = "ecs-vpc-secgroup"
# vpc_id = "vpc-b8daecde"
vpc_id = "${module.vpc.vpc_id}"
ingress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "ecs-security-group"
}
}
resource "aws_lb" "nginx-ecs-alb" {
name = "nginx-ecs-alb"
internal = false
load_balancer_type = "application"
subnets = module.vpc.public_subnets
security_groups = ["${aws_security_group.ecs-vpc-secgroup.id}"]
}
resource "aws_alb_target_group" "nginx-ecs-tg" {
name = "nginx-ecs-tg"
port = "80"
protocol = "HTTP"
vpc_id = "${module.vpc.vpc_id}"
health_check {
healthy_threshold = 3
unhealthy_threshold = 10
timeout = 5
interval = 10
path = "/"
}
depends_on = ["aws_lb.nginx-ecs-alb"]
}
resource "aws_alb_listener" "alb_listener" {
load_balancer_arn = "${aws_lb.nginx-ecs-alb.arn}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
type = "forward"
}
}
resource "aws_ecs_task_definition" "nginx-image" {
family = "nginx-server"
network_mode = "bridge"
container_definitions = <<DEFINITION
[
{
"name": "nginx-web",
"image": "nginx:latest",
"essential": true,
"portMappings": [
{
"containerPort": 80,
"hostPort": 0,
"protocol": "tcp"
}
],
"memory": 128,
"cpu": 10
}
]
DEFINITION
}
data "aws_ecs_task_definition" "nginx-image" {
depends_on = ["aws_ecs_task_definition.nginx-image"]
task_definition = "${aws_ecs_task_definition.nginx-image.family}"
}
resource "aws_launch_configuration" "ecs-launch-configuration" {
name = "ecs-launch-configuration"
image_id = "ami-0d5d9d301c853a04a"
instance_type = "t2.micro"
iam_instance_profile = "ecsInstanceRole"
root_block_device {
volume_type = "standard"
volume_size = 35
delete_on_termination = true
}
security_groups = ["${aws_security_group.ecs-vpc-secgroup.id}"]
associate_public_ip_address = "true"
key_name = "myapp"
user_data = <<-EOF
#!/bin/bash
echo ECS_CLUSTER=${aws_ecs_cluster.public-ecs-cluster.name} >> /etc/ecs/ecs.config
EOF
}
resource "aws_autoscaling_group" "ecs-autoscaling-group" {
name = "ecs-autoscaling-group"
max_size = "1"
min_size = "1"
desired_capacity = "1"
# vpc_zone_identifier = ["subnet-5c66053a", "subnet-9cd1a2d4"]
vpc_zone_identifier = module.vpc.public_subnets
launch_configuration = "${aws_launch_configuration.ecs-launch-configuration.name}"
health_check_type = "EC2"
default_cooldown = "300"
lifecycle {
create_before_destroy = true
}
tag {
key = "Name"
value = "wizardet972_ecs-instance"
propagate_at_launch = true
}
tag {
key = "Owner"
value = "Wizardnet972"
propagate_at_launch = true
}
}
resource "aws_autoscaling_policy" "ecs-scale" {
name = "ecs-scale-policy"
policy_type = "TargetTrackingScaling"
autoscaling_group_name = "${aws_autoscaling_group.ecs-autoscaling-group.name}"
estimated_instance_warmup = 60
target_tracking_configuration {
predefined_metric_specification {
predefined_metric_type = "ASGAverageCPUUtilization"
}
target_value = "70"
}
}
resource "aws_ecs_service" "nginx-ecs-service" {
name = "nginx-ecs-service"
cluster = "${aws_ecs_cluster.public-ecs-cluster.id}"
task_definition = "${aws_ecs_task_definition.nginx-image.family}:${max("${aws_ecs_task_definition.nginx-image.revision}", "${aws_ecs_task_definition.nginx-image.revision}")}"
launch_type = "EC2"
desired_count = 1
load_balancer {
target_group_arn = "${aws_alb_target_group.nginx-ecs-tg.arn}"
container_name = "nginx-web"
container_port = 80
}
depends_on = ["aws_ecs_task_definition.nginx-image"]
}
Update:
I tried to create the Terraform stack you shared with me and was able to reproduce the issue.
The issue was that the EC2 instance was unhealthy, so the autoscaling group kept terminating it and launching a new one.
The solution was to remove the following configuration; I think volume_type = "standard" was causing the trouble:
root_block_device {
volume_type = "standard"
volume_size = 100
delete_on_termination = true
}
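If you do want an explicit root volume, a sketch of the same block inside aws_launch_configuration with a general-purpose SSD type instead of standard (assuming a recent AWS provider that accepts gp3 here):
root_block_device {
  volume_type           = "gp3"   # general-purpose SSD instead of magnetic "standard"
  volume_size           = 35
  delete_on_termination = true
}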
Also check that you have done the basic steps to prepare the EC2 instance: use an ECS-optimized AMI for the instance, and attach the AmazonEC2ContainerServiceforEC2Role policy to its IAM role.
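A sketch of both steps, assuming a role named ecs_instance (the resource names are placeholders; the SSM parameter path is AWS's documented alias for the latest ECS-optimized Amazon Linux 2 AMI):
data "aws_ssm_parameter" "ecs_ami" {
  name = "/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id"
}

resource "aws_iam_role" "ecs_instance" {
  name = "ecsInstanceRole"
  # EC2 instances must be able to assume this role
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Action    = "sts:AssumeRole"
      Effect    = "Allow"
      Principal = { Service = "ec2.amazonaws.com" }
    }]
  })
}

resource "aws_iam_role_policy_attachment" "ecs_instance" {
  role       = aws_iam_role.ecs_instance.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
The launch configuration's image_id then becomes data.aws_ssm_parameter.ecs_ami.value.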
Reference:
AWS ECS Error when running task: No Container Instances were found in your cluster
setup instance role