Problem accessing an ASG in a private subnet from an ELB - amazon-web-services

I'm getting a 502 error from the ALB.
Here are my VPC and routes:
resource "aws_vpc" "My_VPC" {
cidr_block = "${var.vpcCIDRblock}"
instance_tenancy = "${var.instanceTenancy}"
enable_dns_support = "true"
enable_dns_hostnames = "true"
tags = {
Name = "My VPC"
}
}
resource "aws_subnet" "Public_Subnet" {
vpc_id = "${aws_vpc.My_VPC.id}"
cidr_block = "${var.subnetCIDRblock}"
map_public_ip_on_launch = "true"
availability_zone = "eu-central-1a"
tags= {
Name = "My Public Subnet"
}
}
resource "aws_subnet" "Public_Subnet_elb" {
vpc_id = "${aws_vpc.My_VPC.id}"
cidr_block = "${var.subnetCIDRblock4}"
map_public_ip_on_launch = "true"
availability_zone = "eu-central-1"
tags = {
Name = "My Public Subnet ELB"
}
}
resource "aws_subnet" "Private_Subnet" {
vpc_id = "${aws_vpc.My_VPC.id}"
cidr_block = "172.16.2.0/24"
map_public_ip_on_launch = "false"
availability_zone = "$eu-central-1a"
tags = {
Name = "My_Private_Subnet"
}
}
resource "aws_internet_gateway" "My_VPC_GW" {
vpc_id = "${aws_vpc.My_VPC.id}"
tags = {
Name = "My VPC Internet Gateway"
}
}
resource "aws_route_table" "eu-central-1a" {
vpc_id = "${aws_vpc.My_VPC.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.My_VPC_GW.id}"
}
tags = {
Name = "Public Subnet"
}
}
resource "aws_main_route_table_association" "public" {
vpc_id = "${aws_vpc.My_VPC.id}"
route_table_id = "${aws_route_table.eu-central-1a.id}"
}
resource "aws_route_table_association" "eu-central-1a-public" {
subnet_id = "${aws_subnet.Public_Subnet.id}"
route_table_id = "${aws_route_table.eu-central-1a.id}"
}
resource "aws_route_table_association" "elb" {
subnet_id = "${aws_subnet.Public_Subnet_elb.id}"
route_table_id = "${aws_route_table.eu-central-1a.id}"
}
resource "aws_eip" "eip" {
vpc = true
depends_on = ["aws_internet_gateway.My_VPC_GW"]
}
resource "aws_nat_gateway" "gateway" {
allocation_id = "${aws_eip.eip.id}"
subnet_id = "${aws_subnet.Public_Subnet.id}"
depends_on = ["aws_internet_gateway.My_VPC_GW"]
}
output "NAT_GW_IP" {
value = "${aws_eip.eip.public_ip}"
}
## Routing table
resource "aws_route_table" "private_route_table" {
vpc_id = "${aws_vpc.My_VPC.id}"
}
resource "aws_route" "private" {
route_table_id = "${aws_route_table.private_route_table.id}"
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = "${aws_nat_gateway.gateway.id}"
}
# Associate subnet private_subnet to private route table
resource "aws_route_table_association" "private_subnet_association" {
subnet_id = "${aws_subnet.Private_Subnet.id}"
route_table_id = "${aws_route_table.private_route_table.id}"
}
Each security group is open for incoming traffic on ports 80, 443, and 22; outbound is open to 0.0.0.0/0.
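The security group definitions themselves aren't shown; as a rough sketch (the real elb-security and web_ubuntu1 resources may differ), the instance group described would look something like this:
resource "aws_security_group" "web_ubuntu1" {
  # Hypothetical sketch matching the description above: HTTP, HTTPS and SSH in,
  # everything out. The actual definition is not included in the question.
  name   = "web-ubuntu1"
  vpc_id = "${aws_vpc.My_VPC.id}"

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
The elb-security group attached to the load balancer would follow the same pattern.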
ELB
resource "aws_lb" "test" {
name = "test-lb-tf"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.elb-security.id}"]
subnets = ["${aws_subnet.Public_Subnet_elb.id}","${aws_subnet.Public_Subnet.id}"]
enable_deletion_protection = false
depends_on = ["aws_nat_gateway.gateway"]
access_logs {
bucket = "test-listener"
prefix = "test-lb"
enabled = true
}
tags = {
Environment = "production"
}
}
resource "aws_lb_target_group" "test" {
name = "moodle-tg"
port = "80"
protocol = "HTTP"
vpc_id = aws_vpc.My_VPC.id
target_type = "instance"
deregistration_delay = "300"
health_check {
path = "/"
interval = "300"
port = "80"
matcher = "200"
protocol = "HTTP"
timeout = "10"
healthy_threshold = "10"
unhealthy_threshold= "10"
}
}
resource "aws_lb_listener" "front_end" {
load_balancer_arn = aws_lb.test.arn
port = "80"
protocol = "HTTP"
depends_on = ["aws_nat_gateway.gateway"]
default_action {
target_group_arn = "${aws_lb_target_group.test.arn}"
type = "forward"
}
}
resource "aws_lb_listener_rule" "asg-listener_rule" {
listener_arn = aws_lb_listener.front_end.arn
priority = 100
depends_on = ["aws_nat_gateway.gateway"]
condition {
path_pattern {
values = ["/"]
}
}
action {
type = "forward"
target_group_arn = aws_lb_target_group.test.arn
}
}
ASG
resource "aws_launch_configuration" "moodle-lc" {
name_prefix = "moodle-lc-"
image_id = "${data.aws_ami.centos.id}"
instance_type = "${var.instance}"
security_groups = ["${aws_security_group.web_ubuntu1.id}"]
key_name = "moodle_agents"
user_data = "${file("init-agent-instance.sh")}"
depends_on = ["aws_nat_gateway.gateway"]
lifecycle {
create_before_destroy = true
}
}
resource "aws_autoscaling_group" "moodle-agents" {
vpc_zone_identifier = ["${aws_subnet.Private_Subnet.id}"]
name = "agents"
max_size = "20"
min_size = "1"
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 2
target_group_arns = ["${aws_lb_target_group.test.arn}"]
force_delete = true
launch_configuration = "${aws_launch_configuration.moodle-lc.name}"
depends_on = ["aws_nat_gateway.gateway"]
lifecycle {
create_before_destroy = true
}
tag {
key = "Name"
value = "Agent Instance"
propagate_at_launch = true
}
}
The user_data script just installs the Apache web server and starts it.
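The script itself isn't included in the question; a minimal hypothetical stand-in for init-agent-instance.sh (assuming the CentOS AMI selected by data.aws_ami.centos) could be kept inline like this:
locals {
  # Hypothetical replacement for file("init-agent-instance.sh"); the real script is
  # not shown. It installs and starts Apache so the ALB health check on "/" gets a 200.
  agent_user_data = <<EOF
#!/bin/bash
yum install -y httpd
systemctl enable httpd
systemctl start httpd
echo "agent up" > /var/www/html/index.html
EOF
}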
I read this article (link) and my code looks the same to me; can someone please explain where I made a mistake?
Without the NAT gateway (and with the ASG in the public subnet) everything works fine, but it doesn't make sense to use an ALB to reach instances that are already exposed to the internet.

Your general architecture is correct, although there are still some mistakes:
An incorrect AZ (note the stray "$"):
availability_zone = "$eu-central-1a"
Another incorrect AZ ("eu-central-1" is a region name, not an AZ):
availability_zone = "eu-central-1"
An ALB must span at least two different AZs; you probably want "eu-central-1a" and "eu-central-1b".
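For example, a sketch of the corrected subnets (assuming eu-central-1a and eu-central-1b are the zones you want):
resource "aws_subnet" "Public_Subnet_elb" {
  vpc_id                  = "${aws_vpc.My_VPC.id}"
  cidr_block              = "${var.subnetCIDRblock4}"
  map_public_ip_on_launch = "true"
  availability_zone       = "eu-central-1b" # a real AZ, not the bare region name
  tags = {
    Name = "My Public Subnet ELB"
  }
}
resource "aws_subnet" "Private_Subnet" {
  vpc_id                  = "${aws_vpc.My_VPC.id}"
  cidr_block              = "172.16.2.0/24"
  map_public_ip_on_launch = "false"
  availability_zone       = "eu-central-1a" # without the stray "$"
  tags = {
    Name = "My_Private_Subnet"
  }
}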

Related

ALB health check failing with 502 error with my terraform config

Hi, I'm a beginner playing with VPCs on AWS and Terraform, and I'm stuck on an ALB health check issue.
I have two AZs, each with an EC2 instance running a web server. My goal is to set up the load balancer and be redirected to one of my instances. The instances are in private subnets, with a NAT gateway and an Elastic IP.
I set up a bastion host and checked over SSH that my instances are properly connected to the internet, and they are.
This is my Terraform setup (maybe there is an obvious error that I haven't seen):
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
shared_credentials_file = "./aws/credentials"
region = "us-east-1"
}
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "my-vpc"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "my-internet-gateway"
}
}
resource "aws_subnet" "public_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "my-public-a-subnet"
}
}
resource "aws_subnet" "public_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "my-public-b-subnet"
}
}
resource "aws_subnet" "private_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1a"
tags = {
Name = "my-private-a-subnet"
}
}
resource "aws_subnet" "private_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.4.0/24"
availability_zone = "us-east-1b"
tags = {
Name = "my-private-b-subnet"
}
}
resource "aws_nat_gateway" "main" {
allocation_id = aws_eip.main.id
subnet_id = aws_subnet.public_a.id
}
resource "aws_eip" "main" {
vpc = true
tags = {
Name = "my-nat-gateway-eip"
}
}
resource "aws_security_group" "main" {
name = "my-security-group"
description = "Allow HTTP and SSH access"
vpc_id = aws_vpc.main.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "my-security-group"
}
}
resource "aws_instance" "ec2_a" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-a"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "ec2_b" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_b.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-b"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "bastion" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.public_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-bastion"
}
key_name = "vockey"
user_data = file("user_data_bastion.sh")
}
resource "aws_alb" "main" {
name = "my-alb"
internal = false
security_groups = [aws_security_group.main.id]
subnets = [aws_subnet.public_a.id, aws_subnet.public_b.id]
tags = {
Name = "my-alb"
}
}
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
tags = {
Name = "my-alb-target-group"
}
}
resource "aws_route_table" "private" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_nat_gateway.main.id
}
tags = {
Name = "my-private-route-table"
}
}
resource "aws_route_table_association" "private_a" {
subnet_id = aws_subnet.private_a.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table_association" "private_b" {
subnet_id = aws_subnet.private_b.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
tags = {
Name = "My Public Route Table"
}
}
resource "aws_route_table_association" "public_a" {
subnet_id = aws_subnet.public_a.id
route_table_id = aws_route_table.public.id
}
resource "aws_route_table_association" "public_b" {
subnet_id = aws_subnet.public_b.id
route_table_id = aws_route_table.public.id
}
resource "aws_alb_listener" "main" {
load_balancer_arn = aws_alb.main.arn
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.ec2.arn
type = "forward"
}
}
resource "aws_alb_target_group_attachment" "ec2_a" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_a.id
port = 80
}
resource "aws_alb_target_group_attachment" "ec2_b" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_b.id
port = 80
}
It looks like you don't have a health_check block on the aws_alb_target_group resource. Try adding something like this:
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
health_check {
path = "/"
matcher = "200"
}
tags = {
Name = "my-alb-target-group"
}
}
Also, make sure that the HTTP services on your EC2 instances are listening and accepting connections on port 80. You should be able to curl http://<ec2 ip address> with a 200 response.

How to reuse Elastic IPs for a set of private and public subnets dedicated to Fargate tasks

I have the following setup that creates the networking requirements for Fargate:
resource "aws_vpc" "main" {
cidr_block = var.cidr
tags = {
Environment = var.environment
DO_NOT_DELETE = true
CreatedBy = "terraform"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
data "aws_availability_zones" "region_azs" {
state = "available"
}
locals {
az_count = length(data.aws_availability_zones.region_azs.names)
}
resource "aws_subnet" "private" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index)
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
tags = {
Name = "public-subnet-${data.aws_availability_zones.region_azs.names[count.index]}"
AvailabilityZone = data.aws_availability_zones.region_azs.names[count.index]
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "private"
DO_NOT_DELETE = true
}
}
resource "aws_subnet" "public" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index + local.az_count )
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
map_public_ip_on_launch = true
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
DO_NOT_DELETE = true
Type = "public"
}
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "public"
}
}
resource "aws_route" "public" {
route_table_id = aws_route_table.public.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
resource "aws_route_table_association" "public" {
count = local.az_count
subnet_id = element(aws_subnet.public.*.id, count.index)
route_table_id = aws_route_table.public.id
}
resource "aws_nat_gateway" "main" {
count = local.az_count
allocation_id = element(aws_eip.nat.*.id, count.index)
subnet_id = element(aws_subnet.public.*.id, count.index)
depends_on = [aws_internet_gateway.main]
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_eip" "nat" {
count = local.az_count
vpc = true
tags = {
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_route_table" "private" {
count = local.az_count
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Type = "private"
Vpc = aws_vpc.main.id
}
}
resource "aws_route" "private" {
count = local.az_count
route_table_id = element(aws_route_table.private.*.id, count.index)
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.main.*.id, count.index)
}
resource "aws_route_table_association" "private" {
count = local.az_count
subnet_id = element(aws_subnet.private.*.id, count.index)
route_table_id = element(aws_route_table.private.*.id, count.index)
}
resource "aws_security_group" "alb" {
name = "${var.resources_name_prefix}-alb-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
ingress {
protocol = "tcp"
from_port = 443
to_port = 443
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_security_group" "ecs_tasks" {
name = "${var.resources_name_prefix}-ecs-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 3000
to_port = 3000
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
This has been working great for a couple of availability zones, but now that I'm dynamically creating subnets for running tasks in every AZ of the region, I'm hitting the Elastic IP limit per region.
So I'm getting this error while trying to create the stack:
Error creating EIP: AddressLimitExceeded: The maximum number of addresses has been reached.
status code: 400
I'm wondering if the following part:
resource "aws_nat_gateway" "main" {
count = local.az_count
allocation_id = element(aws_eip.nat.*.id, count.index)
subnet_id = element(aws_subnet.public.*.id, count.index)
depends_on = [aws_internet_gateway.main]
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_eip" "nat" {
count = local.az_count
vpc = true
tags = {
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
could be structured to use a single EIP with internal routing, if that makes sense.
I modified your code a bit, but it was a mess; for example, all the private subnets were named "public". It now creates two NAT gateways, so if you have subnets in, let's say, six AZs, there will be some cross-AZ traffic to reach those NATs.
Alternatively, simply don't create VPCs spanning so many AZs. Typically only two or three AZs are used for a VPC; more than that is rarely needed.
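For instance, a minimal sketch (reusing your existing data source) that caps how many AZs the VPC spans:
data "aws_availability_zones" "region_azs" {
  state = "available"
}
locals {
  # Use at most three AZs even in regions that expose more, so the number of
  # NAT gateways (and therefore EIPs) stays small.
  az_count = min(3, length(data.aws_availability_zones.region_azs.names))
}
The rest of the configuration can keep using local.az_count unchanged.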
Finally, you can ask AWS Support to raise your EIP limit if you want to preserve your original setup. Here is the modified code:
resource "aws_vpc" "main" {
cidr_block = var.cidr
tags = {
Environment = var.environment
DO_NOT_DELETE = true
CreatedBy = "terraform"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
data "aws_availability_zones" "region_azs" {
state = "available"
}
locals {
az_count = length(data.aws_availability_zones.region_azs.names)
}
resource "aws_subnet" "private" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index)
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
tags = {
Name = "private-subnet-${data.aws_availability_zones.region_azs.names[count.index]}"
AvailabilityZone = data.aws_availability_zones.region_azs.names[count.index]
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "private"
DO_NOT_DELETE = true
}
}
resource "aws_subnet" "public" {
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index + local.az_count )
availability_zone = data.aws_availability_zones.region_azs.names[count.index]
count = local.az_count
map_public_ip_on_launch = true
tags = {
Name = "public-subnet-${data.aws_availability_zones.region_azs.names[count.index]}"
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
DO_NOT_DELETE = true
Type = "public"
}
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
Type = "public"
}
}
resource "aws_route" "public" {
route_table_id = aws_route_table.public.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
resource "aws_route_table_association" "public" {
count = local.az_count
subnet_id = element(aws_subnet.public.*.id, count.index)
route_table_id = aws_route_table.public.id
}
resource "aws_nat_gateway" "main" {
count = 2
allocation_id = element(aws_eip.nat.*.id, count.index)
subnet_id = element(aws_subnet.public.*.id, count.index)
depends_on = [aws_internet_gateway.main]
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_eip" "nat" {
count = 2
vpc = true
tags = {
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_route_table" "private" {
count = local.az_count
vpc_id = aws_vpc.main.id
tags = {
Environment = var.environment
CreatedBy = "terraform"
Type = "private"
Vpc = aws_vpc.main.id
}
}
resource "aws_route" "private" {
count = local.az_count
route_table_id = element(aws_route_table.private.*.id, count.index)
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = element(aws_nat_gateway.main.*.id, count.index)
}
resource "aws_route_table_association" "private" {
count = local.az_count
subnet_id = element(aws_subnet.private.*.id, count.index)
route_table_id = element(aws_route_table.private.*.id, count.index)
}
resource "aws_security_group" "alb" {
name = "${var.resources_name_prefix}-alb-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
ingress {
protocol = "tcp"
from_port = 443
to_port = 443
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}
resource "aws_security_group" "ecs_tasks" {
name = "${var.resources_name_prefix}-ecs-sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = "tcp"
from_port = 3000
to_port = 3000
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Environment = var.environment
CreatedBy = "terraform"
Vpc = aws_vpc.main.id
}
}

AWS EC2 instance not joining ECS cluster

I am quite desperate about an issue very similar to the one described in this thread.
https://github.com/OpenDroneMap/opendronemap-ecs/issues/14#issuecomment-432004023
When I attach the network interface to my EC2 instance, so that my custom VPC is used instead of the default one, the EC2 instance no longer joins the ECS cluster.
This is my Terraform definition:
provider "aws" {}
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_support = true
enable_dns_hostnames = true
assign_generated_ipv6_cidr_block = true
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
}
resource "aws_subnet" "main" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.0.0/16"
availability_zone = "us-west-2a"
map_public_ip_on_launch = true
}
resource "aws_route_table" "main" {
vpc_id = aws_vpc.main.id
}
resource "aws_route_table_association" "rta1" {
subnet_id = aws_subnet.main.id
route_table_id = aws_route_table.main.id
}
resource "aws_route_table_association" "rta2" {
gateway_id = aws_internet_gateway.main.id
route_table_id = aws_route_table.main.id
}
resource "aws_security_group" "sg-jenkins" {
name = "sg_jenkins"
description = "Allow inbound traffic for Jenkins instance"
vpc_id = aws_vpc.main.id
ingress = [
{
description = "inbound all"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
self = null
prefix_list_ids = null
security_groups = null
}
]
egress = [
{
description = "outbound all"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
self = null
prefix_list_ids = null
security_groups = null
}
]
}
resource "aws_network_interface" "main" {
subnet_id = aws_subnet.main.id
security_groups = [aws_security_group.sg-jenkins.id]
}
resource "aws_instance" "ec2_instance" {
ami = "ami-07764a7d8502d36a2"
instance_type = "t2.micro"
iam_instance_profile = "ecsInstanceRole"
key_name = "fran"
network_interface {
device_index = 0
network_interface_id = aws_network_interface.main.id
}
user_data = <<EOF
#!/bin/bash
echo ECS_CLUSTER=cluster >> /etc/ecs/ecs.config
EOF
depends_on = [aws_internet_gateway.main]
}
### Task definition
resource "aws_ecs_task_definition" "jenkins-task" {
family = "namespace"
container_definitions = jsonencode([
{
name = "jenkins"
image = "cnservices/jenkins-master"
cpu = 10
memory = 512
essential = true
portMappings = [
{
containerPort = 8080
hostPort = 8080
}
]
}
])
# network_mode = "awsvpc"
volume {
name = "service-storage"
host_path = "/ecs/service-storage"
}
placement_constraints {
type = "memberOf"
expression = "attribute:ecs.availability-zone in [us-west-2a]"
}
}
### Cluster
resource "aws_ecs_cluster" "cluster" {
name = "cluster"
setting {
name = "containerInsights"
value = "enabled"
}
}
### Service
resource "aws_ecs_service" "jenkins-service" {
name = "jenkins-service"
cluster = aws_ecs_cluster.cluster.id
task_definition = aws_ecs_task_definition.jenkins-task.arn
desired_count = 1
# iam_role = aws_iam_role.foo.arn
# depends_on = [aws_iam_role_policy.foo]
# network_configuration {
# security_groups = [aws_security_group.sg-jenkins.id]
# subnets = [aws_subnet.main.id]
# }
ordered_placement_strategy {
type = "binpack"
field = "cpu"
}
placement_constraints {
type = "memberOf"
expression = "attribute:ecs.availability-zone in [us-west-2a]"
}
}
You haven't created a route to your IGW. Thus your instance can't connect to the ECS service to register with your cluster. So remove rta2 and add a route:
# not needed. to be removed.
# resource "aws_route_table_association" "rta2" {
# gateway_id = aws_internet_gateway.main.id
# route_table_id = aws_route_table.main.id
# }
# add a missing route to the IGW
resource "aws_route" "r" {
route_table_id = aws_route_table.main.id
gateway_id = aws_internet_gateway.main.id
destination_cidr_block = "0.0.0.0/0"
}

Allowing load-balanced autoscaled instances to connect to the internet - AWS / Terraform

I'm using Terraform and I'm having a tricky time connecting my autoscaled AWS EC2 instances to the internet. I can launch a standalone EC2 instance that connects with no difficulty, but when I visit the public IP addresses of the instances created by the autoscaling group I get "This site can’t be reached xxx.xx.xxx.xxx unexpectedly closed the connection."
The main difference I see is that I can specify a network interface on a standalone EC2 instance, but I'm not sure how this would work with my launch template. My instances launch into different subnets in different availability zones, and the template is as follows:
provider "aws" {
region = "us-east-1"
access_key = "xxxxx"
secret_key = "xxxxx"
}
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello, world! > var/www/html/index.html'
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTPS from VPC"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_tls"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
network_interfaces {
device_index = 0
associate_public_ip_address = true
}
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}
#Test standalone instance
resource "aws_network_interface" "web_server_1" {
subnet_id = aws_subnet.subnet_1.id
private_ips = ["10.0.1.50"]
security_groups = [aws_security_group.allow_web.id]
}
resource "aws_instance" "ubuntu-1" {
ami = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
availability_zone = "us-east-1a" #hardcoded to ensure that subnet and instance are in same availability availability zone
network_interface {
device_index = 0
network_interface_id = aws_network_interface.web_server_1.id
}
user_data = <<-EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello! > var/www/html/index.html'
EOF
tags = {
Name = "web-server"
}
}
I modified your template a bit (the user data, its indentation, and the aws_launch_template), and it works now. It will only work over HTTP, since you don't have HTTPS set up, so you don't need SG rules for HTTPS.
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
apt update -y
apt install apache2 -y
systemctl start apache2
echo "hello, world!" > /var/www/html/index.html
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_http"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
# DONT NEED THIS
# network_interfaces {
# device_index = 0
# associate_public_ip_address = true
# }
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}

Terraform: Why AWS NAT Gateway conflicts with Egress Only Internet Gateway

I have both IPv4 and IPv6, and I'm trying to manage routing for a private subnet.
Once the NAT gateway is attached to the route table, it does not let me attach the egress-only gateway to the same route table, and gives me this error:
An interface that is part of a NAT gateway cannot be the next hop for an IPv6 destination CIDR block or IPv6 prefix list
However, if I attach it manually through the AWS Console, there is no problem.
Maybe I'm missing some info? I know that NAT is only for IPv4 and the egress-only gateway is only for IPv6; can someone guide me on this? If NAT is not compatible with the egress-only internet gateway, why does the AWS Console let me attach it, but Terraform does not?
Here is my simple Terraform:
resource "aws_eip" "neip" {
count = length(var.private_subnet)
vpc = true
}
resource "aws_nat_gateway" "nat" {
count = length(var.private_subnet)
subnet_id = element(var.public_subnet, count.index)
allocation_id = element(aws_eip.neip.*.id, count.index)
}
resource "aws_egress_only_internet_gateway" "egw" {
count = length(var.zones) > 0 ? 1 : 0
vpc_id = var.vpc_id
}
resource "aws_route_table" "route" {
count = length(var.private_subnet)
vpc_id = var.vpc_id
}
resource "aws_route" "ipv4" {
count = length(aws_route_table.route)
depends_on = [ aws_route_table.route ]
route_table_id = aws_route_table.route[count.index].id
nat_gateway_id = element(aws_nat_gateway.nat.*.id, count.index)
destination_cidr_block = "0.0.0.0/0"
}
resource "aws_route" "ipv6" {
count = length(aws_route_table.route)
depends_on = [ aws_route_table.route ]
route_table_id = aws_route_table.route[count.index].id
egress_only_gateway_id = element(aws_egress_only_internet_gateway.egw.*.id, count.index)
destination_ipv6_cidr_block = "::/0"
}
resource "aws_route_table_association" "route" {
count = length(aws_route_table.route)
subnet_id = var.private_subnet[count.index]
route_table_id = aws_route_table.route[count.index].id
}
No issue with the Terraform script
I tried to reproduce your issue, but for me it works as expected. Maybe there are still some "typos" in the code presented here, which makes it difficult to see why it wouldn't work for you.
Anyway, here is the code I used to mimic your setup; large chunks I had to create myself, since they are not shown in your code (e.g. the VPC setup, internet gateway, and public subnets are all missing).
The code below works and I couldn't replicate your issue. Route tables work as expected:
data "aws_availability_zones" "available" {}
resource "aws_vpc" "vpc" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
assign_generated_ipv6_cidr_block = true
tags = {
Name = "testvpc"
}
}
variable "private_cidrs" {
default = ["10.0.2.0/24", "10.0.3.0/24"]
}
variable "public_cidrs" {
default = ["10.0.0.0/24", "10.0.1.0/24"]
}
resource "aws_subnet" "public_subnet" {
count = length(var.public_cidrs)
cidr_block = var.public_cidrs[count.index]
vpc_id = aws_vpc.vpc.id
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = {
Name = "public${count.index}"
}
}
resource "aws_subnet" "private_subnet" {
count = length(var.private_cidrs)
cidr_block = var.private_cidrs[count.index]
vpc_id = aws_vpc.vpc.id
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = {
Name = "private${count.index}"
}
}
resource "aws_eip" "neip" {
count = length(var.private_cidrs)
vpc = true
}
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.vpc.id
tags = {
Name = "main"
}
}
resource "aws_nat_gateway" "nat" {
count = length(var.private_cidrs)
subnet_id = element(aws_subnet.public_subnet.*.id, count.index)
allocation_id = element(aws_eip.neip.*.id, count.index)
depends_on = [aws_internet_gateway.igw]
}
resource "aws_egress_only_internet_gateway" "egw" {
#count = length(var.private_cidrs)
vpc_id = aws_vpc.vpc.id
}
# routes for public subnets
resource "aws_route_table" "public_route" {
count = length(var.public_cidrs)
vpc_id = aws_vpc.vpc.id
}
resource "aws_route" "public_ipv4" {
count = length(aws_route_table.public_route)
route_table_id = aws_route_table.public_route[count.index].id
gateway_id = aws_internet_gateway.igw.id
destination_cidr_block = "0.0.0.0/0"
}
resource "aws_route" "ipv6_public" {
count = length(aws_route_table.public_route)
route_table_id = aws_route_table.public_route[count.index].id
egress_only_gateway_id = aws_egress_only_internet_gateway.egw.id
destination_ipv6_cidr_block = "::/0"
}
resource "aws_route_table_association" "public_route" {
count = length(aws_route_table.public_route)
subnet_id = aws_subnet.public_subnet[count.index].id
route_table_id = aws_route_table.public_route[count.index].id
}
# routes for private subnets
resource "aws_route_table" "route" {
count = length(var.private_cidrs)
vpc_id = aws_vpc.vpc.id
}
resource "aws_route" "ipv4" {
count = length(aws_route_table.route)
route_table_id = aws_route_table.route[count.index].id
nat_gateway_id = aws_nat_gateway.nat[count.index].id
#nat_gateway_id = aws_nat_gateway.nat.id
destination_cidr_block = "0.0.0.0/0"
}
resource "aws_route" "ipv6" {
count = length(aws_route_table.route)
route_table_id = aws_route_table.route[count.index].id
egress_only_gateway_id = aws_egress_only_internet_gateway.egw.id
destination_ipv6_cidr_block = "::/0"
}
resource "aws_route_table_association" "route" {
count = length(aws_route_table.route)
subnet_id = aws_subnet.private_subnet[count.index].id
route_table_id = aws_route_table.route[count.index].id
}