EC2 with two interfaces (one public and one private)

I basically want an EC2 instance with one interface in a public subnet and one in a private subnet. I should be able to SSH in through the public interface from the outside world; the private interface will be used to communicate with a database on the private network.
In Terraform, I:
Created a VPC and two subnets (one public, one private) in that VPC.
Created an Internet Gateway and a route table in the VPC with a 0.0.0.0/0 route to the IGW.
Created a route table association between the public subnet and the route table.
Created an EC2 instance with one interface in the public subnet and one in the private subnet.
Created an Elastic IP on the public interface of the EC2 instance.
Created a security group allowing the SSH port (22).
I'm unable to SSH (connection timeout). Could anyone take a look at this?
conf_vpc_cidr_block = "10.100.0.0/16"
conf_vpc_enable_dns_support = "true"
conf_vpc_enable_dns_hostnames = "true"
conf_vpc_enable_classiclink = "false"
conf_vpc_instance_tenancy = "default"
conf_private_subnet_cidr_block = "10.100.100.0/24"
conf_private_subnet_map_public_ip_on_launch = "false"
conf_private_subnet_availability_zone = "eu-north-1a"
conf_public_subnet_cidr_block = "10.100.200.0/24"
conf_public_subnet_map_public_ip_on_launch = "true"
conf_public_subnet_availability_zone = "eu-north-1a"
conf_instance_ami = "ami-000e50175c5f86214"
conf_instance_type = "t3.micro"
provider "aws" {
region = var.conf_aws_region
profile = var.conf_aws_profile
}
resource "aws_vpc" "Terraform_XX_VPC" {
cidr_block = var.conf_vpc_cidr_block
enable_dns_support = var.conf_vpc_enable_dns_support
enable_dns_hostnames = var.conf_vpc_enable_dns_hostnames
enable_classiclink = var.conf_vpc_enable_classiclink
instance_tenancy = var.conf_vpc_instance_tenancy
}
resource "aws_subnet" "Terraform_XX_Private_SN" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
cidr_block = var.conf_private_subnet_cidr_block
map_public_ip_on_launch = var.conf_private_subnet_map_public_ip_on_launch
availability_zone = var.conf_private_subnet_availability_zone
}
resource "aws_subnet" "Terraform_XX_Public_SN" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
cidr_block = var.conf_public_subnet_cidr_block
map_public_ip_on_launch = var.conf_public_subnet_map_public_ip_on_launch
availability_zone = var.conf_public_subnet_availability_zone
}
resource "aws_internet_gateway" "Terraform_XX_IGW" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
}
resource "aws_route_table" "Terraform_XX_Route_Public" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.Terraform_XX_IGW.id}"
}
}
resource "aws_route_table_association" "Terraform_XX_Route_Table_Public_Association" {
subnet_id = "${aws_subnet.Terraform_XX_Public_SN.id}"
route_table_id = "${aws_route_table.Terraform_XX_Route_Public.id}"
}
resource "aws_security_group" "Terraform_XX_SG_ALLOW_SSH" {
name = "Terraform_XX_SG_ALLOW_SSH"
description = "Allow SSH"
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "Terraform_XX_EC2" {
ami = var.conf_instance_ami
instance_type = var.conf_instance_type
key_name = "${local.environmentLC}"
network_interface {
device_index = 0
network_interface_id = "${aws_network_interface.Terraform_XX_EC2_Private_Intf.id}"
#delete_on_termination = true
}
network_interface {
device_index = 1
network_interface_id = "${aws_network_interface.Terraform_XX_EC2_Public_Intf.id}"
#delete_on_termination = true
}
}
resource "aws_network_interface" "Terraform_XX_EC2_Private_Intf" {
subnet_id = "${aws_subnet.Terraform_XX_Private_SN.id}"
private_ips = ["10.100.100.5"]
security_groups = [aws_security_group.Terraform_XX_SG_ALLOW_SSH.id]
}
resource "aws_network_interface" "Terraform_XX_EC2_Public_Intf" {
subnet_id = "${aws_subnet.Terraform_XX_Public_SN.id}"
private_ips = ["10.100.200.5"]
security_groups = [aws_security_group.Terraform_XX_SG_ALLOW_SSH.id]
}
resource "aws_eip" "Terraform_XX_EC2_EIP" {
vpc = true
network_interface = "${aws_network_interface.Terraform_XX_EC2_Public_Intf.id}"
}

I was able to solve this by swapping the device_index values of the public and private interfaces, so that the public interface (the one holding the Elastic IP) sits at device_index 0.
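A minimal sketch of the corrected attachment, reusing the resource names above (the instance's default route, and therefore the Elastic IP, should live on eth0):
resource "aws_instance" "Terraform_XX_EC2" {
  ami           = var.conf_instance_ami
  instance_type = var.conf_instance_type
  key_name      = local.environmentLC
  # Public interface first: eth0 carries the default route and the Elastic IP.
  network_interface {
    device_index         = 0
    network_interface_id = aws_network_interface.Terraform_XX_EC2_Public_Intf.id
  }
  # Private interface second: eth1 talks to the database subnet.
  network_interface {
    device_index         = 1
    network_interface_id = aws_network_interface.Terraform_XX_EC2_Private_Intf.id
  }
}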

Related

ALB health check failing with 502 error with my terraform config

Hi, I'm a beginner playing with VPCs on AWS and Terraform, and I'm stuck on an ALB health check issue.
I have two AZs, each with an EC2 instance running a web server. My goal is to set up the load balancer and be redirected to one of my instances; the instances are in private subnets behind a NAT gateway with an Elastic IP.
I set up a bastion host to check over SSH whether my instances could reach the internet, and the answer is yes.
This is my Terraform setup (maybe there is an obvious error that I haven't seen):
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
shared_credentials_file = "./aws/credentials"
region = "us-east-1"
}
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "my-vpc"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "my-internet-gateway"
}
}
resource "aws_subnet" "public_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "my-public-a-subnet"
}
}
resource "aws_subnet" "public_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "my-public-b-subnet"
}
}
resource "aws_subnet" "private_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1a"
tags = {
Name = "my-private-a-subnet"
}
}
resource "aws_subnet" "private_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.4.0/24"
availability_zone = "us-east-1b"
tags = {
Name = "my-private-b-subnet"
}
}
resource "aws_nat_gateway" "main" {
allocation_id = aws_eip.main.id
subnet_id = aws_subnet.public_a.id
}
resource "aws_eip" "main" {
vpc = true
tags = {
Name = "my-nat-gateway-eip"
}
}
resource "aws_security_group" "main" {
name = "my-security-group"
description = "Allow HTTP and SSH access"
vpc_id = aws_vpc.main.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "my-security-group"
}
}
resource "aws_instance" "ec2_a" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-a"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "ec2_b" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_b.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-b"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "bastion" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.public_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-bastion"
}
key_name = "vockey"
user_data = file("user_data_bastion.sh")
}
resource "aws_alb" "main" {
name = "my-alb"
internal = false
security_groups = [aws_security_group.main.id]
subnets = [aws_subnet.public_a.id, aws_subnet.public_b.id]
tags = {
Name = "my-alb"
}
}
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
tags = {
Name = "my-alb-target-group"
}
}
resource "aws_route_table" "private" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_nat_gateway.main.id
}
tags = {
Name = "my-private-route-table"
}
}
resource "aws_route_table_association" "private_a" {
subnet_id = aws_subnet.private_a.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table_association" "private_b" {
subnet_id = aws_subnet.private_b.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
tags = {
Name = "My Public Route Table"
}
}
resource "aws_route_table_association" "public_a" {
subnet_id = aws_subnet.public_a.id
route_table_id = aws_route_table.public.id
}
resource "aws_route_table_association" "public_b" {
subnet_id = aws_subnet.public_b.id
route_table_id = aws_route_table.public.id
}
resource "aws_alb_listener" "main" {
load_balancer_arn = aws_alb.main.arn
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.ec2.arn
type = "forward"
}
}
resource "aws_alb_target_group_attachment" "ec2_a" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_a.id
port = 80
}
resource "aws_alb_target_group_attachment" "ec2_b" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_b.id
port = 80
}
It looks like you don't have a health_check block on the aws_alb_target_group resource. Try adding something like this:
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
health_check {
path = "/"
matcher = "200"
}
tags = {
Name = "my-alb-target-group"
}
}
Also, make sure that the HTTP services on your EC2 instances are listening and accepting connections on port 80. You should be able to curl http://<ec2 ip address> and get a 200 response.
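For example, a minimal user_data sketch that would satisfy that check (this assumes an Amazon Linux 2 style AMI; the contents of the original user_data.sh are not shown):
resource "aws_instance" "ec2_a" {
  # ... as above ...
  # Install a web server that answers 200 on / for the ALB health check.
  user_data = <<-EOF
    #!/bin/bash
    yum install -y httpd
    echo "ok" > /var/www/html/index.html
    systemctl enable --now httpd
  EOF
}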

Solving conflicting route table issue on ALB

tldr;
I can't access my service through the ALB DNS name; trying to reach the URL times out.
I noticed that, looking from the IGW and NAT, there's an isolated routed subnet (Public Subnet 2), and also a task that isn't being exposed through the ALB because somehow it got a different subnet attached.
More general context
Got Terraform modules defining:
an ECS cluster, service, and task definition
an ALB setup, including a target group and a listener
Got a couple of subnets and a security group for the ALB
Got private subnets and their own SG for ECS
The target group port is already the same as the container port.
Using CodePipeline I get a task running, and I can see my service's logs, meaning it starts.
Some questions
Can I have multiple IGWs associated with a single NAT gateway within a single VPC?
Tasks get attached to a couple of private subnets and an SG that permits traffic from the ALB SG. The tasks also need to access a Redis instance, so I'm additionally attaching to them the SG and subnet where the ElastiCache node lives (shown in the Terraform module below). Any advice here?
ALB and networking resources
variable "vpc_id" {
type = string
default = "vpc-0af6233d57f7a6e1b"
}
variable "environment" {
type = string
default = "dev"
}
data "aws_vpc" "vpc" {
id = var.vpc_id
}
### Public subnets
resource "aws_subnet" "public_subnet_us_east_1a" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.10.0/24"
map_public_ip_on_launch = true
availability_zone = "us-east-1a"
tags = {
Name = "audible-blog-us-${var.environment}-public-subnet-1a"
}
}
resource "aws_subnet" "public_subnet_us_east_1b" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.11.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "audible-blog-us-${var.environment}-public-subnet-1b"
}
}
### Private subnets
resource "aws_subnet" "private_subnet_us_east_1a" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.12.0/24"
map_public_ip_on_launch = true
availability_zone = "us-east-1a"
tags = {
Name = "audible-blog-us-${var.environment}-private-subnet-1a"
}
}
resource "aws_subnet" "private_subnet_us_east_1b" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.13.0/24"
availability_zone = "us-east-1b"
tags = {
Name = "audible-blog-us-${var.environment}-private-subnet-1b"
}
}
# Create a NAT gateway with an EIP for each private subnet to get internet connectivity
resource "aws_eip" "gw_a" {
vpc = true
}
resource "aws_eip" "gw_b" {
vpc = true
}
resource "aws_nat_gateway" "gw_a" {
subnet_id = aws_subnet.public_subnet_us_east_1a.id
allocation_id = aws_eip.gw_a.id
}
resource "aws_nat_gateway" "gw_b" {
subnet_id = aws_subnet.public_subnet_us_east_1b.id
allocation_id = aws_eip.gw_b.id
}
# Create a new route table for the private subnets
# And make it route non-local traffic through the NAT gateway to the internet
resource "aws_route_table" "private_a" {
vpc_id = data.aws_vpc.vpc.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.gw_a.id
}
}
resource "aws_route_table" "private_b" {
vpc_id = data.aws_vpc.vpc.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.gw_b.id
}
}
# Explicitely associate the newly created route tables to the private subnets (so they don't default to the main route table)
resource "aws_route_table_association" "private_a" {
subnet_id = aws_subnet.private_subnet_us_east_1a.id
route_table_id = aws_route_table.private_a.id
}
resource "aws_route_table_association" "private_b" {
subnet_id = aws_subnet.private_subnet_us_east_1b.id
route_table_id = aws_route_table.private_b.id
}
# This is the group you need to edit if you want to restrict access to your application
resource "aws_security_group" "alb_sg" {
name = "audible-blog-us-${var.environment}-lb-sg"
description = "Internet to ALB Security Group"
vpc_id = data.aws_vpc.vpc.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
name = "audible-blog-us-${var.environment}-lb-sg"
}
}
# Traffic to the ECS Cluster should only come from the ALB
resource "aws_security_group" "ecs_tasks_sg" {
name = "audible-blog-us-${var.environment}-ecs-sg"
description = "ALB to ECS Security Group"
vpc_id = data.aws_vpc.vpc.id
ingress {
from_port = 8080
to_port = 8080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
security_groups = [ aws_security_group.alb_sg.id ]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
name = "audible-blog-us-${var.environment}-ecs-sg"
}
}
resource "aws_alb" "alb" {
name = "audible-blog-us-${var.environment}-alb"
internal = false
load_balancer_type = "application"
subnets = [ aws_subnet.public_subnet_us_east_1a.id, aws_subnet.public_subnet_us_east_1b.id ]
security_groups = [ aws_security_group.alb_sg.id ]
tags = {
name = "audible-blog-us-${var.environment}-alb"
environment = var.environment
}
}
resource "aws_alb_target_group" "target_group" {
name = "audible-blog-us-${var.environment}-target-group"
port = "8080"
protocol = "HTTP"
vpc_id = data.aws_vpc.vpc.id
target_type = "ip"
health_check {
enabled = true
path = "/blog"
interval = 30
matcher = "200-304"
port = "traffic-port"
unhealthy_threshold = 5
}
depends_on = [aws_alb.alb]
}
resource "aws_alb_listener" "web_app_http" {
load_balancer_arn = aws_alb.alb.arn
port = 80
protocol = "HTTP"
depends_on = [aws_alb_target_group.target_group]
default_action {
target_group_arn = aws_alb_target_group.target_group.arn
type = "forward"
}
}
output "networking_details" {
value = {
load_balancer_arn = aws_alb.alb.arn
load_balancer_target_group_arn = aws_alb_target_group.target_group.arn
subnets = [
aws_subnet.private_subnet_us_east_1a.id,
aws_subnet.private_subnet_us_east_1b.id
]
security_group = aws_security_group.ecs_tasks_sg.id
}
}
ECS Fargate module
module "permissions" {
source = "./permissions"
environment = var.environment
}
resource "aws_ecs_cluster" "cluster" {
name = "adl-blog-us-${var.environment}"
}
resource "aws_cloudwatch_log_group" "logs_group" {
name = "/ecs/adl-blog-us-next-${var.environment}"
retention_in_days = 90
}
resource "aws_ecs_task_definition" "task" {
family = "adl-blog-us-task-${var.environment}"
container_definitions = jsonencode([
{
name = "adl-blog-us-next"
image = "536299334720.dkr.ecr.us-east-1.amazonaws.com/adl-blog-us:latest"
portMappings = [
{
containerPort = 8080
hostPort = 8080
},
{
containerPort = 6379
hostPort = 6379
}
]
environment: [
{
"name": "ECS_TASK_FAMILY",
"value": "adl-blog-us-task-${var.environment}"
}
],
logConfiguration: {
logDriver: "awslogs",
options: {
awslogs-group: "/ecs/adl-blog-us-next-${var.environment}",
awslogs-region: "us-east-1",
awslogs-stream-prefix: "ecs"
}
},
healthCheck: {
retries: 3,
command: [
"CMD-SHELL",
"curl -sf http://localhost:8080/blog || exit 1"
],
timeout: 5,
interval: 30,
startPeriod: null
}
}
])
cpu = 256
memory = 512
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
execution_role_arn = module.permissions.task_definition_execution_role_arn
task_role_arn = module.permissions.task_definition_execution_role_arn
}
resource "aws_ecs_service" "service" {
name = "adl-blog-us-task-service-${var.environment}"
cluster = aws_ecs_cluster.cluster.id
deployment_controller {
type = "ECS"
}
deployment_maximum_percent = 200
deployment_minimum_healthy_percent = 50
task_definition = aws_ecs_task_definition.task.family
desired_count = 3
launch_type = "FARGATE"
network_configuration {
subnets = concat(
var.public_alb_networking_details.subnets,
[ var.private_networking_details.subnet.id ]
)
security_groups = [
var.public_alb_networking_details.security_group,
var.private_networking_details.security_group.id
]
assign_public_ip = true
}
load_balancer {
target_group_arn = var.public_alb_networking_details.load_balancer_target_group_arn
container_name = "adl-blog-us-next"
container_port = 8080
}
force_new_deployment = true
lifecycle {
ignore_changes = [desired_count]
}
depends_on = [
module.permissions
]
}
variable "private_networking_details" {}
variable "public_alb_networking_details" {}
variable "environment" {
type = string
}
Your container ports are 8080 and 6379, but your target group says 80. Double-check which ports you actually use on Fargate and adjust your target group accordingly.
There could be other issues as well that aren't yet apparent. For example, you are opening port 443, but there is no listener for it, so any attempt to use HTTPS will fail.
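If HTTPS is intended, a listener along these lines would also be needed (a sketch only; var.acm_certificate_arn is a hypothetical variable pointing at an ACM certificate you would have to provision):
resource "aws_alb_listener" "web_app_https" {
  load_balancer_arn = aws_alb.alb.arn
  port              = 443
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-2016-08"
  certificate_arn   = var.acm_certificate_arn # hypothetical ACM certificate ARN
  default_action {
    target_group_arn = aws_alb_target_group.target_group.arn
    type             = "forward"
  }
}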

Terraform project to create VPC/IGW, VPN, and private subnet with NATGW - Connectivity Analyzer says no route from NATGW to IGW

https://github.com/phillhocking/aws-network-vpn/tree/1000
I have been trying to figure this out for quite some time, and I am really struggling to understand why these components can't talk to each other. The VPC has an IGW, which is necessary for the NAT gateway's EIP; however, nothing on this subnet can get out to the public internet. Everything works fine over the VPN link, but the Connectivity Analyzer indicates there's no connectivity between the NAT gateway and the IGW because there is no route. How would I route just the NAT gateway's traffic to the IGW without something like a 0.0.0.0/0 route, which is already assigned to the NAT gateway for this subnet?
Connectivity Analyzer - Route Table has no route to IGW
I know I am just missing something bonehead-simple. Look at the repo for how the whole thing fits together, but here's the VPC module:
data "aws_availability_zones" "available" {
state = "available"
}
resource "aws_vpc" "main" {
cidr_block = var.cidr_block
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = var.vpc_name
}
}
resource "aws_subnet" "dev" {
count = var.subnet_count
# This line is necessary to ensure that we pick availability zones that can launch any size of EC2 instance
availability_zone = data.aws_availability_zones.available.names[0]
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(var.cidr_block, 6, count.index * 2 + 1)
tags = {
Name = "dev-subnet-${count.index}"
}
}
resource "aws_network_acl" "dev" {
vpc_id = aws_vpc.main.id
subnet_ids = aws_subnet.dev[*].id
ingress {
protocol = -1
rule_no = 1000
action = "allow"
#cidr_block = var.prem_network_address_space
cidr_block = "0.0.0.0/0"
from_port = 0
to_port = 0
}
egress {
protocol = -1
rule_no = 100
action = "allow"
cidr_block = "0.0.0.0/0"
from_port = 0
to_port = 0
}
tags = {
Name = "dev-acl"
}
}
# Gateways
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.main.id
tags = {
Name = "${var.vpc_name}-internet-gateway"
}
}
resource "aws_eip" "nat-gw" {
vpc = true
tags = {
Name = "nat-elastic-ip"
}
depends_on = [aws_internet_gateway.gw]
}
resource "aws_nat_gateway" "gw" {
allocation_id = aws_eip.nat-gw.id
subnet_id = aws_subnet.dev[0].id
tags = {
Name = "${var.vpc_name}-nat-gateway-dev"
}
depends_on = [aws_internet_gateway.gw]
}
# VPC Route Table
resource "aws_default_route_table" "default" {
default_route_table_id = aws_vpc.main.main_route_table_id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "${var.vpc_name}-public"
}
depends_on = [aws_internet_gateway.gw]
}
# dev Subnet Route Table
resource "aws_route_table" "dev" {
vpc_id = aws_vpc.main.id
tags = {
Name = "dev-route-table"
}
}
resource "aws_route_table_association" "dev_routes" {
subnet_id = aws_subnet.dev[0].id
route_table_id = aws_route_table.dev.id
depends_on = [aws_nat_gateway.gw]
}
resource "aws_route" "dev_nat" {
route_table_id = aws_route_table.dev.id
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.gw.id
depends_on = [aws_nat_gateway.gw]
}
You are placing the NAT gateway in aws_subnet.dev[0], then you create aws_route_table.dev, which you attach to that same subnet. What's more, aws_route_table.dev has a route, aws_route.dev_nat, which points at the NAT gateway.
So you have circular routing: all traffic in aws_subnet.dev[0] is directed to the NAT gateway in the same subnet, which in turn is directed again to the same NAT gateway.
As you pointed out in a comment, the NAT gateway should be in a public subnet, while the subnets directing traffic to the NAT should be private.
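A minimal sketch of that split, reusing the resources above (aws_subnet.public is a hypothetical subnet added to host the NAT gateway; index 0 of cidrsubnet is unused by the dev subnets, which start at index 1):
# Hypothetical public subnet to host the NAT gateway
resource "aws_subnet" "public" {
  vpc_id            = aws_vpc.main.id
  availability_zone = data.aws_availability_zones.available.names[0]
  cidr_block        = cidrsubnet(var.cidr_block, 6, 0)
  tags              = { Name = "public-subnet" }
}
# The public subnet routes 0.0.0.0/0 to the IGW...
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.gw.id
  }
}
resource "aws_route_table_association" "public" {
  subnet_id      = aws_subnet.public.id
  route_table_id = aws_route_table.public.id
}
# ...and the NAT gateway moves into that public subnet...
resource "aws_nat_gateway" "gw" {
  allocation_id = aws_eip.nat-gw.id
  subnet_id     = aws_subnet.public.id
  depends_on    = [aws_internet_gateway.gw]
}
# ...while the private dev subnets keep their existing 0.0.0.0/0 route to the
# NAT gateway (aws_route.dev_nat above needs no change).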

Why can't I connect to my ec2 instance after apply?

I have put together my first Terraform script for provisioning assets on AWS. However, I am not able to connect to the EC2 instance in the public subnet.
I can see that all of the expected resources are created:
subnets, instances, route tables, gateway, etc.
I have excluded provider.tf because it contains sensitive secrets.
My region is ap-south-1.
resource "aws_vpc" "vpc1" {
cidr_block = "10.20.0.0/16"
tags = {
name = "tf_vpc"
}
}
# subnets below
resource "aws_subnet" "subnet_public"{
vpc_id = "${aws_vpc.vpc1.id}"
cidr_block = "10.20.10.0/24"
availability_zone = "ap-south-1a"
map_public_ip_on_launch = true
}
resource "aws_subnet" "subnet_private"{
vpc_id = "${aws_vpc.vpc1.id}"
cidr_block = "10.20.20.0/24"
availability_zone = "ap-south-1a"
}
resource "aws_security_group" "sg-web" {
name ="allow80"
description="allows traffic on port 80"
vpc_id ="${aws_vpc.vpc1.id}"
ingress{
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress{
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
name="allowhttp"
}
}
resource "aws_default_route_table" "public" {
default_route_table_id = "${aws_vpc.vpc1.main_route_table_id}"
tags = {
name = "route-default"
}
}
resource "aws_internet_gateway" "ig"{
vpc_id = "${aws_vpc.vpc1.id}"
}
resource "aws_route_table" "route_public"{
vpc_id = "${aws_vpc.vpc1.id}"
}
resource "aws_route" "r1" {
route_table_id = "${aws_route_table.route_public.id}"
destination_cidr_block = "0.0.0.0/16"
gateway_id = "${aws_internet_gateway.ig.id}"
}
resource "aws_route_table_association" "public" {
subnet_id = "${aws_subnet.subnet_public.id}"
route_table_id = "${aws_route_table.route_public.id}"
}
resource "aws_instance" "ins1_web"{
ami = "ami-0447a12f28fddb066"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.subnet_public.id}"
vpc_security_group_ids = ["${aws_security_group.sg-web.id}"]
key_name = "myBOMkey-2"
tags = {
name="tf-1"
}
}
resource "aws_instance" "ins1_db"{
ami = "ami-0447a12f28fddb066"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.subnet_private.id}"
vpc_security_group_ids = ["${aws_security_group.sg-web.id}"]
key_name = "myBOMkey-2"
tags = {
name="tf-1"
}
}
Take a look at the CIDR (0.0.0.0/16), which does not seem correct; it might be a typo. Any IP is represented by 0.0.0.0/0, and the any-IP destination needs to be routed to the Internet gateway:
resource "aws_route" "r1" {
route_table_id = "${aws_route_table.route_public.id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.ig.id}"
}
Also missing from your security group configuration is egress (outbound) traffic: Terraform does not keep the default allow-all outbound rule. Refer to the Terraform security group documentation.
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
Hope this helps!

I'm planning to create 3 aws_vpc and peer them using Terraform

I'm planning to create 3 aws_vpc and peer them using Terraform. My problem is that all 3 config files are in different folders. When I run terraform apply I get this error:
Error: resource 'aws_vpc_peering_connection.transit2pco' config: unknown resource 'aws_vpc.Transit-VPC' referenced in variable aws_vpc.Transit-VPC.id
#create a vpc in aws named PCO-VPC-Prod
resource "aws_vpc" "PCO-VPC-Prod" {
cidr_block = "${var.pco_cidr_block}"
enable_dns_support = true
enable_dns_hostnames = true
tags = {
Name = "PCO-VPC-Prod"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-pub-sub-a" {
availability_zone = "us-west-1a"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-pub-sub-a}"
map_public_ip_on_launch = true
tags {
Name = "PCO-pub-sub-a"
Created = "terraform"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-pub-sub-b" {
availability_zone = "us-west-1b"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-pub-sub-b}"
map_public_ip_on_launch = true
tags {
Name = "PCO-pub-sub-a"
Created = "terraform"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-priv-sub-a" {
availability_zone = "us-west-1a"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-priv-sub-a}"
map_public_ip_on_launch = false
tags {
Name = "PCO-priv-sub-a"
Created = "terraform"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-priv-sub-b" {
availability_zone = "us-west-1b"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-priv-sub-b}"
map_public_ip_on_launch = false
tags {
Name = "PCO-priv-sub-a"
Created = "terraform"
}
}
#create the public route table
resource "aws_route_table" "PCO-rt-pub" {
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags {
Name = "Pco Public route table"
}
}
#create the private route table
resource "aws_route_table" "PCO-rt-priv" {
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags {
Name = "Pco Private route table"
}
}
# Associate subnet PCO-pub-sub-a to public route table
resource "aws_route_table_association" "PCO-pub-sub-a-association" {
subnet_id = "${aws_subnet.PCO-pub-sub-a.id}"
route_table_id = "${aws_vpc.PCO-VPC-Prod.main_route_table_id}"
}
# Associate subnet PCO-pub-sub-b to public route table
resource "aws_route_table_association" "PCO-pub-sub-b-association" {
subnet_id = "${aws_subnet.PCO-pub-sub-b.id}"
route_table_id = "${aws_route_table.PCO-rt-pub.id}"
}
# Associate subnet PCO-priv-sub-a to private route table
resource "aws_route_table_association" "PCO-priv-sub-a-association" {
subnet_id = "${aws_subnet.PCO-priv-sub-a.id}"
route_table_id = "${aws_route_table.PCO-rt-priv.id}"
}
# Associate subnet PCO-priv-sub-b to private route table
resource "aws_route_table_association" "PCO-priv-sub-b-association" {
subnet_id = "${aws_subnet.PCO-priv-sub-b.id}"
route_table_id = "${aws_route_table.PCO-rt-priv.id}"
}
resource "aws_security_group" "PCO_public_subnet_security_group" {
name = "PCO_public_sg"
description = "PCO_public_sg"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags = { Name = "PCO_public_sg"}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["${var.pco-priv-sub-a}"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "PCO_private_subnet_security_group" {
name = "vpc2_private_sg"
description = "vpc2_private_sg"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags = { Name = "vpc2_private_sg"}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["${var.pco-pub-sub-a}"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["${var.transit-priv-sub-a}"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "pco_public_instance" {
ami = "ami-b2527ad2"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.PCO_public_subnet_security_group.id}"]
subnet_id = "${aws_subnet.PCO-pub-sub-a.id}"
tags {
Name = "pco"
}
}
resource "aws_instance" "pco_private_instance" {
ami = "ami-b2527ad2"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.PCO_private_subnet_security_group.id}"]
subnet_id = "${aws_subnet.PCO-priv-sub-a.id}"
tags {
Name = "pco2"
}
}
/**
* VPC peering connection.
*
* Establishes a relationship resource between the transit and tx VPC.
*/
resource "aws_vpc_peering_connection" "transit2tx" {
peer_vpc_id = "${aws_vpc.TX-VPC-Prod.id}"
vpc_id = "${aws_vpc.Transit-VPC.id}"
auto_accept = true
accepter {
allow_remote_vpc_dns_resolution = true
}
requester {
allow_remote_vpc_dns_resolution = true
}
}
/**
* Route rule.
*
* Creates a new route rule on the "transit" VPC main route table. All requests
* to the "tx" VPC's IP range will be directed to the VPC peering
* connection.
*/
resource "aws_route" "transit2tx" {
route_table_id = "${aws_vpc.Transit-VPC.main_route_table_id}"
destination_cidr_block = "${aws_vpc.TX-VPC-Prod.cidr_block}"
vpc_peering_connection_id = "${aws_vpc_peering_connection.transit2tx.id}"
}
/**
* Route rule.
*
* Creates a new route rule on the "pco" VPC main route table. All
* requests to the "pco" VPC's IP range will be directed to the VPC
* peering connection.
*/
resource "aws_route" "tx2transit" {
route_table_id = "${aws_vpc.TX-VPC-Prod.main_route_table_id}"
destination_cidr_block = "${aws_vpc.Transit-VPC.cidr_block}"
vpc_peering_connection_id = "${aws_vpc_peering_connection.transit2tx.id}"
}
I believe you'll need to use a data source to reference "Transit-VPC", since it lives in a different folder's configuration:
https://www.terraform.io/docs/providers/aws/d/vpc.html
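A minimal sketch, assuming the Transit VPC carries a Name tag of "Transit-VPC" (adjust the filter to however that VPC is actually tagged):
# Look up the VPC that was created from the other folder's configuration
data "aws_vpc" "transit" {
  filter {
    name   = "tag:Name"
    values = ["Transit-VPC"]
  }
}
resource "aws_vpc_peering_connection" "transit2tx" {
  peer_vpc_id = aws_vpc.TX-VPC-Prod.id
  vpc_id      = data.aws_vpc.transit.id # data source instead of a resource reference
  auto_accept = true
}
The Transit VPC's attributes can then be read from the same data source in the route rules, e.g. data.aws_vpc.transit.main_route_table_id and data.aws_vpc.transit.cidr_block.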