Why can't I connect to my ec2 instance after apply? - amazon-web-services

I have put together my first Terraform script for provisioning assets on AWS. However, I am not able to connect to the EC2 instance in the public subnet.
I can see that all of the expected resources are created: subnets, instances, route tables, the internet gateway, etc.
I have excluded provider.tf because it contains sensitive secrets.
My region is ap-south-1.
resource "aws_vpc" "vpc1" {
cidr_block = "10.20.0.0/16"
tags = {
name = "tf_vpc"
}
}
# subnets below
resource "aws_subnet" "subnet_public"{
vpc_id = "${aws_vpc.vpc1.id}"
cidr_block = "10.20.10.0/24"
availability_zone = "ap-south-1a"
map_public_ip_on_launch = true
}
resource "aws_subnet" "subnet_private"{
vpc_id = "${aws_vpc.vpc1.id}"
cidr_block = "10.20.20.0/24"
availability_zone = "ap-south-1a"
}
resource "aws_security_group" "sg-web" {
name ="allow80"
description="allows traffic on port 80"
vpc_id ="${aws_vpc.vpc1.id}"
ingress{
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress{
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
name="allowhttp"
}
}
resource "aws_default_route_table" "public" {
default_route_table_id = "${aws_vpc.vpc1.main_route_table_id}"
tags = {
name = "route-default"
}
}
resource "aws_internet_gateway" "ig"{
vpc_id = "${aws_vpc.vpc1.id}"
}
resource "aws_route_table" "route_public"{
vpc_id = "${aws_vpc.vpc1.id}"
}
resource "aws_route" "r1" {
route_table_id = "${aws_route_table.route_public.id}"
destination_cidr_block = "0.0.0.0/16"
gateway_id = "${aws_internet_gateway.ig.id}"
}
resource "aws_route_table_association" "public" {
subnet_id = "${aws_subnet.subnet_public.id}"
route_table_id = "${aws_route_table.route_public.id}"
}
resource "aws_instance" "ins1_web"{
ami = "ami-0447a12f28fddb066"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.subnet_public.id}"
vpc_security_group_ids = ["${aws_security_group.sg-web.id}"]
key_name = "myBOMkey-2"
tags = {
name="tf-1"
}
}
resource "aws_instance" "ins1_db"{
ami = "ami-0447a12f28fddb066"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.subnet_private.id}"
vpc_security_group_ids = ["${aws_security_group.sg-web.id}"]
key_name = "myBOMkey-2"
tags = {
name="tf-1"
}
}
Why can't I connect to my EC2 instance after apply?

Take a look at the destination CIDR (0.0.0.0/16), which does not seem to be correct; it might be a typo. "Any IP" is represented by "0.0.0.0/0", and it is that any-IP destination that needs to be routed to the internet gateway:
resource "aws_route" "r1" {
route_table_id = "${aws_route_table.route_public.id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.ig.id}"
}
Also missing from your security group configuration is egress (outbound) traffic: Terraform removes the allow-all outbound rule that AWS creates by default, so you have to declare egress rules explicitly. Refer to the Terraform security group documentation:
egress {
  from_port   = 0
  to_port     = 0
  protocol    = "-1"
  cidr_blocks = ["0.0.0.0/0"]
}
Hope this helps!

Related

ALB health check failing with 502 error with my terraform config

Hi, I'm a beginner trying to play with VPCs on AWS and Terraform, and I'm stuck on an ALB health check issue.
I have two AZs, each with an EC2 instance running a web server. My goal is to set up the load balancer and be redirected to one of my instances. The instances are in private subnets behind a NAT gateway with an Elastic IP.
I set up a bastion host to check over SSH whether my instances could reach the internet, and the answer is yes.
This is my Terraform setup (maybe there is an obvious error that I haven't seen):
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
shared_credentials_file = "./aws/credentials"
region = "us-east-1"
}
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "my-vpc"
}
}
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "my-internet-gateway"
}
}
resource "aws_subnet" "public_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "my-public-a-subnet"
}
}
resource "aws_subnet" "public_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "my-public-b-subnet"
}
}
resource "aws_subnet" "private_a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1a"
tags = {
Name = "my-private-a-subnet"
}
}
resource "aws_subnet" "private_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.4.0/24"
availability_zone = "us-east-1b"
tags = {
Name = "my-private-b-subnet"
}
}
resource "aws_nat_gateway" "main" {
allocation_id = aws_eip.main.id
subnet_id = aws_subnet.public_a.id
}
resource "aws_eip" "main" {
vpc = true
tags = {
Name = "my-nat-gateway-eip"
}
}
resource "aws_security_group" "main" {
name = "my-security-group"
description = "Allow HTTP and SSH access"
vpc_id = aws_vpc.main.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "my-security-group"
}
}
resource "aws_instance" "ec2_a" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-a"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "ec2_b" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.private_b.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-ec2-b"
}
key_name = "vockey"
user_data = file("user_data.sh")
}
resource "aws_instance" "bastion" {
ami = "ami-0c2b8ca1dad447f8a"
instance_type = "t2.micro"
subnet_id = aws_subnet.public_a.id
vpc_security_group_ids = [aws_security_group.main.id]
tags = {
Name = "my-bastion"
}
key_name = "vockey"
user_data = file("user_data_bastion.sh")
}
resource "aws_alb" "main" {
name = "my-alb"
internal = false
security_groups = [aws_security_group.main.id]
subnets = [aws_subnet.public_a.id, aws_subnet.public_b.id]
tags = {
Name = "my-alb"
}
}
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
tags = {
Name = "my-alb-target-group"
}
}
resource "aws_route_table" "private" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_nat_gateway.main.id
}
tags = {
Name = "my-private-route-table"
}
}
resource "aws_route_table_association" "private_a" {
subnet_id = aws_subnet.private_a.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table_association" "private_b" {
subnet_id = aws_subnet.private_b.id
route_table_id = aws_route_table.private.id
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
tags = {
Name = "My Public Route Table"
}
}
resource "aws_route_table_association" "public_a" {
subnet_id = aws_subnet.public_a.id
route_table_id = aws_route_table.public.id
}
resource "aws_route_table_association" "public_b" {
subnet_id = aws_subnet.public_b.id
route_table_id = aws_route_table.public.id
}
resource "aws_alb_listener" "main" {
load_balancer_arn = aws_alb.main.arn
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.ec2.arn
type = "forward"
}
}
resource "aws_alb_target_group_attachment" "ec2_a" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_a.id
port = 80
}
resource "aws_alb_target_group_attachment" "ec2_b" {
target_group_arn = aws_alb_target_group.ec2.arn
target_id = aws_instance.ec2_b.id
port = 80
}
It looks like you don't have a health_check block on the aws_alb_target_group resource. Try adding something like this:
resource "aws_alb_target_group" "ec2" {
name = "my-alb-target-group"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.main.id
health_check {
path = "/"
matcher = "200"
}
tags = {
Name = "my-alb-target-group"
}
}
Also, make sure that the HTTP services on your EC2 instances are listening and accepting connections on port 80. You should be able to curl http://<ec2 ip address> and get a 200 response.
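The contents of user_data.sh aren't shown in the question, so purely as an illustration, here is a minimal sketch (assuming a yum-based AMI such as Amazon Linux 2) of an inline user_data that would get something listening on port 80 so the health check has a target to hit:
resource "aws_instance" "ec2_a" {
  ami                    = "ami-0c2b8ca1dad447f8a"
  instance_type          = "t2.micro"
  subnet_id              = aws_subnet.private_a.id
  vpc_security_group_ids = [aws_security_group.main.id]
  key_name               = "vockey"

  # Hypothetical replacement for file("user_data.sh"); adjust to your AMI/distro.
  user_data = <<-EOF
    #!/bin/bash
    yum update -y
    yum install -y httpd
    systemctl enable --now httpd
    echo "ok" > /var/www/html/index.html
  EOF

  tags = {
    Name = "my-ec2-a"
  }
}
If the instances already run a web server, a quick curl against their private IPs from the bastion is enough to confirm that the target responds with 200.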

EC2 with two interfaces (one public and one private)

I basically want an EC2 instance with one interface in a public subnet and one in a private subnet. I should be able to SSH through the public interface from the outside world; the private interface will be used to communicate with a database on the private network.
In Terraform...
I created a VPC and two subnets (one public, one private) in that VPC.
Created an internet gateway and a route table in the VPC with a 0.0.0.0/0 route to the IGW.
Created a route table association between the public subnet and the route table.
Created an EC2 instance with one interface in the public subnet and one in the private subnet.
Created an Elastic IP address on the public interface of the EC2 instance.
Created a security group allowing the SSH port (22).
I'm unable to SSH (connection timeout). Could anyone take a look at this?
conf_vpc_cidr_block = "10.100.0.0/16"
conf_vpc_enable_dns_support = "true"
conf_vpc_enable_dns_hostnames = "true"
conf_vpc_enable_classiclink = "false"
conf_vpc_instance_tenancy = "default"
conf_private_subnet_cidr_block = "10.100.100.0/24"
conf_private_subnet_map_public_ip_on_launch = "false"
conf_private_subnet_availability_zone = "eu-north-1a"
conf_public_subnet_cidr_block = "10.100.200.0/24"
conf_public_subnet_map_public_ip_on_launch = "true"
conf_public_subnet_availability_zone = "eu-north-1a"
conf_instance_ami = "ami-000e50175c5f86214"
conf_instance_type = "t3.micro"
provider "aws" {
region = var.conf_aws_region
profile = var.conf_aws_profile
}
resource "aws_vpc" "Terraform_XX_VPC" {
cidr_block = var.conf_vpc_cidr_block
enable_dns_support = var.conf_vpc_enable_dns_support
enable_dns_hostnames = var.conf_vpc_enable_dns_hostnames
enable_classiclink = var.conf_vpc_enable_classiclink
instance_tenancy = var.conf_vpc_instance_tenancy
}
resource "aws_subnet" "Terraform_XX_Private_SN" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
cidr_block = var.conf_private_subnet_cidr_block
map_public_ip_on_launch = var.conf_private_subnet_map_public_ip_on_launch
availability_zone = var.conf_private_subnet_availability_zone
}
resource "aws_subnet" "Terraform_XX_Public_SN" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
cidr_block = var.conf_public_subnet_cidr_block
map_public_ip_on_launch = var.conf_public_subnet_map_public_ip_on_launch
availability_zone = var.conf_public_subnet_availability_zone
}
resource "aws_internet_gateway" "Terraform_XX_IGW" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
}
resource "aws_route_table" "Terraform_XX_Route_Public" {
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.Terraform_XX_IGW.id}"
}
}
resource "aws_route_table_association" "Terraform_XX_Route_Table_Public_Association" {
subnet_id = "${aws_subnet.Terraform_XX_Public_SN.id}"
route_table_id = "${aws_route_table.Terraform_XX_Route_Public.id}"
}
resource "aws_security_group" "Terraform_XX_SG_ALLOW_SSH" {
name = "Terraform_XX_SG_ALLOW_SSH"
description = "Allow SSH"
vpc_id = "${aws_vpc.Terraform_XX_VPC.id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "Terraform_XX_EC2" {
ami = var.conf_instance_ami
instance_type = var.conf_instance_type
key_name = "${local.environmentLC}"
network_interface {
device_index = 0
network_interface_id = "${aws_network_interface.Terraform_XX_EC2_Private_Intf.id}"
#delete_on_termination = true
}
network_interface {
device_index = 1
network_interface_id = "${aws_network_interface.Terraform_XX_EC2_Public_Intf.id}"
#delete_on_termination = true
}
}
resource "aws_network_interface" "Terraform_XX_EC2_Private_Intf" {
subnet_id = "${aws_subnet.Terraform_XX_Private_SN.id}"
private_ips = ["10.100.100.5"]
security_groups = [aws_security_group.Terraform_XX_SG_ALLOW_SSH.id]
}
resource "aws_network_interface" "Terraform_XX_EC2_Public_Intf" {
subnet_id = "${aws_subnet.Terraform_XX_Public_SN.id}"
private_ips = ["10.100.200.5"]
security_groups = [aws_security_group.Terraform_XX_SG_ALLOW_SSH.id]
}
resource "aws_eip" "Terraform_XX_EC2_EIP" {
vpc = true
network_interface = "${aws_network_interface.Terraform_XX_EC2_Public_Intf.id}"
}
I was able to solve this by swapping the device_index values of the public and private interfaces.
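Concretely, that means something like this (a sketch based on the interface resources above, with the public interface moved to device_index 0):
resource "aws_instance" "Terraform_XX_EC2" {
  ami           = var.conf_instance_ami
  instance_type = var.conf_instance_type
  key_name      = "${local.environmentLC}"

  # Primary interface (device_index 0) now sits in the public subnet,
  # so the Elastic IP and inbound SSH land on eth0.
  network_interface {
    device_index         = 0
    network_interface_id = "${aws_network_interface.Terraform_XX_EC2_Public_Intf.id}"
  }

  # Secondary interface (device_index 1) stays in the private subnet
  # for traffic to the database network.
  network_interface {
    device_index         = 1
    network_interface_id = "${aws_network_interface.Terraform_XX_EC2_Private_Intf.id}"
  }
}
The OS default route typically points out of the primary interface, so when the EIP-backed interface is eth1 the return traffic for an SSH connection tends to leave through the private interface and never makes it back, which is why swapping the indexes fixes the timeout.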

Terraform shows `InvalidGroup.NotFound` while creating an EC2 instance

I am trying to deploy EC2 instances using Terraform and I see the following error:
Error: Error launching source instance: InvalidGroup.NotFound: The security group 'prod-web-servers-sg' does not exist in VPC 'vpc-db3a3cb3'
Here is the Terraform template I'm using:
resource "aws_default_vpc" "default" {
}
resource "aws_security_group" "prod-web-servers-sg" {
name = "prod-web-servers-sg"
description = "security group for production grade web servers"
vpc_id = "${aws_default_vpc.default.id}"
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}
#Subnet
resource "aws_subnet" "private_subnet" {
vpc_id = "${aws_default_vpc.default.id}"
cidr_block = "172.31.0.0/24"
availability_zone = "ap-south-1a"
}
resource "aws_instance" "prod-web-server" {
ami = "ami-04b1ddd35fd71475a"
count = 2
key_name = "test_key"
instance_type = "r5.large"
security_groups = ["prod-web-servers-sg"]
subnet_id = "${aws_subnet.private_subnet.id}"
}
You have a race condition there because Terraform doesn't know to wait until the security group is created before creating the instance.
To fix this, you should interpolate aws_security_group.prod-web-servers-sg.id into the aws_instance.prod-web-server resource so that Terraform can work out the dependency chain between the resources. You should also use vpc_security_group_ids instead of security_groups, as mentioned in the aws_instance resource documentation:
security_groups - (Optional, EC2-Classic and default VPC only) A list of security group names (EC2-Classic) or IDs (default VPC) to associate with.
NOTE:
If you are creating Instances in a VPC, use vpc_security_group_ids instead.
So you should have something like the following:
resource "aws_default_vpc" "default" {}
resource "aws_security_group" "prod-web-servers-sg" {
name = "prod-web-servers-sg"
description = "security group for production grade web servers"
vpc_id = aws_default_vpc.default.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}
#Subnet
resource "aws_subnet" "private_subnet" {
vpc_id = aws_default_vpc.default.id
cidr_block = "172.31.0.0/24"
availability_zone = "ap-south-1a"
}
resource "aws_instance" "prod-web-server" {
ami = "ami-04b1ddd35fd71475a"
count = 2
key_name = "test_key"
instance_type = "r5.large"
vpc_security_group_ids = [aws_security_group.prod-web-servers-sg.id]
subnet_id = aws_subnet.private_subnet.id
}

Terraform AWS not able to ping, or ssh just created EC2 instances

I would like to ask for assistance.
I wrote a Terraform script which creates 5 EC2 instances, but I am not able to ping or SSH into them.
Do you see any potential issue with this? I have opened ICMP and SSH, but when I check from other computers/sites I get "port is closed".
When I create an EC2 instance manually it works from my computer (I am able to SSH/ping), but not with this Terraform script.
provider "aws" {
version = "~> 3.0"
region = "us-east-1"
access_key = "AKxxxxxxxxxxx"
secret_key = "2CLBj/s9dC5r52Y"
}
# Create a VPC
resource "aws_vpc" "BrokenByteVPC" {
cidr_block = "192.168.100.0/28"
tags = {
Name = "BrokenByteVPC"
}
}
resource "aws_subnet" "BrokenbyteLB-subnet" {
vpc_id = aws_vpc.BrokenByteVPC.id
cidr_block = "192.168.100.0/28"
availability_zone = "us-east-1a"
tags = {
Name = "BrokenbyteLB-subnet"
}
}
resource "aws_internet_gateway" "BrokenByte-gateway" {
vpc_id = aws_vpc.BrokenByteVPC.id
tags = {
Name = "BrokenByte-gateway"
}
}
resource "aws_route_table" "BrokenByte-Route-table" {
vpc_id = aws_vpc.BrokenByteVPC.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.BrokenByte-gateway.id
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
route_table_id = aws_route_table.BrokenByte-Route-table.id
}
resource "aws_security_group" "allow_traffic" {
name = "allow_Traffic"
description = "Allow SSH,HTTP and HTTPS inbound traffic"
vpc_id = aws_vpc.BrokenByteVPC.id
ingress {
description = "Dozvoli SVEEEEEEEE"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "SSH traffic"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP traffic"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTPS traffic"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "Allow_ssh_http_https"
}
}
resource "aws_network_interface" "NginX-public" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
#private_ips = ["192.168.100.2"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "NginX-LB" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.10"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "www1" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.11"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "www2" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.12"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_network_interface" "www3" {
subnet_id = aws_subnet.BrokenbyteLB-subnet.id
private_ips = ["192.168.100.13"]
security_groups = [aws_security_group.allow_traffic.id]
}
resource "aws_eip" "BrokenByte-PublicIP" {
vpc = true
network_interface = aws_network_interface.NginX-public.id
#associate_with_private_ip = "192.168.100.10"
depends_on = [aws_internet_gateway.BrokenByte-gateway, aws_instance.BrokenByteNginX]
}
resource "aws_instance" "BrokenByteNginX" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.NginX-LB.id
}
network_interface {
device_index=1
network_interface_id = aws_network_interface.NginX-public.id
}
tags = {
Name = "BrokenByteNginXLB"
}
user_data = <<-EOF
#!/bin/bash
sudo apt-get update -y
EOF
}
resource "aws_instance" "BrokenByteWWW1" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.www1.id
}
tags = {
Name = "BrokenByteWWW1"
}
}
resource "aws_instance" "BrokenByteWWW2" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.www2.id
}
tags = {
Name = "BrokenByteWWW2"
}
}
resource "aws_instance" "BrokenByteWWW3" {
ami = "ami-0dba2cb6798deb6d8"
availability_zone = "us-east-1a"
instance_type = "t2.micro"
key_name = "aws_test"
network_interface {
device_index=0
network_interface_id = aws_network_interface.www3.id
}
tags = {
Name = "BrokenByteWWW3"
}
}
None of your instances have a public IP address (except the one with aws_eip.BrokenByte-PublicIP), since your public subnet is missing map_public_ip_on_launch. You can rectify the issue as follows:
resource "aws_subnet" "BrokenbyteLB-subnet" {
vpc_id = aws_vpc.BrokenByteVPC.id
cidr_block = "192.168.100.0/28"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "BrokenbyteLB-subnet"
}
}
I was sure it was something related to the network interfaces, but I wasn't sure what.
Now it's fine, I can ping and SSH. I just moved the public interface to device_index 0 and removed the code for the other interface.
@Marcin, your first reply showed me which direction to look in.
# network_interface {
#   device_index         = 0
#   network_interface_id = aws_network_interface.NginX-LB.id
# }

network_interface {
  device_index         = 0
  network_interface_id = aws_network_interface.NginX-public.id
}

I'm planning to create 3 aws_vpc and peering using terraform

I'm planning to create 3 aws_vpc resources and peer them using Terraform. My problem is that all 3 config files are in different folders. When I run terraform apply I get this error:
Error: resource 'aws_vpc_peering_connection.transit2pco' config: unknown resource 'aws_vpc.Transit-VPC' referenced in variable aws_vpc.Transit-VPC.id
#create a vpc in aws named PCO-VPC-Prod
resource "aws_vpc" "PCO-VPC-Prod" {
cidr_block = "${var.pco_cidr_block}"
enable_dns_support = true
enable_dns_hostnames = true
tags = {
Name = "PCO-VPC-Prod"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-pub-sub-a" {
availability_zone = "us-west-1a"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-pub-sub-a}"
map_public_ip_on_launch = true
tags {
Name = "PCO-pub-sub-a"
Created = "terraform"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-pub-sub-b" {
availability_zone = "us-west-1b"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-pub-sub-b}"
map_public_ip_on_launch = true
tags {
Name = "PCO-pub-sub-a"
Created = "terraform"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-priv-sub-a" {
availability_zone = "us-west-1a"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-priv-sub-a}"
map_public_ip_on_launch = false
tags {
Name = "PCO-priv-sub-a"
Created = "terraform"
}
}
# Create a subnet to launch our instances into
resource "aws_subnet" "PCO-priv-sub-b" {
availability_zone = "us-west-1b"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
cidr_block = "${var.pco-priv-sub-b}"
map_public_ip_on_launch = false
tags {
Name = "PCO-priv-sub-a"
Created = "terraform"
}
}
#create the public route table
resource "aws_route_table" "PCO-rt-pub" {
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags {
Name = "Pco Public route table"
}
}
#create the private route table
resource "aws_route_table" "PCO-rt-priv" {
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags {
Name = "Pco Private route table"
}
}
# Associate subnet PCO-pub-sub-a to public route table
resource "aws_route_table_association" "PCO-pub-sub-a-association" {
subnet_id = "${aws_subnet.PCO-pub-sub-a.id}"
route_table_id = "${aws_vpc.PCO-VPC-Prod.main_route_table_id}"
}
# Associate subnet PCO-pub-sub-b to public route table
resource "aws_route_table_association" "PCO-pub-sub-b-association" {
subnet_id = "${aws_subnet.PCO-pub-sub-b.id}"
route_table_id = "${aws_route_table.PCO-rt-pub.id}"
}
# Associate subnet PCO-priv-sub-a to private route table
resource "aws_route_table_association" "PCO-priv-sub-a-association" {
subnet_id = "${aws_subnet.PCO-priv-sub-a.id}"
route_table_id = "${aws_route_table.PCO-rt-priv.id}"
}
# Associate subnet PCO-priv-sub-b to private route table
resource "aws_route_table_association" "PCO-priv-sub-b-association" {
subnet_id = "${aws_subnet.PCO-priv-sub-b.id}"
route_table_id = "${aws_route_table.PCO-rt-priv.id}"
}
resource "aws_security_group" "PCO_public_subnet_security_group" {
name = "PCO_public_sg"
description = "PCO_public_sg"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags = { Name = "PCO_public_sg"}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["${var.pco-priv-sub-a}"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "PCO_private_subnet_security_group" {
name = "vpc2_private_sg"
description = "vpc2_private_sg"
vpc_id = "${aws_vpc.PCO-VPC-Prod.id}"
tags = { Name = "vpc2_private_sg"}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["${var.pco-pub-sub-a}"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["${var.transit-priv-sub-a}"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "pco_public_instance" {
ami = "ami-b2527ad2"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.PCO_public_subnet_security_group.id}"]
subnet_id = "${aws_subnet.PCO-pub-sub-a.id}"
tags {
Name = "pco"
}
}
resource "aws_instance" "pco_private_instance" {
ami = "ami-b2527ad2"
instance_type = "t2.micro"
vpc_security_group_ids = ["${aws_security_group.PCO_private_subnet_security_group.id}"]
subnet_id = "${aws_subnet.PCO-priv-sub-a.id}"
tags {
Name = "pco2"
}
}
/**
* VPC peering connection.
*
* Establishes a relationship resource between the transit and tx VPC.
*/
resource "aws_vpc_peering_connection" "transit2tx" {
peer_vpc_id = "${aws_vpc.TX-VPC-Prod.id}"
vpc_id = "${aws_vpc.Transit-VPC.id}"
auto_accept = true
accepter {
allow_remote_vpc_dns_resolution = true
}
requester {
allow_remote_vpc_dns_resolution = true
}
}
/**
* Route rule.
*
* Creates a new route rule on the "transit" VPC main route table. All requests
* to the "tx" VPC's IP range will be directed to the VPC peering
* connection.
*/
resource "aws_route" "transit2tx" {
route_table_id = "${aws_vpc.Transit-VPC.main_route_table_id}"
destination_cidr_block = "${aws_vpc.TX-VPC-Prod.cidr_block}"
vpc_peering_connection_id = "${aws_vpc_peering_connection.transit2tx.id}"
}
/**
* Route rule.
*
* Creates a new route rule on the "pco" VPC main route table. All
* requests to the "pco" VPC's IP range will be directed to the VPC
* peering connection.
*/
resource "aws_route" "tx2transit" {
route_table_id = "${aws_vpc.TX-VPC-Prod.main_route_table_id}"
destination_cidr_block = "${aws_vpc.Transit-VPC.cidr_block}"
vpc_peering_connection_id = "${aws_vpc_peering_connection.transit2tx.id}"
}
I believe you'll need to use a Data Source to reference "Transit-VPC"
https://www.terraform.io/docs/providers/aws/d/vpc.html
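As a minimal sketch (the tag:Name values below are assumptions; match them to however the VPCs created in the other folders are actually tagged), the peering resource would reference data sources instead of resources that don't exist in this configuration:
# Look up the VPCs that are managed in the other folders/states.
data "aws_vpc" "transit" {
  filter {
    name   = "tag:Name"
    values = ["Transit-VPC"]   # hypothetical tag value
  }
}

data "aws_vpc" "tx" {
  filter {
    name   = "tag:Name"
    values = ["TX-VPC-Prod"]   # hypothetical tag value
  }
}

resource "aws_vpc_peering_connection" "transit2tx" {
  peer_vpc_id = "${data.aws_vpc.tx.id}"
  vpc_id      = "${data.aws_vpc.transit.id}"
  auto_accept = true
}
The route rules can use the same lookups, e.g. data.aws_vpc.transit.main_route_table_id and data.aws_vpc.tx.cidr_block, since the aws_vpc data source exposes those attributes as well.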