I'm running the Terraform code below to deploy an EC2 instance inside a VPC to act as a web server, but for some reason I can't reach the website and can't SSH to it. I believe I have set the ingress and egress rules properly:
########Provider########
provider "aws" {
region = "us-west-2"
access_key = "[redacted]"
secret_key = "[redacted]"
}
########VPC########
resource "aws_vpc" "vpc1" {
cidr_block = "10.1.0.0/16"
tags = {
Name = "Production"
}
}
########Internet GW########
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.vpc1.id
}
########Route table########
resource "aws_route_table" "rt" {
vpc_id = aws_vpc.vpc1.id
route {
cidr_block = "0.0.0.0/24"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
}
########Subnet########
resource "aws_subnet" "subnet1" {
vpc_id = aws_vpc.vpc1.id
cidr_block = "10.1.0.0/24"
availability_zone = "us-west-2a"
map_public_ip_on_launch = "true"
tags = {
Name = "prod-subnet-1"
}
}
########RT association########
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet1.id
route_table_id = aws_route_table.rt.id
}
########Security Group########
resource "aws_security_group" "sec1" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.vpc1.id
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["10.1.0.0/16"]
}
#SSH access from anywhere
ingress {
description = "SSH from VPC"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "allow_web"
}
}
########Net Interface for the Instance########
#resource "aws_network_interface" "wsn" {
# subnet_id = aws_subnet.subnet1.id
# private_ips = ["10.0.1.50"]
# security_groups = [aws_security_group.sec1.id]
#}
########Load Balancer########
resource "aws_elb" "elb" {
name = "lb"
subnets = [aws_subnet.subnet1.id]
security_groups = [aws_security_group.sec1.id]
instances = [aws_instance.web1.id]
listener {
instance_port = 80
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
}
########EC2 Instance########
resource "aws_instance" "web1" {
ami = "ami-003634241a8fcdec0" #ubuntu 18.4
instance_type = "t2.micro"
availability_zone = "us-west-2a"
key_name = "main-key"
subnet_id = aws_subnet.subnet1.id
#network_interface {
# device_index = 0
# network_interface_id = aws_network_interface.wsn.id
#}
user_data = <<-EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemctl start apache2
sudo bash -c 'echo Hello world!!! > /var/www/html/index.html'
EOF
tags = {
Name = "HelloWorld"
}
}
output "aws_elb_public_dns" {
value = aws_elb.elb.dns_name
}
The plan and the apply run fine, but in the load balancer the instance is "OutOfService".
What could be wrong here?
You are missing a security group on your instance: vpc_security_group_ids. Without it you won't be able to SSH to the instance, nor will HTTP traffic be allowed in from the outside.
Also, your route to the IGW is incorrect. It should be:
cidr_block = "0.0.0.0/0"
The same goes for the SG on your ELB if it is to allow traffic from the internet. It should be:
cidr_blocks = ["0.0.0.0/0"]
I tried to create an instance from a subnet and VPC ID, but I am having an issue with the remote-exec provisioner. The purpose is to create 2 public subnets (eu-west-1a) and 2 private subnets (eu-west-1b), use the subnet and VPC IDs to create an instance, and then SSH in and install nginx. I am not sure how to resolve this and unfortunately I am not an expert in Terraform, so guidance is needed here. When I also tried to SSH to it from the command prompt, it said the connection timed out, even though port 22 is open in the security group.
╷
│
Error: remote-exec provisioner error
│
│ with aws_instance.EC2InstanceCreate,
│ on main_ec2.tf line 11, in resource "aws_instance" "EC2InstanceCreate":
│ 11: provisioner "remote-exec" {
│
│ timeout - last error: dial tcp 54.154.137.10:22: i/o timeout
╵
My code below:
# Server Definition
resource "aws_instance" "EC2InstanceCreate" {
ami = "${var.aws_ami}"
instance_type = "${var.server_type}"
key_name = "${var.target_keypairs}"
subnet_id = "${var.target_subnet}"
provisioner "remote-exec" {
connection {
type = "ssh"
host = "${self.public_ip}"
user = "centos"
private_key = "${file("/home/michael/cs-104-michael/lesson6/EC2Tutorial.pem")}"
timeout = "5m"
}
inline = [
"sudo yum -y update",
"sudo yum -y install nginx",
"sudo service nginx start",
"sudo yum -y install wget, unzip",
]
}
tags = {
Name = "cs-104-lesson6-michael"
Environment = "TEST"
App = "React App"
}
}
output "pub_ip" {
value = ["${aws_instance.EC2InstanceCreate.public_ip}"]
depends_on = [aws_instance.EC2InstanceCreate]
}
security group config:
# Create security group for webserver
resource "aws_security_group" "webserver_sg" {
name = "sg_ws_name"
vpc_id = "${var.target_vpc}"
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
description = "HTTP"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
description = "HTTP"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "Security Group VPC devmind"
Project = "demo-assignment"
}
}
subnet code:
resource "aws_subnet" "public-subnet" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "${var.public_subnet_2a_cidr}"
availability_zone = "eu-west-1a"
map_public_ip_on_launch = true
tags = {
Name = "Web Public subnet 1"
}
}
resource "aws_subnet" "public-subnet2" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "${var.public_subnet_2b_cidr}"
availability_zone = "eu-west-1a"
map_public_ip_on_launch = true
tags = {
Name = "Web Public subnet 2"
}
}
# Define private subnets
resource "aws_subnet" "private-subnet" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "${var.private_db_subnet_2a_cidr}"
availability_zone = "eu-west-1b"
map_public_ip_on_launch = false
tags = {
Name = "App Private subnet 1"
}
}
resource "aws_subnet" "private-subnet2" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "${var.private_db_subnet_2b_cidr}"
availability_zone = "eu-west-1b"
map_public_ip_on_launch = false
tags = {
Name = "App Private subnet 2"
}
}
vpc code:
# Define our VPC
resource "aws_vpc" "default" {
cidr_block = "${var.vpc_cidr}"
enable_dns_hostnames = true
tags = {
Name = "Devops POC VPC"
}
}
Internet gateway included code:
# Internet Gateway
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.default.id}"
tags = {
name = "VPC IGW"
}
}
You are not providing vpc_security_group_ids for your instance:
vpc_security_group_ids = [aws_security_group.webserver_sg.id]
There could be many other issues, such as an incorrectly set up VPC, which is not shown.
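For reference, a minimal sketch of the instance with the security group attached, reusing the variable and resource names from the question:
resource "aws_instance" "EC2InstanceCreate" {
  ami                    = var.aws_ami
  instance_type          = var.server_type
  key_name               = var.target_keypairs
  subnet_id              = var.target_subnet
  vpc_security_group_ids = [aws_security_group.webserver_sg.id]
  # provisioner, connection and tags as in the question
}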
tl;dr
I can't access my service through the ALB DNS name; trying to reach the URL times out.
I noticed that, looking from the IGW and NAT, there's an isolated routed subnet (Public Subnet 2), and also a task that's not being exposed through the ALB because somehow it got a different subnet attached.
More general context
Got Terraform modules defining
ECS cluster, service and task definition
ALB setup, including a target group and a listener
Got a couple subnets and a security group for ALB
Got private subnets and own sg for ECS
Target group port is the same as container port already
Using CodePipeline I get a task running, and I can see logs of my service, meaning it starts.
Some questions
Can I have multiple IGWs associated with a single NAT within a single VPC?
Tasks get a couple of private subnets attached, plus an SG that permits traffic from the ALB SG. Also, tasks should access a Redis instance, so I'm also attaching to them an SG and the subnet where the ElastiCache node lives (shown in the Terraform module below). Any advice here?
ALB and networking resources
variable "vpc_id" {
type = string
default = "vpc-0af6233d57f7a6e1b"
}
variable "environment" {
type = string
default = "dev"
}
data "aws_vpc" "vpc" {
id = var.vpc_id
}
### Public subnets
resource "aws_subnet" "public_subnet_us_east_1a" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.10.0/24"
map_public_ip_on_launch = true
availability_zone = "us-east-1a"
tags = {
Name = "audible-blog-us-${var.environment}-public-subnet-1a"
}
}
resource "aws_subnet" "public_subnet_us_east_1b" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.11.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "audible-blog-us-${var.environment}-public-subnet-1b"
}
}
### Private subnets
resource "aws_subnet" "private_subnet_us_east_1a" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.12.0/24"
map_public_ip_on_launch = true
availability_zone = "us-east-1a"
tags = {
Name = "audible-blog-us-${var.environment}-private-subnet-1a"
}
}
resource "aws_subnet" "private_subnet_us_east_1b" {
vpc_id = data.aws_vpc.vpc.id
cidr_block = "10.0.13.0/24"
availability_zone = "us-east-1b"
tags = {
Name = "audible-blog-us-${var.environment}-private-subnet-1b"
}
}
# Create a NAT gateway with an EIP for each private subnet to get internet connectivity
resource "aws_eip" "gw_a" {
vpc = true
}
resource "aws_eip" "gw_b" {
vpc = true
}
resource "aws_nat_gateway" "gw_a" {
subnet_id = aws_subnet.public_subnet_us_east_1a.id
allocation_id = aws_eip.gw_a.id
}
resource "aws_nat_gateway" "gw_b" {
subnet_id = aws_subnet.public_subnet_us_east_1b.id
allocation_id = aws_eip.gw_b.id
}
# Create a new route table for the private subnets
# And make it route non-local traffic through the NAT gateway to the internet
resource "aws_route_table" "private_a" {
vpc_id = data.aws_vpc.vpc.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.gw_a.id
}
}
resource "aws_route_table" "private_b" {
vpc_id = data.aws_vpc.vpc.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.gw_b.id
}
}
# Explicitly associate the newly created route tables to the private subnets (so they don't default to the main route table)
resource "aws_route_table_association" "private_a" {
subnet_id = aws_subnet.private_subnet_us_east_1a.id
route_table_id = aws_route_table.private_a.id
}
resource "aws_route_table_association" "private_b" {
subnet_id = aws_subnet.private_subnet_us_east_1b.id
route_table_id = aws_route_table.private_b.id
}
# This is the group you need to edit if you want to restrict access to your application
resource "aws_security_group" "alb_sg" {
name = "audible-blog-us-${var.environment}-lb-sg"
description = "Internet to ALB Security Group"
vpc_id = data.aws_vpc.vpc.id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
name = "audible-blog-us-${var.environment}-lb-sg"
}
}
# Traffic to the ECS Cluster should only come from the ALB
resource "aws_security_group" "ecs_tasks_sg" {
name = "audible-blog-us-${var.environment}-ecs-sg"
description = "ALB to ECS Security Group"
vpc_id = data.aws_vpc.vpc.id
ingress {
from_port = 8080
to_port = 8080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
security_groups = [ aws_security_group.alb_sg.id ]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
name = "audible-blog-us-${var.environment}-ecs-sg"
}
}
resource "aws_alb" "alb" {
name = "audible-blog-us-${var.environment}-alb"
internal = false
load_balancer_type = "application"
subnets = [ aws_subnet.public_subnet_us_east_1a.id, aws_subnet.public_subnet_us_east_1b.id ]
security_groups = [ aws_security_group.alb_sg.id ]
tags = {
name = "audible-blog-us-${var.environment}-alb"
environment = var.environment
}
}
resource "aws_alb_target_group" "target_group" {
name = "audible-blog-us-${var.environment}-target-group"
port = "8080"
protocol = "HTTP"
vpc_id = data.aws_vpc.vpc.id
target_type = "ip"
health_check {
enabled = true
path = "/blog"
interval = 30
matcher = "200-304"
port = "traffic-port"
unhealthy_threshold = 5
}
depends_on = [aws_alb.alb]
}
resource "aws_alb_listener" "web_app_http" {
load_balancer_arn = aws_alb.alb.arn
port = 80
protocol = "HTTP"
depends_on = [aws_alb_target_group.target_group]
default_action {
target_group_arn = aws_alb_target_group.target_group.arn
type = "forward"
}
}
output "networking_details" {
value = {
load_balancer_arn = aws_alb.alb.arn
load_balancer_target_group_arn = aws_alb_target_group.target_group.arn
subnets = [
aws_subnet.private_subnet_us_east_1a.id,
aws_subnet.private_subnet_us_east_1b.id
]
security_group = aws_security_group.ecs_tasks_sg.id
}
}
ECS Fargate module
module "permissions" {
source = "./permissions"
environment = var.environment
}
resource "aws_ecs_cluster" "cluster" {
name = "adl-blog-us-${var.environment}"
}
resource "aws_cloudwatch_log_group" "logs_group" {
name = "/ecs/adl-blog-us-next-${var.environment}"
retention_in_days = 90
}
resource "aws_ecs_task_definition" "task" {
family = "adl-blog-us-task-${var.environment}"
container_definitions = jsonencode([
{
name = "adl-blog-us-next"
image = "536299334720.dkr.ecr.us-east-1.amazonaws.com/adl-blog-us:latest"
portMappings = [
{
containerPort = 8080
hostPort = 8080
},
{
containerPort = 6379
hostPort = 6379
}
]
environment: [
{
"name": "ECS_TASK_FAMILY",
"value": "adl-blog-us-task-${var.environment}"
}
],
logConfiguration: {
logDriver: "awslogs",
options: {
awslogs-group: "/ecs/adl-blog-us-next-${var.environment}",
awslogs-region: "us-east-1",
awslogs-stream-prefix: "ecs"
}
},
healthCheck: {
retries: 3,
command: [
"CMD-SHELL",
"curl -sf http://localhost:8080/blog || exit 1"
],
timeout: 5,
interval: 30,
startPeriod: null
}
}
])
cpu = 256
memory = 512
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
execution_role_arn = module.permissions.task_definition_execution_role_arn
task_role_arn = module.permissions.task_definition_execution_role_arn
}
resource "aws_ecs_service" "service" {
name = "adl-blog-us-task-service-${var.environment}"
cluster = aws_ecs_cluster.cluster.id
deployment_controller {
type = "ECS"
}
deployment_maximum_percent = 200
deployment_minimum_healthy_percent = 50
task_definition = aws_ecs_task_definition.task.family
desired_count = 3
launch_type = "FARGATE"
network_configuration {
subnets = concat(
var.public_alb_networking_details.subnets,
[ var.private_networking_details.subnet.id ]
)
security_groups = [
var.public_alb_networking_details.security_group,
var.private_networking_details.security_group.id
]
assign_public_ip = true
}
load_balancer {
target_group_arn = var.public_alb_networking_details.load_balancer_target_group_arn
container_name = "adl-blog-us-next"
container_port = 8080
}
force_new_deployment = true
lifecycle {
ignore_changes = [desired_count]
}
depends_on = [
module.permissions
]
}
variable "private_networking_details" {}
variable "public_alb_networking_details" {}
variable "environment" {
type = string
}
Your container ports are 8080 and 6379. However, your target group says it's 80, so you have to double-check which ports you actually use on Fargate and adjust your TG accordingly.
There could be other issues as well which aren't yet apparent. For example, you are opening port 443, but there is no listener for it, so any attempt to use HTTPS will fail.
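Assuming the container really listens on 8080, the target group must use that port, and HTTPS will only work once a 443 listener exists; a sketch of both (the certificate ARN is a placeholder to replace with your own):
resource "aws_alb_target_group" "target_group" {
  name        = "audible-blog-us-${var.environment}-target-group"
  port        = 8080 # must match the container port
  protocol    = "HTTP"
  vpc_id      = data.aws_vpc.vpc.id
  target_type = "ip"
  # health_check as in the question
}
resource "aws_alb_listener" "web_app_https" {
  load_balancer_arn = aws_alb.alb.arn
  port              = 443
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-2016-08"
  certificate_arn   = "arn:aws:acm:..." # placeholder
  default_action {
    target_group_arn = aws_alb_target_group.target_group.arn
    type             = "forward"
  }
}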
I'm using Terraform and I'm having a tricky time with connecting my autoscaled AWS EC2 instances to the internet. I can launch a standalone EC2 that connects with no difficulty, but when I visit the public IP addresses of my instances created with an autoscaling group I get "This site can’t be reached xxx.xx.xxx.xxx unexpectedly closed the connection."
The main difference I'm seeing is that I can specify a network interface with an EC2, but I'm not sure how this would work with my launch template. My instances launch into different subnets in different availability zones, and the template is as follows:
provider "aws" {
region = "us-east-1"
access_key = "xxxxx"
secret_key = "xxxxx"
}
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello, world! > var/www/html/index.html'
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTPS from VPC"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_tls"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
network_interfaces {
device_index = 0
associate_public_ip_address = true
}
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}
#Test standalone instance
resource "aws_network_interface" "web_server_1" {
subnet_id = aws_subnet.subnet_1.id
private_ips = ["10.0.1.50"]
security_groups = [aws_security_group.allow_web.id]
}
resource "aws_instance" "ubuntu-1" {
ami = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
availability_zone = "us-east-1a" #hardcoded to ensure that subnet and instance are in same availability availability zone
network_interface {
device_index = 0
network_interface_id = aws_network_interface.web_server_1.id
}
user_data = <<-EOF
#!/bin/bash
sudo apt update -y
sudo apt install apache2 -y
sudo systemct1 start apache2
sudo bash -c 'echo hello! > var/www/html/index.html'
EOF
tags = {
Name = "web-server"
}
}
I modified your template a bit (the user data, its indentation, and aws_launch_template), and now it works. It will work only over HTTP, as you don't have an HTTPS setup, so you don't need SG rules for HTTPS.
data "template_file" "testfile" {
template = <<EOF
#!/bin/bash
apt update -y
apt install apache2 -y
systemctl start apache2
echo "hello, world!" > /var/www/html/index.html
EOF
}
resource "aws_vpc" "first_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "prod-vpc"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.first_vpc.id
tags = {
Name = "prod-igw"
}
}
resource "aws_route_table" "prod_route_table" {
vpc_id = aws_vpc.first_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "prod-rt"
}
}
resource "aws_subnet" "subnet_1" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-1a"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-1"
Tier = "public"
}
}
resource "aws_subnet" "subnet_2" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.2.0/24"
availability_zone = "us-east-1b"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-2"
Tier = "public"
}
}
resource "aws_subnet" "subnet_3" {
vpc_id = aws_vpc.first_vpc.id
cidr_block = "10.0.3.0/24"
availability_zone = "us-east-1c"
map_public_ip_on_launch = true
tags = {
Name = "prod-subnet-3"
Tier = "public"
}
}
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet_1.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "b" {
subnet_id = aws_subnet.subnet_2.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_route_table_association" "c" {
subnet_id = aws_subnet.subnet_3.id
route_table_id = aws_route_table.prod_route_table.id
}
resource "aws_security_group" "allow_web" {
name = "allow_web"
description = "Allow web inbound traffic"
vpc_id = aws_vpc.first_vpc.id
ingress {
description = "HTTP from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = "allow_http"
}
}
resource "aws_launch_template" "frontend" {
name = "frontend"
image_id = "ami-0ee02acd56a52998e"
instance_type = "t2.micro"
vpc_security_group_ids = [aws_security_group.allow_web.id]
# DON'T NEED THIS
# network_interfaces {
# device_index = 0
# associate_public_ip_address = true
# }
user_data = base64encode(data.template_file.testfile.rendered)
}
resource "aws_lb" "loadbalancer" {
name = "loadbalancer"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.allow_web.id]
subnets = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
tags = {
Environment = "production"
}
}
resource "aws_autoscaling_group" "as_group_1" {
vpc_zone_identifier = [aws_subnet.subnet_1.id, aws_subnet.subnet_2.id, aws_subnet.subnet_3.id]
desired_capacity = 3
max_size = 5
min_size = 2
target_group_arns = [aws_lb_target_group.frontendhttp.arn]
launch_template {
id = aws_launch_template.frontend.id
version = "$Latest"
}
}
resource "aws_lb_target_group" "frontendhttp" {
name = "frontendhttp"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.first_vpc.id
}
resource "aws_lb_listener" "frontendhttp" {
load_balancer_arn = aws_lb.loadbalancer.arn
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.frontendhttp.arn
}
}
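To check the result end to end, an output with the load balancer's DNS name helps (a small addition, not part of the original answer); once the ASG instances pass health checks, the page should load over plain HTTP:
output "lb_dns_name" {
  value = aws_lb.loadbalancer.dns_name
}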
I am trying to launch an EFS file system in my default VPC. I am able to create the EFS but not able to mount the target in all subnets. For subnet_id, I am not sure how to pass the values of all the subnet IDs of the default VPC. Below is my Terraform code:
$ cat ec2.tf
provider "aws" {
region = "ap-south-1"
profile = "saumikhp"
}
data "aws_vpc" "default" {
default = true
}
data "aws_subnet_ids" "example" {
vpc_id = var.vpc_id
}
data "aws_subnet" "example" {
for_each = data.aws_subnet_ids.example.ids
id = each.value
}
resource "aws_key_pair" "key" {
key_name = "mykey12345"
public_key = file("mykey12345.pub")
}
resource "aws_security_group" "web-sg" {
name = "web-sg"
description = "Allow port 22 and 80"
vpc_id = "vpc-18819d70"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 2049
to_port = 2049
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "web-sg"
}
}
resource "aws_instance" "myinstance" {
ami = "ami-0447a12f28fddb066"
instance_type = "t2.micro"
key_name = "mykey12345"
security_groups = ["web-sg"]
connection {
type = "ssh"
user = "ec2-user"
private_key = file("mykey12345")
host = aws_instance.myinstance.public_ip
}
provisioner "remote-exec" {
inline = [
"sudo yum install httpd php git -y",
"sudo systemctl restart httpd",
"sudo systemctl enable httpd",
]
}
tags = {
Name = "SaumikOS"
}
}
resource "aws_efs_file_system" "efs" {
creation_token = "efs"
performance_mode = "generalPurpose"
throughput_mode = "bursting"
encrypted = "true"
tags = {
Name = "EfsExample"
}
}
resource "aws_efs_mount_target" "efs-mt" {
depends_on = [
aws_instance.myinstance,
]
for_each = data.aws_subnet_ids.example.ids
subnet_id = each.value
file_system_id = "aws_efs_file_system.efs.id"
security_groups = ["aws_security_group.web-sg.id"]
}
Error after running terraform apply
You can get the subnets from the default VPC by using a combination of the aws_vpc and aws_subnet_ids data sources.
data "aws_vpc" "default" {
default = true
}
data "aws_subnet_ids" "example" {
vpc_id = data.aws_vpc.default.id
}
You can then create an EFS mount target in each of the subnets by looping over these (each mount target only takes a single subnet_id):
resource "aws_efs_mount_target" "efs-mt" {
for_each = data.aws_subnet_ids.example.ids
file_system_id = aws_efs_file_system.efs.id
subnet_id = each.value
security_groups = [aws_security_group.web-sg.id]
}
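Because the mount targets are created with for_each keyed by subnet ID, they can then be referenced individually or all at once; for example, an output like this (a sketch, not part of the original answer) lists the mount target IP per subnet:
output "efs_mount_target_ips" {
  value = { for subnet_id, mt in aws_efs_mount_target.efs-mt : subnet_id => mt.ip_address }
}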
I am running a Spring Cloud Config server on AWS, which is just a Docker container running a Spring Boot app. It reads properties from a git repo. Our client applications read config from the server on startup and intermittently at runtime. About a third of the time, the client apps will time out when pulling config at startup, causing the app to crash. At runtime, the apps seem to succeed 4 out of 5 times, though they will just use existing config if a request fails.
I am using an EC2 instance behind an ALB, which handles SSL termination. I was originally using a t3.micro, but upgraded to an m5.large, guessing that the t3 class may not support continuous availability.
The ALB required 2 subnets, so I created a second with nothing in it initially. I am unsure if the ALB will attempt to route to the second subnet at some point, which could be causing the failures. The target group is using a health check which returns correctly, but I don't know enough about ALBs to rule out round-robining to an empty subnet. I attempted to create a second EC2 instance to parallel my first config server in the second subnet; however, I was unable to SSH into the second instance even though it's using the same security group and config as the first. I'm not sure why that failed, but I'm guessing there is something else wrong with my setup.
All infrastructure was deployed with Terraform, which I have included below.
resources.tf
provider "aws" {
region = "us-east-2"
version = ">= 2.38.0"
}
data "aws_ami" "amzn_linux" {
most_recent = true
filter {
name = "name"
values = ["amzn2-ami-hvm-2.0.*-x86_64-gp2"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["137112412989"]
}
resource "aws_vpc" "config-vpc" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
}
resource "aws_security_group" "config_sg" {
name = "config-sg"
description = "http, https, and ssh"
vpc_id = aws_vpc.config-vpc.id
ingress {
from_port = 9000
to_port = 9000
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 8080
to_port = 8080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_subnet" "subnet-alpha" {
cidr_block = cidrsubnet(aws_vpc.config-vpc.cidr_block, 3, 1)
vpc_id = aws_vpc.config-vpc.id
availability_zone = "us-east-2a"
}
resource "aws_subnet" "subnet-beta" {
cidr_block = cidrsubnet(aws_vpc.config-vpc.cidr_block, 3, 2)
vpc_id = aws_vpc.config-vpc.id
availability_zone = "us-east-2b"
}
resource "aws_internet_gateway" "config-vpc-ig" {
vpc_id = aws_vpc.config-vpc.id
}
resource "aws_route_table" "config-vpc-rt" {
vpc_id = aws_vpc.config-vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.config-vpc-ig.id
}
}
resource "aws_route_table_association" "subnet-association-alpha" {
subnet_id = aws_subnet.subnet-alpha.id
route_table_id = aws_route_table.config-vpc-rt.id
}
resource "aws_route_table_association" "subnet-association-beta" {
subnet_id = aws_subnet.subnet-beta.id
route_table_id = aws_route_table.config-vpc-rt.id
}
resource "aws_alb" "alb" {
name = "config-alb"
subnets = [aws_subnet.subnet-alpha.id, aws_subnet.subnet-beta.id]
security_groups = [aws_security_group.config_sg.id]
}
resource "aws_alb_target_group" "alb_target_group" {
name = "config-tg"
port = 9000
protocol = "HTTP"
vpc_id = aws_vpc.config-vpc.id
health_check {
enabled = true
path = "/actuator/health"
port = 9000
protocol = "HTTP"
}
}
resource "aws_instance" "config_server_alpha" {
ami = data.aws_ami.amzn_linux.id
instance_type = "m5.large"
vpc_security_group_ids = [aws_security_group.config_sg.id]
key_name = "config-ssh"
subnet_id = aws_subnet.subnet-alpha.id
associate_public_ip_address = true
}
resource "aws_instance" "config_server_beta" {
ami = data.aws_ami.amzn_linux.id
instance_type = "m5.large"
vpc_security_group_ids = [aws_security_group.config_sg.id]
key_name = "config-ssh"
subnet_id = aws_subnet.subnet-beta.id
associate_public_ip_address = true
}
resource "aws_alb_target_group_attachment" "config-target-alpha" {
target_group_arn = aws_alb_target_group.alb_target_group.arn
target_id = aws_instance.config_server_alpha.id
port = 9000
}
resource "aws_alb_target_group_attachment" "config-target-beta" {
target_group_arn = aws_alb_target_group.alb_target_group.arn
target_id = aws_instance.config_server_beta.id
port = 9000
}
resource "aws_alb_listener" "alb_listener_80" {
load_balancer_arn = aws_alb.alb.arn
port = 80
default_action {
type = "redirect"
redirect {
port = 443
protocol = "HTTPS"
status_code = "HTTP_301"
}
}
}
resource "aws_alb_listener" "alb_listener_8080" {
load_balancer_arn = aws_alb.alb.arn
port = 8080
default_action {
type = "redirect"
redirect {
port = 443
protocol = "HTTPS"
status_code = "HTTP_301"
}
}
}
resource "aws_alb_listener" "alb_listener_https" {
load_balancer_arn = aws_alb.alb.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = "arn:..."
default_action {
target_group_arn = aws_alb_target_group.alb_target_group.arn
type = "forward"
}
}
config server
@SpringBootApplication
@EnableConfigServer
public class ConfigserverApplication {
public static void main(String[] args) {
SpringApplication.run(ConfigserverApplication.class, args);
}
}
application.yml
spring:
  profiles:
    active: local
---
spring:
  profiles: local, default, cloud
  cloud:
    config:
      server:
        git:
          uri: ...
          searchPaths: '{application}/{profile}'
          username: ...
          password: ...
security:
  user:
    name: admin
    password: ...
server:
  port: 9000
management:
  endpoint:
    health:
      show-details: always
  info:
    git:
      mode: FULL
bootstrap.yml
spring:
  application:
    name: config-server
encrypt:
  key: |
    -----BEGIN RSA PRIVATE KEY-----
    ...
    -----END RSA PRIVATE KEY-----