How to deploy a minimalistic EKS cluster with Terraform? - amazon-web-services

Friends,
I am completely new to Terraform, but I am trying to learn here. At the moment I am reading the book Terraform: Up & Running, but I need to spin up an EKS cluster to deploy one of my learning projects. For this, I am following this [tutorial][1] from HashiCorp.
My main questions are the following: Do I really need all of this (see the Terraform code for AWS below) to deploy a cluster on AWS? How could I reduce the code below to the minimum necessary to spin up a cluster with a master and one worker that are able to communicate with each other?
On Google Cloud I could spin up a cluster with just these few lines of code:
provider "google" {
credentials = file(var.credentials)
project = var.project
region = var.region
}
resource "google_container_cluster" "primary" {
name = var.cluster_name
network = var.network
location = var.region
initial_node_count = var.initial_node_count
}
resource "google_container_node_pool" "primary_preemtible_nodes" {
name = var.node_name
location = var.region
cluster = google_container_cluster.primary.name
node_count = var.node_count
node_config {
preemptible = var.preemptible
machine_type = var.machine_type
}
}
Can I do something similar to spin up an EKS cluster? The code below is working, but I feel like I am biting off more than I can chew.
provider "aws" {
region = "${var.AWS_REGION}"
secret_key = "${var.AWS_SECRET_KEY}"
access_key = "${var.AWS_ACCESS_KEY}"
}
# ----- Base VPC Networking -----
data "aws_availability_zones" "available_zones" {}
# Creates a virtual private network which will isolate
# the resources to be created.
resource "aws_vpc" "blur-vpc" {
# Specifies the range of IP addresses for the VPC.
cidr_block = "10.0.0.0/16"
tags = "${
map(
"Name", "terraform-eks-node",
"kubernetes.io/cluster/${var.cluster-name}", "shared"
)
}"
}
resource "aws_subnet" "subnet" {
count = 2
availability_zone = "${data.aws_availability_zones.available_zones.names[count.index]}"
cidr_block = "10.0.${count.index}.0/24"
vpc_id = "${aws_vpc.blur-vpc.id}"
tags = "${
map(
"Name", "blur-subnet",
"kubernetes.io/cluster/${var.cluster-name}", "shared",
)
}"
}
# The component that allows communication between
# the VPC and the internet.
resource "aws_internet_gateway" "gateway" {
# Attaches the gateway to the VPC.
vpc_id = "${aws_vpc.blur-vpc.id}"
tags = {
Name = "eks-gateway"
}
}
# Determines where network traffic from the gateway
# will be directed.
resource "aws_route_table" "route-table" {
vpc_id = "${aws_vpc.blur-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gateway.id}"
}
}
resource "aws_route_table_association" "table_association" {
count = 2
subnet_id = "${aws_subnet.subnet.*.id[count.index]}"
route_table_id = "${aws_route_table.route-table.id}"
}
# -- Resources required for the master setup --
# The block below (IAM role + policy) allows the EKS service to
# manage or retrieve data from other AWS services.
# Similar to an IAM user, but not uniquely associated with one person.
# A role can be assumed by anyone who needs it.
resource "aws_iam_role" "blur-iam-role" {
name = "eks-cluster"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
# Attaches the policy "AmazonEKSClusterPolicy" to the role created above.
resource "aws_iam_role_policy_attachment" "blur-iam-role-AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.blur-iam-role.name}"
}
# Master security group
# A security group acts as a virtual firewall to control inbound and outbound traffic.
# This security group will control network access to the K8s master.
resource "aws_security_group" "blur-cluster" {
name = "eks-blur-cluster"
description = "Allows the communucation with the worker nodes"
vpc_id = "${aws_vpc.blur-vpc.id}"
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "blur-cluster"
}
}
# The actual master node
resource "aws_eks_cluster" "blur-cluster" {
name = "${var.cluster-name}"
# Attaches the IAM role created above.
role_arn = "${aws_iam_role.blur-iam-role.arn}"
vpc_config {
# Attaches the security group created for the master.
# Attaches also the subnets.
security_group_ids = ["${aws_security_group.blur-cluster.id}"]
subnet_ids = "${aws_subnet.subnet.*.id}"
}
depends_on = [
"aws_iam_role_policy_attachment.blur-iam-role-AmazonEKSClusterPolicy",
# "aws_iam_role_policy_attachment.blur-iam-role-AmazonEKSServicePolicy"
]
}
# -- Resources required for the worker nodes setup --
# IAM role for the workers. Allows worker nodes to manage or retrieve data
# from other AWS services; it's required for the workers to join the cluster.
resource "aws_iam_role" "iam-role-worker" {
name = "eks-worker"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
# Allows Amazon EKS worker nodes to connect to Amazon EKS clusters.
resource "aws_iam_role_policy_attachment" "iam-role-worker-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.iam-role-worker.name}"
}
# This permission is required to modify the IP address configuration of worker nodes
resource "aws_iam_role_policy_attachment" "iam-role-worker-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.iam-role-worker.name}"
}
# Allows listing repositories and pulling images.
resource "aws_iam_role_policy_attachment" "iam-role-worker-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.iam-role-worker.name}"
}
# An instance profile represents an EC2 instance (who am I?)
# and assumes a role (what can I do?).
resource "aws_iam_instance_profile" "worker-node" {
name = "worker-node"
role = "${aws_iam_role.iam-role-worker.name}"
}
# Security group for the worker nodes
resource "aws_security_group" "security-group-worker" {
name = "worker-node"
description = "Security group for worker nodes"
vpc_id = "${aws_vpc.blur-vpc.id}"
egress {
cidr_blocks = [ "0.0.0.0/0" ]
from_port = 0
to_port = 0
protocol = "-1"
}
tags = "${
map(
"Name", "blur-cluster",
"kubernetes.io/cluster/${var.cluster-name}", "owned"
)
}"
}
resource "aws_security_group_rule" "ingress-self" {
description = "Allow communication among nodes"
from_port = 0
to_port = 65535
protocol = "-1"
security_group_id = "${aws_security_group.security-group-worker.id}"
source_security_group_id = "${aws_security_group.security-group-worker.id}"
type = "ingress"
}
resource "aws_security_group_rule" "ingress-cluster-https" {
description = "Allow worker to receive communication from the cluster control plane"
from_port = 443
to_port = 443
protocol = "tcp"
security_group_id = "${aws_security_group.security-group-worker.id}"
source_security_group_id = "${aws_security_group.blur-cluster.id}"
type = "ingress"
}
resource "aws_security_group_rule" "ingress-cluster-others" {
description = "Allow worker to receive communication from the cluster control plane"
from_port = 1025
to_port = 65535
protocol = "tcp"
security_group_id = "${aws_security_group.security-group-worker.id}"
source_security_group_id = "${aws_security_group.blur-cluster.id}"
type = "ingress"
}
# Worker Access to Master
resource "aws_security_group_rule" "cluster-node-ingress-http" {
description = "Allows pods to communicate with the cluster API server"
from_port = 443
to_port = "443"
protocol = "tcp"
security_group_id = "${aws_security_group.blur-cluster.id}"
source_security_group_id = "${aws_security_group.security-group-worker.id}"
type = "ingress"
}
# --- Worker autoscaling group ---
# This data source filters and selects an AMI compatible with the specific Kubernetes version being deployed.
data "aws_ami" "eks-worker" {
filter {
name = "name"
values = ["amazon-eks-node-${aws_eks_cluster.blur-cluster.version}-v*"]
}
most_recent = true
owners = ["602401143452"]
}
data "aws_region" "current" {}
locals {
node-user-data =<<USERDATA
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.blur-cluster.endpoint}' --b64-cluster-ca '${aws_eks_cluster.blur-cluster.certificate_authority.0.data}' '${var.cluster-name}'
USERDATA
}
# To spin up an autoscaling group, an "aws_launch_configuration" is needed.
# This launch configuration requires an "image_id" as well as "security_groups".
resource "aws_launch_configuration" "launch_config" {
associate_public_ip_address = true
iam_instance_profile = "${aws_iam_instance_profile.worker-node.name}"
image_id = "${data.aws_ami.eks-worker.id}"
instance_type = "t2.micro"
name_prefix = "terraform-eks"
security_groups = ["${aws_security_group.security-group-worker.id}"]
user_data_base64 = "${base64encode(local.node-user-data)}"
lifecycle {
create_before_destroy = true
}
}
# Actual autoscaling group
resource "aws_autoscaling_group" "autoscaling" {
desired_capacity = 2
launch_configuration = "${aws_launch_configuration.launch_config.id}"
max_size = 2
min_size = 1
name = "terraform-eks"
vpc_zone_identifier = "${aws_subnet.subnet.*.id}"
tag {
key = "Name"
value = "terraform-eks"
propagate_at_launch = true
}
# "kubernetes.io/cluster/*" tag allows EKS and K8S to discover and manage compute resources.
tag {
key = "kubernetes.io/cluster/${var.cluster-name}"
value = "owned"
propagate_at_launch = true
}
}
[1]: https://registry.terraform.io/providers/hashicorp/aws/2.33.0/docs/guides/eks-getting-started#preparation

Yes, you need to create most of them: as you can see in the Terraform AWS provider documentation, a VPC configuration is required to deploy an EKS cluster. But you don't have to set up a security group rule yourself for the workers to access the master. Also, try the aws_eks_node_group resource to create the worker node group. It saves you from creating the launch configuration and autoscaling group separately (a rough sketch follows the documentation link below).
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
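For illustration only, a managed node group along these lines could replace the worker launch configuration and autoscaling group from the question. It is a sketch, not a tested drop-in: it reuses the subnets and worker IAM role defined above, while the node group name and instance type are assumptions.
resource "aws_eks_node_group" "workers" {
  cluster_name    = aws_eks_cluster.blur-cluster.name
  node_group_name = "blur-workers"                  # assumed name
  node_role_arn   = aws_iam_role.iam-role-worker.arn
  subnet_ids      = aws_subnet.subnet[*].id

  scaling_config {
    desired_size = 1
    max_size     = 2
    min_size     = 1
  }

  instance_types = ["t3.small"]                     # assumed instance type

  # The worker policies must be attached before nodes can join the cluster.
  depends_on = [
    aws_iam_role_policy_attachment.iam-role-worker-AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.iam-role-worker-AmazonEKS_CNI_Policy,
    aws_iam_role_policy_attachment.iam-role-worker-AmazonEC2ContainerRegistryReadOnly,
  ]
}
With a managed node group, EKS also takes care of the security group rules between the workers and the control plane, which is why the per-port worker rules above are not strictly needed.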

Related

How to launch multiple AWS EC2 instances from a single VPC using Terraform?

Is it possible to launch multiple EC2 instances from Terraform using a single VPC? I'm building something that requires multiple instances to be launched from the same region, and I'm doing all of this using Terraform. But there's a limit in AWS: by default, only 5 VPCs are allowed per region. What I've been doing until now is creating a separate VPC in Terraform each time I need to launch an instance. Below is the code for reference:
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.0"
}
}
}
# Configure the AWS Provider
provider "aws" {
region = "us-east-2"
access_key = "XXXXXXXXXXXXXXXXX"
secret_key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
}
# 1. Create vpc
resource "aws_vpc" "prod-vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "production"
}
}
# 2. Create Internet Gateway
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.prod-vpc.id
}
# 3. Create Custom Route Table
resource "aws_route_table" "prod-route-table" {
vpc_id = aws_vpc.prod-vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
}
route {
ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.gw.id
}
tags = {
Name = "Prod"
}
}
# 4. Create a Subnet
resource "aws_subnet" "subnet-1" {
vpc_id = aws_vpc.prod-vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = "us-east-2a"
tags = {
Name = "prod-subnet"
}
}
# 5. Associate subnet with Route Table
resource "aws_route_table_association" "a" {
subnet_id = aws_subnet.subnet-1.id
route_table_id = aws_route_table.prod-route-table.id
}
# 6. Create Security Group to allow port 22,80,443
resource "aws_security_group" "allow_web" {
name = "allow_web_traffic"
description = "Allow Web inbound traffic"
vpc_id = aws_vpc.prod-vpc.id
ingress {
description = "HTTPS"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "self"
from_port = 8000
to_port = 8000
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "SSH"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "allow_web"
}
}
# 7. Create a network interface with an ip in the subnet that was created in step 4
resource "aws_network_interface" "web-server-nic" {
subnet_id = aws_subnet.subnet-1.id
private_ips = ["10.0.1.50"]
security_groups = [aws_security_group.allow_web.id]
}
# 8. Assign an elastic IP to the network interface created in step 7
resource "aws_eip" "one" {
vpc = true
network_interface = aws_network_interface.web-server-nic.id
associate_with_private_ip = "10.0.1.50"
depends_on = [aws_internet_gateway.gw]
}
output "server_public_ip" {
value = aws_eip.one.public_ip
}
# 9. Create Ubuntu server and install/enable apache2
resource "aws_instance" "web-server-instance" {
ami = var.AMI_ID
instance_type = "g4dn.xlarge"
availability_zone = "us-east-2a"
key_name = "us-east-2"
network_interface {
device_index = 0
network_interface_id = aws_network_interface.web-server-nic.id
}
root_block_device {
volume_size = "200"
}
iam_instance_profile = aws_iam_instance_profile.training_profile.name
depends_on = [aws_eip.one]
user_data = <<-EOF
#!/bin/bash
python3 /home/ubuntu/setting_instance.py
EOF
tags = {
Name = var.INSTANCE_NAME
}
}
The only downside to this code is that it creates a separate VPC every time I create an instance. I read in a Stack Overflow post that we can import an existing VPC using the terraform import command. Along with the VPC, I had to import the internet gateway and route table as well (it was throwing an error otherwise). But then I wasn't able to access the instance using SSH, and the commands in the user_data part didn't execute (setting_instance.py sends a Firebase notification once the instance starts; that's its only purpose).
Not only the VPC; I'd also like to know whether I can reuse the other resources to the fullest extent possible.
I'm new to Terraform and AWS. Any suggestions on the above code are welcome.
EDIT: Instances are created one at a time according to need, i.e., whenever there is a need for a new instance I use this code. In the current scenario, if there are already 5 instances running in a region, I won't be able to use this code to create a 6th instance in the same region when the demand arises.
If, as you say, they would be exactly the same, the easiest way would be to use count, which indicates how many instances you want to have. For that you can introduce a new variable:
variable "number_of_instance" {
default = 1
}
and then
resource "aws_instance" "web-server-instance" {
count = var.number_of_instance
ami = var.AMI_ID
instance_type = "g4dn.xlarge"
availability_zone = "us-east-2a"
key_name = "us-east-2"
network_interface {
device_index = 0
network_interface_id = aws_network_interface.web-server-nic.id
}
root_block_device {
volume_size = "200"
}
iam_instance_profile = aws_iam_instance_profile.training_profile.name
depends_on = [aws_eip.one]
user_data = <<-EOF
#!/bin/bash
python3 /home/ubuntu/setting_instance.py
EOF
tags = {
Name = var.INSTANCE_NAME
}
}
All of this must be managed by the same state file, not fully separate state files, as otherwise you will again end up with duplicate VPCs. You then only change number_of_instance to what you want. For a more resilient solution, you would have to use an autoscaling group for the instances; a rough sketch follows.
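As a sketch only (the launch template name, sizes, and the reuse of the question's subnet, security group, and instance profile are assumptions), the autoscaling-group variant could look like this:
resource "aws_launch_template" "web" {
  name_prefix            = "web-server-"
  image_id               = var.AMI_ID
  instance_type          = "g4dn.xlarge"
  key_name               = "us-east-2"
  vpc_security_group_ids = [aws_security_group.allow_web.id]

  iam_instance_profile {
    name = aws_iam_instance_profile.training_profile.name
  }

  # Same bootstrap script as in the question; launch templates expect base64.
  user_data = base64encode(<<-EOF
    #!/bin/bash
    python3 /home/ubuntu/setting_instance.py
  EOF
  )
}
resource "aws_autoscaling_group" "web" {
  desired_capacity    = var.number_of_instance
  min_size            = 1
  max_size            = 5
  vpc_zone_identifier = [aws_subnet.subnet-1.id]

  launch_template {
    id      = aws_launch_template.web.id
    version = "$Latest"
  }

  tag {
    key                 = "Name"
    value               = var.INSTANCE_NAME
    propagate_at_launch = true
  }
}
With an autoscaling group, each instance gets its own network interface, so the fixed aws_network_interface and aws_eip from the question no longer apply; public access would have to be handled differently, for example through the subnet's public IP settings.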

AWS ElastiCache Redis cluster configuration

I'm a bit new to Terraform and needed some help on what the issue is with this. It creates the corresponding resources, but when connecting to the endpoint, I get a timeout. I noticed the security group isn't actually being created, but I'm not sure why. Any help would be appreciated.
configuration:
provider "aws" {
region = "us-west-2"
}
resource "aws_elasticache_cluster" "example" {
cluster_id = "cluster-example"
engine = "redis"
node_type = "cache.m4.large"
num_cache_nodes = 1
parameter_group_name = "default.redis3.2"
engine_version = "3.2.10"
port = 6379
}
resource "aws_security_group" "example" {
name = "example"
description = "Used by the example Redis cluster"
vpc_id = "${aws_vpc.example.id}"
ingress {
description = "TLS from VPC"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [aws_vpc.example.cidr_block]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
}
resource "aws_vpc" "example" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "example"
}
}
resource "aws_subnet" "example" {
vpc_id = "${aws_vpc.example.id}"
cidr_block = "10.0.0.0/20"
tags = {
Name = "example"
}
}
resource "aws_elasticache_subnet_group" "example" {
name = "example"
description = "Example subnet group"
subnet_ids = ["${aws_subnet.example.id}"]
}
connection to endpoint:
import os
import redis
ENDPOINT = os.environ.get('REDIS_HOST')
client = redis.Redis(host=ENDPOINT, port=6379, db=0)
client.ping()
(passwordless cluster)
EDIT:
I call the endpoint in python on my local machine.
You can't access an ElastiCache cluster from outside of AWS directly, as it can only be accessed from within the VPC. You must use a VPN, Direct Connect, or an SSH tunnel if you want to connect from your home network.
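Separately, note that as written the cluster isn't associated with the security group or the subnet group at all, and the security group's ingress rule allows port 443 rather than the Redis port. If the intent is to control access with that security group inside the VPC, a sketch along these lines (reusing the names from the question) would wire them up:
resource "aws_elasticache_cluster" "example" {
  cluster_id           = "cluster-example"
  engine               = "redis"
  node_type            = "cache.m4.large"
  num_cache_nodes      = 1
  parameter_group_name = "default.redis3.2"
  engine_version       = "3.2.10"
  port                 = 6379
  # Place the cluster in the example VPC's subnet group and attach the security group.
  subnet_group_name    = aws_elasticache_subnet_group.example.name
  security_group_ids   = [aws_security_group.example.id]
}
The ingress rule would also need to allow TCP 6379 from wherever the client runs inside the VPC.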

Can't connect to Terraform-created instance with Private Key, but CAN connect when I create instance in Console

I've created the following key pair and EC2 instance using Terraform. I'll leave the SG config out of it, but it allows SSH from the internet.
When I try to SSH into this instance I get the errors "Server Refused our Key" and "No supported authentication methods available (server sent: publickey)".
However, I am able to log in when I create a separate EC2 instance in the console and assign it the same key pair used in the TF script.
Has anyone seen this behavior? What causes it?
# Create Dev VPC
resource "aws_vpc" "dev_vpc" {
cidr_block = "10.0.0.0/16"
instance_tenancy = "default"
enable_dns_hostnames = "true"
tags = {
Name = "dev"
}
}
# Create an Internet Gateway Resource
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.dev_vpc.id
tags = {
Name = "dev-engineering-igw"
}
}
# Create a Route Table
resource "aws_route_table" " _dev_public_routes" {
vpc_id = aws_vpc. _dev.id
tags = {
name = " _dev_public_routes"
}
}
# Create a Route
resource "aws_route" " _dev_internet_access" {
route_table_id = aws_route_table. _dev_public_routes.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw.id
}
# Associate the Route Table to our Public Subnet
resource "aws_route_table_association" " _dev_public_subnet_assoc" {
subnet_id = aws_subnet. _dev_public.id
route_table_id = aws_route_table. _dev_public_routes.id
}
# Create public subnet for hosting customer-facing Django app
resource "aws_subnet" " _dev_public" {
vpc_id = aws_vpc. _dev.id
cidr_block = "10.0.0.0/17"
availability_zone = "us-west-2a"
tags = {
Env = "dev"
}
}
resource "aws_security_group" "allow_https" {
name = "allow_https"
description = "Allow http and https inbound traffic"
vpc_id = aws_vpc. _dev.id
ingress {
description = "HTTP and HTTPS into VPC"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "HTTP and HTTPS into VPC"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "SSH"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
description = "HTTP and HTTPS out of VPC for Session Manager"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "allow_https"
}
}
resource "aws_instance" "web" {
ami = data.aws_ami.ubuntu20.id
instance_type = "t3.micro"
subnet_id = aws_subnet. _dev_public.id
associate_public_ip_address = "true"
vpc_security_group_ids = ["${aws_security_group.allow_https.id}"]
key_name = "key_name"
metadata_options { #Enabling IMDSv2
http_endpoint = "disabled"
http_tokens = "required"
}
tags = {
Env = "dev"
}
}
As specified in the comments, removing the metadata_options from the instance resource resolves the issue.
The fix is to update the metadata_options to be:
metadata_options { #Enabling IMDSv2
http_endpoint = "enabled"
http_tokens = "required"
}
Looking at the Terraform documentation for metadata_options shows that:
http_endpoint = "disabled" means that the metadata service is unavailable.
http_tokens = "required" means that the metadata service requires session tokens (ie IMDSv2).
This is an invalid configuration, as specified in the AWS docs:
You can opt in to require that IMDSv2 is used when requesting instance metadata. Use the modify-instance-metadata-options CLI command and set the http-tokens parameter to required. When you specify a value for http-tokens, you must also set http-endpoint to enabled.

How use localstack to test security group rules for an ec2 instance?

I would like to use LocalStack for quick testing of different security group rules. For example, I want to create an EC2 instance, create an internet gateway, and then add a security group that allows ingress to the EC2 instance only on a specific port. I'd then test it by doing a curl to the EC2 instance to see if I got the rule correct.
Is this possible with LocalStack?
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
region = "us-east-1"
access_key = "localstacktest"
secret_key = "localstacktestkey"
skip_credentials_validation = true
skip_requesting_account_id = true
skip_metadata_api_check = true
s3_use_path_style = true
endpoints {
ec2 = "http://localhost:4566"
iam = "http://localhost:4566"
s3 = "http://localhost:4566"
glacier = "http://localhost:4566"
sns = "http://localhost:4566"
organizations = "http://localhost:4566"
}
}
# ...elided...
resource "aws_security_group" "aws_ec2_sg" {
name = "aws_ec2_sg_allow_ssh"
vpc_id = aws_vpc.aws_ec2_vpc.id
ingress {
description = "Allow inbound ssh traffic"
cidr_blocks = [var.cidr_block]
from_port = var.port
protocol = "tcp"
to_port = var.port
}
#...elided...
}
resource "aws_instance" "aws_ec2_instance" {
ami = var.ami_id
instance_type = var.instance_type
vpc_security_group_ids = [aws_security_group.aws_ec2_sg.id]
tags = {
name = var.ec2_name
}
}
Is there a way to then make a curl call and have it return "ok", but have it fail due to security restrictions if I change up the security group?

Terraform - DB and security group are in different VPCs

What I am trying to achieve:
Create an RDS Aurora cluster and place it in the same VPC as the EC2 instances that I start so they can communicate.
I'm trying to create an SG named "RDS_DB_SG" and make it part of the VPC I'm creating in the process.
I also create an SG named "BE_SG" and make it part of the same VPC.
I'm doing this so the two (RDS and the BE server) can access each other.
What I did so far:
Created a .tf file and started everything up.
What I got:
It starts OK if I don't include the RDS cluster inside the RDS SG; the RDS then creates its own VPC.
When I include the RDS in the SG I want for it, the RDS cluster can't start and gets an error.
Error I got:
"The DB instance and EC2 security group are in different VPCs. The DB instance is in vpc-5a***63c and the EC2 security group is in vpc-0e5391*****273b3d"
Workaround for now:
I started the infrastructure without specifying a VPC for the RDS. It created its own default VPC.
I then manually created VPC peering between the VPC that was created for the EC2s and the VPC that was created for the RDS.
But I want them to be in the same VPC so I won't have to create the VPC peering manually.
My .tf code:
variable "vpc_cidr" {
description = "CIDR for the VPC"
default = "10.0.0.0/16"
}
resource "aws_vpc" "vpc" {
cidr_block = "${var.vpc_cidr}"
tags = {
Name = "${var.env}_vpc"
}
}
resource "aws_subnet" "vpc_subnet" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = "${var.vpc_cidr}"
availability_zone = "eu-west-1a"
tags = {
Name = "${var.env}_vpc"
}
}
resource "aws_db_subnet_group" "subnet_group" {
name = "${var.env}-subnet-group"
subnet_ids = ["${aws_subnet.vpc_subnet.id}"]
}
resource "aws_security_group" "RDS_DB_SG" {
name = "${var.env}-rds-sg"
vpc_id = "${aws_vpc.vpc.id}"
ingress {
from_port = 3396
to_port = 3396
protocol = "tcp"
security_groups = ["${aws_security_group.BE_SG.id}"]
}
}
resource "aws_security_group" "BE_SG" {
name = "${var.env}_BE_SG"
vpc_id = "${aws_vpc.vpc.id}"
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "BE" {
ami = "ami-*********************"
instance_type = "t2.large"
associate_public_ip_address = true
key_name = "**********"
tags = {
Name = "WEB-${var.env}"
Purpose = "Launched by Terraform"
ENV = "${var.env}"
}
subnet_id = "${aws_subnet.vpc_subnet.id}"
vpc_security_group_ids = ["${aws_security_group.BE_SG.id}", "${aws_security_group.ssh.id}"]
}
resource "aws_rds_cluster" "rds-cluster" {
cluster_identifier = "${var.env}-cluster"
database_name = "${var.env}-rds"
master_username = "${var.env}"
master_password = "PASSWORD"
backup_retention_period = 5
vpc_security_group_ids = ["${aws_security_group.RDS_DB_SG.id}"]
}
resource "aws_rds_cluster_instance" "rds-instance" {
count = 1
cluster_identifier = "${aws_rds_cluster.rds-cluster.id}"
instance_class = "db.r4.large"
engine_version = "5.7.12"
engine = "aurora-mysql"
preferred_backup_window = "04:00-22:00"
}
Any suggestions on how to achieve my first goal?
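A sketch of one way to achieve the first goal (untested against the rest of the code): pin the cluster to a DB subnet group in your VPC via db_subnet_group_name, so it no longer falls back to the default VPC. Note that RDS requires the subnet group to span at least two availability zones, so the single subnet above (which currently covers the whole VPC CIDR) would first need to be split into two subnets in different AZs.
resource "aws_rds_cluster" "rds-cluster" {
  cluster_identifier      = "${var.env}-cluster"
  database_name           = "${var.env}-rds"
  master_username         = "${var.env}"
  master_password         = "PASSWORD"
  backup_retention_period = 5
  # Keeps the cluster in the VPC defined above instead of the default VPC.
  db_subnet_group_name    = "${aws_db_subnet_group.subnet_group.name}"
  vpc_security_group_ids  = ["${aws_security_group.RDS_DB_SG.id}"]
}
resource "aws_rds_cluster_instance" "rds-instance" {
  count                   = 1
  cluster_identifier      = "${aws_rds_cluster.rds-cluster.id}"
  instance_class          = "db.r4.large"
  engine                  = "aurora-mysql"
  engine_version          = "5.7.12"
  preferred_backup_window = "04:00-22:00"
  # Must match the cluster's subnet group.
  db_subnet_group_name    = "${aws_db_subnet_group.subnet_group.name}"
}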