I have two modules under the modules directory, named prienv and vpc.
nnice#MPL-G8WW7D3:~/terra-test/modules/prienv$ pwd
/home/nnice/terra-test/modules/prienv
nnice#MPL-G8WW7D3:~/terra-test/modules/prienv$ ls
data.tf main.tf variable.tf
Here is my data.tf file under the prienv module:
nnice#MPL-G8WW7D3:~/terra-test/modules/prienv$ cat data.tf
data "aws_security_groups" "mysg-data" {
filter {
name = "tag:Name"
values = ["MySG"]
}
depends_on = [aws_security_groups.mysg]
}
data "aws_subnet" "myprisn-data" {
filter {
name = "tag:Name"
values = ["MyPriSN"]
}
depends_on = [aws_subnet.myprisn]
}
Here is my variable.tf file under the prienv module:
variable "pri_av" {
description = "for private availability zone"
type = string
default = "us-east-1b"
}
variable "ami" {
description = "ami for ec2"
type = string
default = "ami-03ededff12e34e59e"
}
variable "instance_type" {
description = "Instance type t2.micro"
type = string
default = "t2.micro" #will use double quotes for string type
}
variable "key_pair" {
description = "key pair for ec2"
type = string
default = "learner_key"
}
variable "instance_count" {
description = "EC2 instance count"
type = number
default = 1 # number of private EC2 instances to create
}
variable "project_environment" {
description = "project name and environment for private instances"
type = map(string)
default = {
project = "private", # it works upon key value pair where project is for key and environment is value
environment = "testing" # you will find them under tag on aws console
}
}
And this is my main.tf file under the prienv module:
resource "aws_instance" "mypriec2" {
ami = var.ami
instance_type = var.instance_type
count = var.instance_count
availability_zone = var.pri_av
#subnet_id = aws_subnet.myprisn.id
subnet_id = data.aws_subnet.myprisn-data.id
#vpc_security_group_ids = [aws_security_group.mysg.id]
vpc_security_group_ids = data.aws_security_groups.mysg-data.ids
key_name = var.key_pair
# disable_api_termination = true
tags = var.project_environment
}
resource "aws_key_pair" "mykeypair" {
key_name = var.key_pair
public_key = "ssh-rsa AAAAB3NaffrWscf59juCakElys9F3+zVuz0ta4gRUtKgWVPIj6ACr00VNDzsTTW2/sSjYtE5zWolVKCITlhqiIhgRKUDLKoxclxUKnK6IGIafdaefafaheiufa;fdaeoasfdkQvNtGrrHzY5/dbZhIUTxDUyvT5O5U= nnice#MPL-G8WW7D3"
}
And here is my vpc module:
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ pwd
/home/nnice/terra-test/modules/vpc
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ ls
data.tf main.tf variable.tf
This is my data.tf file under the vpc module:
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ cat data.tf
data "aws_vpc" "myvpc-data" {
filter {
name = "tag:Name"
values = ["MyVPC"]
}
depends_on = [aws_vpc.myvpc]
}
data "aws_subnet" "mypubsn-data" {
filter {
name = "tag:Name"
values = ["MyPubSN"]
}
depends_on = [aws_subnet.mypubsn]
}
data "aws_subnet" "myprisn-data" {
filter {
name = "tag:Name"
values = ["MyPriSN"]
}
depends_on = [aws_subnet.myprisn]
}
This is my main.tf file under the vpc module:
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ cat main.tf
##################################################################
############################## VPC ###############################
##################################################################
resource "aws_vpc" "myvpc" {
cidr_block = var.vpc_cidr
instance_tenancy = var.vpc_tenancy
tags = {
Name = var.vpc_tag
}
}
##################################################################
############################# Subnet #############################
##################################################################
#PUBLIC SUBNET
resource "aws_subnet" "mypubsn" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
cidr_block = var.mypubsn_cidr
availability_zone = var.pub_av
map_public_ip_on_launch = var.map_public_ip_on_launch
tags = {
Name = var.mypubsn_tag
}
}
#PRIVATE SUBNET
resource "aws_subnet" "myprisn" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
cidr_block = var.myprisn_cidr
availability_zone = var.pri_av
tags = {
Name = var.myprisn_tag
}
}
##################################################################
############################### IGW ##############################
##################################################################
resource "aws_internet_gateway" "myigw" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
tags = {
Name = var.igw_tag
}
}
##################################################################
############################ Route Table #########################
##################################################################
#PUBLIC RT
resource "aws_route_table" "mypubrt" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
tags = {
Name = var.mypubsn_tag
}
}
#PRIVATE RT
resource "aws_route_table" "myprirt" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
tags = {
Name = var.myprisn_tag
}
}
####################################################################
######################## Route Table Associate #####################
####################################################################
#PUBLIC RT association
resource "aws_route_table_association" "pub" {
#subnet_id = aws_subnet.mypubsn.id
subnet_id = data.aws_subnet.mypubsn-data.id
route_table_id = aws_route_table.mypubrt.id
}
#PRIVATE RT association
resource "aws_route_table_association" "pri" {
#subnet_id = aws_subnet.myprisn.id
subnet_id = data.aws_subnet.myprisn-data.id
route_table_id = aws_route_table.myprirt.id
}
###################################################################
########################### Route #################################
###################################################################
#PUBLIC Route
resource "aws_route" "mypubroute" {
route_table_id = aws_route_table.mypubrt.id
destination_cidr_block = var.pubroute
gateway_id = aws_internet_gateway.myigw.id
depends_on = [aws_route_table.mypubrt]
}
#PRIVATE Route
#resource "aws_route" "mypriroute" {
# route_table_id = aws_route_table.myprirt.id
# destination_cidr_block = "0.0.0.0/0"
# gateway_id = aws_internet_gateway.myigw.id
# depends_on = [aws_route_table.myprirt]
#}
###################################################################
############################ SG ###################################
###################################################################
resource "aws_security_group" "mysg" {
name = "MySecurityGroup"
description = "Allow TLS inbound traffic"
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
ingress {
description = "TLS from VPC"
from_port = 22
to_port = 22
protocol = "tcp"
# cidr_blocks = [aws_vpc.myvpc.cidr_block]
cidr_blocks = ["0.0.0.0/0"]
# ipv6_cidr_blocks = [aws_vpc.main.ipv6_cidr_block]
}
ingress {
description = "TLS from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
# cidr_blocks = [aws_vpc.myvpc.cidr_block]
cidr_blocks = ["0.0.0.0/0"]
# ipv6_cidr_blocks = [aws_vpc.main.ipv6_cidr_block]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = var.SG_tag
}
}
And this is my variable.tf file under the vpc module:
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ cat variable.tf
variable "vpc_tenancy" {
description = "vpc instance tenancy"
type = string
default = "default"
}
variable "pub_av" {
description = "for public availability zone"
type = string
default = "us-east-1a"
}
variable "pri_av" {
description = "for private availability zone"
type = string
default = "us-east-1b"
}
variable "vpc_tag" {
description = "Tag for VPC"
type = string
default = "MyVPC"
}
variable "vpc_cidr" {
description = "for vpc cidr"
type = string
default = "10.0.0.0/16"
}
variable "mypubsn_cidr" {
description = "for public subnet cidr"
type = string
default = "10.0.1.0/24"
}
variable "myprisn_cidr" {
description = "for private subnet cidr"
type = string
default = "10.0.2.0/24"
}
variable "mypubsn_tag" {
description = "tag for public subnet"
type = string
default = "MyPubSN"
}
variable "myprisn_tag" {
description = "tag for private subnet"
type = string
default = "MyPriSN"
}
variable "igw_tag" {
description = "tag for IGW subnet"
type = string
default = "MyIGW"
}
variable "pubrt_tag" {
description = "tag for private subnet"
type = string
default = "MyPubRT"
}
variable "prirt_tag" {
description = "tag for IGW subnet"
type = string
default = "MyPriRT"
}
variable "pubroute" {
description = "cidr for public route"
type = string
default = "0.0.0.0/0"
}
variable "SG_tag" {
description = "tag for SG"
type = string
default = "MySG"
}
variable "map_public_ip_on_launch" {
description = "auto enable public ip to public subnet"
type = bool
default = true
}
And there is an env directory where I have my main.tf file:
nnice#MPL-G8WW7D3:~/terra-test$ ls
env modules
nnice#MPL-G8WW7D3:~/terra-test$ cd env/private-ec2/
nnice#MPL-G8WW7D3:~/terra-test/env/private-ec2$ pwd
/home/nnice/terra-test/env/private-ec2
nnice#MPL-G8WW7D3:~/terra-test/env/private-ec2$ ls
main.tf
nnice#MPL-G8WW7D3:~/terra-test/env/private-ec2$ cat main.tf
#Provider
provider "aws" {
region = "us-east-1"
}
#VPC
module "vpc" {
source = "../../modules/vpc"
}
#EC2
module "prienv" {
source = "../../modules/prienv"
}
When I try to run terraform plan, I get the following errors:
Error: Reference to undeclared resource
│
│ on ../../modules/prienv/data.tf line 6, in data "aws_security_groups" "mysg-data":
│ 6: depends_on = [aws_security_groups.mysg]
│
│ A managed resource "aws_security_groups" "mysg" has not been declared in module.prienv.
╵
╷
│ Error: Reference to undeclared resource
│
│ on ../../modules/prienv/data.tf line 14, in data "aws_subnet" "myprisn-data":
│ 14: depends_on = [aws_subnet.myprisn]
│
│ A managed resource "aws_subnet" "myprisn" has not been declared in module.prienv.
Could anyone please let me know the solution? I am already using the vpc module in my main.tf file.
The issue you are running into is that aws_security_group.mysg and aws_subnet.myprisn are defined locally in the vpc module, but you are referencing them in the depends_on arguments of the prienv module.
Also, the Terraform documentation suggests that the depends_on argument should be used only as a last resort.
So the way to go is to define the security group and the subnet as outputs in the vpc module, declare them as variables in the prienv module, and then pass the outputs of vpc as parameter values to prienv.
This way Terraform will recognize the dependencies between the resources from the dependency graph it builds, and you won't need the depends_on argument.
In consequence, the diffs to your config could look like this:
modules/vpc/outputs.tf
output "sec_group" {
value = aws_security_group.mysg
description = "..."
}
output "subnet" {
value = aws_subnet.myprisn
description = "..."
}
modules/prienv/variable.tf
variable "sec_group" {
description = "..."
}
variable "subnet" {
description = "..."
}
modules/prienv/main.tf
resource "aws_instance" "mypriec2" {
...
subnet_id = var.subnet.id
vpc_security_group_ids = [var.sec_group.id]
...
}
env/private-ec2/main.tf
module "vpc" {
source = "../../modules/vpc"
}
#EC2
module "prienv" {
source = "../../modules/prienv"
sec_group = module.vpc.sec_group
subnet = module.vpc.subnet
}
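With that wiring in place, the data sources in modules/prienv/data.tf (and their depends_on arguments) are no longer needed, since the subnet and security group objects are handed to the module directly. As a sketch, using the names from your question, the instance resource would then look like:
resource "aws_instance" "mypriec2" {
  ami                    = var.ami
  instance_type          = var.instance_type
  count                  = var.instance_count
  availability_zone      = var.pri_av
  subnet_id              = var.subnet.id      # subnet object passed in from the vpc module
  vpc_security_group_ids = [var.sec_group.id] # the aws_security_group resource exposes a single id
  key_name               = var.key_pair
  tags                   = var.project_environment
}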
Related
I am trying to create a NAT gateway with Terraform using AWS as the provider, but subnet_id in the aws_nat_gateway resource always gives me an error. I am trying to assign the public subnet to subnet_id in resource "aws_nat_gateway" "sample_nat_gateway" from the variables.tf file, but I'm failing to do so. Can someone assist?
Below is my vpc.tf file from the vpc module:
resource "aws_subnet" "public-subnet" {
for_each = var.prefix
availability_zone_id = each.value["az"]
cidr_block = each.value["cidr"]
vpc_id = aws_vpc.sample_vpc.id
tags = {
Name = "${var.name}-${each.value["az"]}"
}
}
resource "aws_nat_gateway" "sample_nat_gateway" {
allocation_id = aws_eip.sample_eip.id
subnet_id = ""
tags = {
Name = "${var.name}-sample-nat-gateway"
Environment = var.environment
}
depends_on = [aws_internet_gateway.sample_igw]
}
variables.tf
variable "prefix" {
type = map
default = {
sub-1 = {
az = "use2-az1"
cidr = "10.0.1.0/16"
}
sub-2 = {
az = "use2-az2"
cidr = "10.0.2.0/24"
}
}
}
The subnet_id can't be empty. You have to provide a valid subnet ID where the NAT gateway is going to be placed, for example:
resource "aws_nat_gateway" "sample_nat_gateway" {
allocation_id = aws_eip.sample_eip.id
subnet_id = aws_subnet.public-subnet["sub-1"].id
tags = {
Name = "${var.name}-sample-nat-gateway"
Environment = var.environment
}
depends_on = [aws_internet_gateway.sample_igw]
}
where aws_subnet.public-subnet["sub-1"] is one of the public subnets in your VPC.
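If you would rather not hardcode the "sub-1" key, a possible alternative (my sketch, not part of the original answer) is to pick the first entry of the for_each map:
resource "aws_nat_gateway" "sample_nat_gateway" {
  allocation_id = aws_eip.sample_eip.id
  subnet_id     = values(aws_subnet.public-subnet)[0].id # any one of the public subnets
  tags = {
    Name        = "${var.name}-sample-nat-gateway"
    Environment = var.environment
  }
  depends_on = [aws_internet_gateway.sample_igw]
}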
Following this GitHub repo, the user pool domain farm_users is created, yet terraform apply returns this error. I tried destroy. I tried deleting the user pool domain in the AWS console and repeating apply.
╷
│ Error: Error creating Cognito User Pool Domain: InvalidParameterException: Domain already associated with another user pool.
│
│ with module.api.aws_cognito_user_pool_domain.farm_users_pool_domain,
│ on modules/api/main.tf line 55, in resource "aws_cognito_user_pool_domain" "farm_users_pool_domain":
│ 55: resource "aws_cognito_user_pool_domain" "farm_users_pool_domain" {
│
After running apply:
$ aws cognito-idp describe-user-pool-domain --domain "fupdomain"
An error occurred (ResourceNotFoundException) when calling the DescribeUserPoolDomain operation: User pool domain fupdomain does not exist in this account.
main.tf
provider "aws" {
version = "~> 2.31"
region = var.region
}
data "aws_caller_identity" "current" {}
resource "random_string" "build_id" {
length = 16
special = false
upper = false
number = false
}
module "network" {
source = "./modules/network"
availability_zone = var.availability_zone
vpc_cidr = var.vpc_cidr
}
module "node_iam_role" {
source = "./modules/node_iam_role"
}
resource "aws_s3_bucket" "render_bucket" {
bucket = "${random_string.build_id.result}-render-data"
acl = "private"
}
# Stores server-side code bundles. i.e. Worker node and lambda layer
resource "aws_s3_bucket" "code_bundles_bucket" {
bucket = "${random_string.build_id.result}-code-bundles"
acl = "private"
}
# Stores and serves javascript client
resource "aws_s3_bucket" "client_bucket" {
bucket = "${random_string.build_id.result}-client-bucket"
acl = "public-read"
website {
index_document = "index.html"
error_document = "error.html"
}
}
# Code bundles
data "archive_file" "worker_node_code" {
type = "zip"
source_dir = "${path.root}/src/farm_worker"
output_path = "${path.root}/src/bundles/farm_worker.zip"
}
resource "aws_s3_bucket_object" "worker_code_bundle" {
bucket = aws_s3_bucket.code_bundles_bucket.id
key = "farm_worker.zip"
source = "${path.root}/src/bundles/farm_worker.zip"
depends_on = [data.archive_file.worker_node_code]
}
# Security groups for the worker nodes
resource "aws_security_group" "ssh" {
name = "allow_ssh"
vpc_id = module.network.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "nfs" {
name = "NFS"
vpc_id = module.network.vpc_id
ingress {
from_port = 2049
to_port = 2049
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# Build queues for project init and frame rendering
resource "aws_sqs_queue" "frame_render_deadletter" {
name = "frame_render_deadletter_queue"
}
resource "aws_sqs_queue" "frame_render_queue" {
name = "frame_render_queue"
visibility_timeout_seconds = 7000
redrive_policy = "{\"deadLetterTargetArn\":\"${aws_sqs_queue.frame_render_deadletter.arn}\",\"maxReceiveCount\":5}"
}
resource "aws_sqs_queue" "project_init_queue" {
name = "project_init_queue"
visibility_timeout_seconds = 7000
}
# EFS for shared storage during baking and rendering
resource "aws_efs_file_system" "shared_render_vol" {
tags = {
Name = "SharedRenderEFS"
}
}
resource "aws_efs_mount_target" "shared_mount" {
file_system_id = aws_efs_file_system.shared_render_vol.id
subnet_id = module.network.subnet_id
security_groups = [aws_security_group.nfs.id]
}
module "worker_node" {
source = "./modules/worker_node"
key_name = var.node_key_name
image_id = var.blender_node_image_id
vpc_security_group_ids = [aws_security_group.ssh.id, aws_security_group.nfs.id]
iam_instance_profile = module.node_iam_role.worker_iam_profile_name
build_id = random_string.build_id.result
region = var.region
render_bucket = aws_s3_bucket.render_bucket.id
code_bucket = aws_s3_bucket.code_bundles_bucket.id
frame_queue_url = aws_sqs_queue.frame_render_queue.id
project_init_queue_url = aws_sqs_queue.project_init_queue.id
shared_file_system_id = aws_efs_file_system.shared_render_vol.id
instance_types = var.instance_types
asg_name = var.worker_asg_name
asg_subnets = [module.network.subnet_id]
asg_max_workers = var.worker_node_max_count
asg_min_workers = 0
cloudwatch_namespace = var.cloudwatch_namespace
}
module "bpi_emitter" {
source = "./modules/bpi_emitter"
cloudwatch_namespace = var.cloudwatch_namespace
asg_name = module.worker_node.asg_name
frame_queue = aws_sqs_queue.frame_render_queue.id
project_init_queue = aws_sqs_queue.project_init_queue.id
frame_queue_bpi = var.frame_queue_bpi
project_init_queue_bpi = var.project_init_queue_bpi
}
# module "bucket_upload_listener" {
# source = "./modules/bucket_upload_listener"
# bucket_name = aws_s3_bucket.render_bucket.id
# bucket_arn = aws_s3_bucket.render_bucket.arn
# project_init_queue = aws_sqs_queue.project_init_queue.id
# }
resource "aws_dynamodb_table" "projects_table" {
name = "FarmProjects"
billing_mode = "PAY_PER_REQUEST"
hash_key = "ProjectId"
attribute {
name = "ProjectId"
type = "S"
}
}
resource "aws_dynamodb_table" "application_settings" {
name = "FarmApplicationSettings"
billing_mode = "PAY_PER_REQUEST"
hash_key = "SettingName"
attribute {
name = "SettingName"
type = "S"
}
}
module "api" {
source = "./modules/api"
region = var.region
bucket = aws_s3_bucket.render_bucket.id
frame_queue = aws_sqs_queue.frame_render_queue.id
project_init_queue = aws_sqs_queue.project_init_queue.id
client_endpoint = "https://${aws_s3_bucket.client_bucket.website_endpoint}"
dynamo_tables = {
projects = aws_dynamodb_table.projects_table.name,
application_settings = aws_dynamodb_table.application_settings.name
}
}
The domain name must be globally unique. This means that if the same domain is already used in another account, you can't use it. Try, for example:
aws cognito-idp create-user-pool-domain --domain fupdomain --user-pool-id <pool-id>
The output will be:
An error occurred (InvalidParameterException) when calling the
CreateUserPoolDomain operation: Domain already associated with another
user pool.
This makes sense, as the domain name is used to build a URL of the form:
https://{domain}.auth.us-east-1.amazoncognito.com
This is the URL users are authenticated against.
You need to edit the template and pick another name.
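One way to do that, sketched here with assumed resource names (the pool resource name is an assumption, not the repo's actual code), is to make the domain prefix unique per deployment by reusing the random build_id already defined in main.tf:
resource "aws_cognito_user_pool_domain" "farm_users_pool_domain" {
  domain       = "fupdomain-${random_string.build_id.result}" # assumed: suffix makes the prefix globally unique
  user_pool_id = aws_cognito_user_pool.farm_users.id          # assumed pool resource name
}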
I'm trying to use a VPC module I made for AWS in a top-level module.
My tree is as follows:
.
├── dev.vars.json
├── modules
│ └── vpc
│ ├── README.md
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
├── outputs.tf
├── variables.tf
└── main.tf
the "vpc" module works fine, I'm trying to use that module in my main.tf file on the root folder like this:
$ cat main.tf
module "dev_vpc" {
source = "./modules/vpc"
}
my variables:
variable "vpc" {
type = object({
name = string
})
}
my outputs.tf
# VPC
output "vpc_id" {
description = "The ID of the VPC"
value = module.vpc.vpc_id
}
...
and my dev.vars.json:
{
"vpc": {
"name": "development-vpc"
}
}
Once I got the VPC in "modules/vpc" working, I wanted to use it in the top-level main.tf file, but when I run plan (after init) I get:
$ terraform plan -var-file dev.vars.json
╷
│ Error: Missing required argument
│
│ on main.tf line 1, in module "dev_vpc":
│ 1: module "dev_vpc" {
│
│ The argument "vpc" is required, but no definition was found.
the main.tf in modules/vpc:
provider "aws" {
region = local.region
}
locals {
region = "us-east-1"
}
################################################################################
# VPC Module
################################################################################
resource "aws_vpc" "dev_vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.66.0"
name = var.vpc.name
cidr = "10.0.0.0/16"
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]
enable_ipv6 = true
enable_nat_gateway = false
single_nat_gateway = true
public_subnet_tags = {
Name = "overridden-name-public"
}
tags = {
Owner = "user"
Environment = "dev"
}
vpc_tags = {
Name = "vpc-name"
}
}
I haven't been able to figure out how to fix this.
Many thanks!
davidcsi
It ended up being that I used a Terraform configuration from Terraform's GitHub, and there were a lot of dependencies that wouldn't work.
My final VPC code is:
$ cat main.tf
provider "aws" {
region = "${var.region}"
}
/*==== The VPC ======*/
resource "aws_vpc" "vpc" {
cidr_block = "${var.vpc_cidr}"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "${var.environment}-vpc"
Environment = "${var.environment}"
}
}
$ cat subnets.tf
/* Internet gateway for the public subnet */
resource "aws_internet_gateway" "ig" {
vpc_id = "${aws_vpc.vpc.id}"
tags = {
Name = "${var.environment}-igw"
Environment = "${var.environment}"
}
}
/* Elastic IP for NAT */
resource "aws_eip" "nat_eip" {
vpc = true
depends_on = [aws_internet_gateway.ig]
}
/* NAT */
resource "aws_nat_gateway" "nat" {
allocation_id = "${aws_eip.nat_eip.id}"
subnet_id = "${element(aws_subnet.public_subnet.*.id, 0)}"
depends_on = [aws_internet_gateway.ig]
tags = {
Name = "nat"
Environment = "${var.environment}"
}
}
/* Public subnet */
resource "aws_subnet" "public_subnet" {
vpc_id = "${aws_vpc.vpc.id}"
count = "${length(var.public_subnets_cidr)}"
cidr_block = "${element(var.public_subnets_cidr, count.index)}"
availability_zone = "${element(var.availability_zones, count.index)}"
map_public_ip_on_launch = true
tags = {
Name = "${var.environment}-${element(var.availability_zones, count.index)}- public-subnet"
Environment = "${var.environment}"
}
}
/* Private subnet */
resource "aws_subnet" "private_subnet" {
vpc_id = "${aws_vpc.vpc.id}"
count = "${length(var.private_subnets_cidr)}"
cidr_block = "${element(var.private_subnets_cidr, count.index)}"
availability_zone = "${element(var.availability_zones, count.index)}"
map_public_ip_on_launch = false
tags = {
Name = "${var.environment}-${element(var.availability_zones, count.index)}-private-subnet"
Environment = "${var.environment}"
}
}
/* Routing table for private subnet */
resource "aws_route_table" "private" {
vpc_id = "${aws_vpc.vpc.id}"
tags = {
Name = "${var.environment}-private-route-table"
Environment = "${var.environment}"
}
}
/* Routing table for public subnet */
resource "aws_route_table" "public" {
vpc_id = "${aws_vpc.vpc.id}"
tags = {
Name = "${var.environment}-public-route-table"
Environment = "${var.environment}"
}
}
resource "aws_route" "public_internet_gateway" {
route_table_id = "${aws_route_table.public.id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.ig.id}"
}
resource "aws_route" "private_nat_gateway" {
route_table_id = "${aws_route_table.private.id}"
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = "${aws_nat_gateway.nat.id}"
}
/* Route table associations */
resource "aws_route_table_association" "public" {
count = "${length(var.public_subnets_cidr)}"
subnet_id = "${element(aws_subnet.public_subnet.*.id, count.index)}"
route_table_id = "${aws_route_table.public.id}"
}
resource "aws_route_table_association" "private" {
count = "${length(var.private_subnets_cidr)}"
subnet_id = "${element(aws_subnet.private_subnet.*.id, count.index)}"
route_table_id = "${aws_route_table.private.id}"
$ cat security_groups.tf
/*==== VPC's Default Security Group ======*/
resource "aws_security_group" "default" {
name = "${var.environment}-default-sg"
description = "Default security group to allow inbound/outbound from the VPC"
vpc_id = "${aws_vpc.vpc.id}"
depends_on = [aws_vpc.vpc]
ingress {
from_port = "0"
to_port = "0"
protocol = "-1"
self = true
}
egress {
from_port = "0"
to_port = "0"
protocol = "-1"
self = "true"
}
tags = {
Environment = "${var.environment}"
}
}
$ cat outputs.tf
output "vpc_id" {
value = "${aws_vpc.vpc.id}"
}
$ cat variables.tf
variable "region" {
description = "AWS Deployment region.."
default = "us-east-1"
}
variable "vpc_cidr" {
description = "CIDR to assign to this VPC"
default = "10.0.0.0/16"
}
variable "environment" {
description = "On what environment is this running?"
default = "dev"
}
variable "availability_zones" {
description = "On what environment is this running?"
default = [
"us-east-1a",
"us-east-1b",
"us-east-1c"
]
}
variable "public_subnets_cidr" {
description = "public_subnets_cidr"
default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
}
variable "private_subnets_cidr" {
description = "On what environment is this running?"
default = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]
}
This doesn't give me any issues when using it as a module.
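For completeness, here is a sketch (my addition, not part of the original answer) of how this module can be called from the root main.tf; since every variable has a default, only source is strictly required, and any default can be overridden per environment:
module "dev_vpc" {
  source      = "./modules/vpc"
  environment = "dev"          # optional, defaults to "dev"
  vpc_cidr    = "10.0.0.0/16"  # optional, defaults to "10.0.0.0/16"
}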
I have been trying to spin up ECS using Terraform. About two days ago it was working as expected, but today I ran terraform apply and I keep getting an error saying:
"The requested configuration is currently not supported. Launching EC2 instance failed"
I have researched this issue a lot. I tried hardcoding the VPC tenancy to default, and I've tried changing the region and the instance type, but nothing seems to fix it.
This is my Terraform config:
provider "aws" {
region = var.region
}
data "aws_availability_zones" "available" {}
# Define a vpc
resource "aws_vpc" "motivy_vpc" {
cidr_block = var.motivy_network_cidr
tags = {
Name = var.motivy_vpc
}
enable_dns_support = "true"
instance_tenancy = "default"
enable_dns_hostnames = "true"
}
# Internet gateway for the public subnet
resource "aws_internet_gateway" "motivy_ig" {
vpc_id = aws_vpc.motivy_vpc.id
tags = {
Name = "motivy_ig"
}
}
# Public subnet 1
resource "aws_subnet" "motivy_public_sn_01" {
vpc_id = aws_vpc.motivy_vpc.id
cidr_block = var.motivy_public_01_cidr
availability_zone = data.aws_availability_zones.available.names[0]
tags = {
Name = "motivy_public_sn_01"
}
}
# Public subnet 2
resource "aws_subnet" "motivy_public_sn_02" {
vpc_id = aws_vpc.motivy_vpc.id
cidr_block = var.motivy_public_02_cidr
availability_zone = data.aws_availability_zones.available.names[1]
tags = {
Name = "motivy_public_sn_02"
}
}
# Routing table for public subnet 1
resource "aws_route_table" "motivy_public_sn_rt_01" {
vpc_id = aws_vpc.motivy_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.motivy_ig.id
}
tags = {
Name = "motivy_public_sn_rt_01"
}
}
# Routing table for public subnet 2
resource "aws_route_table" "motivy_public_sn_rt_02" {
vpc_id = aws_vpc.motivy_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.motivy_ig.id
}
tags = {
Name = "motivy_public_sn_rt_02"
}
}
# Associate the routing table to public subnet 1
resource "aws_route_table_association" "motivy_public_sn_rt_01_assn" {
subnet_id = aws_subnet.motivy_public_sn_01.id
route_table_id = aws_route_table.motivy_public_sn_rt_01.id
}
# Associate the routing table to public subnet 2
resource "aws_route_table_association" "motivy_public_sn_rt_02_assn" {
subnet_id = aws_subnet.motivy_public_sn_02.id
route_table_id = aws_route_table.motivy_public_sn_rt_02.id
}
# ECS Instance Security group
resource "aws_security_group" "motivy_public_sg" {
name = "motivys_public_sg"
description = "Test public access security group"
vpc_id = aws_vpc.motivy_vpc.id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = [
"0.0.0.0/0"]
}
ingress {
from_port = 5000
to_port = 5000
protocol = "tcp"
cidr_blocks = [
"0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = [
var.motivy_public_01_cidr,
var.motivy_public_02_cidr
]
}
egress {
# allow all traffic to private SN
from_port = "0"
to_port = "0"
protocol = "-1"
cidr_blocks = [
"0.0.0.0/0"]
}
tags = {
Name = "motivy_public_sg"
}
}
data "aws_ecs_task_definition" "motivy_server" {
task_definition = aws_ecs_task_definition.motivy_server.family
}
resource "aws_ecs_task_definition" "motivy_server" {
family = "motivy_server"
container_definitions = file("task-definitions/service.json")
}
data "aws_ami" "latest_ecs" {
most_recent = true # get the latest version
filter {
name = "name"
values = [
"amzn2-ami-ecs-*"] # ECS optimized image
}
owners = [
"amazon" # Only official images
]
}
resource "aws_launch_configuration" "ecs-launch-configuration" {
name = "ecs-launch-configuration"
image_id = data.aws_ami.latest_ecs.id
instance_type = "t2.micro"
iam_instance_profile = aws_iam_instance_profile.ecs-instance-profile.id
root_block_device {
volume_type = "standard"
volume_size = 100
delete_on_termination = true
}
enable_monitoring = true
lifecycle {
create_before_destroy = true
}
security_groups = [aws_security_group.motivy_public_sg.id]
associate_public_ip_address = "true"
key_name = var.ecs_key_pair_name
user_data = <<EOF
#!/bin/bash
echo ECS_CLUSTER=${var.ecs_cluster} >> /etc/ecs/ecs.config
EOF
}
resource "aws_appautoscaling_target" "ecs_motivy_server_target" {
max_capacity = 2
min_capacity = 1
resource_id = "service/${aws_ecs_cluster.motivy_ecs_cluster.name}/${aws_ecs_service.motivy_server_service.name}"
scalable_dimension = "ecs:service:DesiredCount"
service_namespace = "ecs"
depends_on = [ aws_ecs_service.motivy_server_service ]
}
resource "aws_iam_role" "ecs-instance-role" {
name = "ecs-instance-role"
path = "/"
assume_role_policy = data.aws_iam_policy_document.ecs-instance-policy.json
}
data "aws_iam_policy_document" "ecs-instance-policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role_policy_attachment" "ecs-instance-role-attachment" {
role = aws_iam_role.ecs-instance-role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs-instance-profile" {
name = "ecs-instance-profile"
path = "/"
role = aws_iam_role.ecs-instance-role.id
provisioner "local-exec" {
command = "sleep 10"
}
}
resource "aws_autoscaling_group" "motivy-server-autoscaling-group" {
name = "motivy-server-autoscaling-group"
termination_policies = [
"OldestInstance" # When a “scale down” event occurs, which instances to kill first?
]
default_cooldown = 30
health_check_grace_period = 30
max_size = var.max_instance_size
min_size = var.min_instance_size
desired_capacity = var.desired_capacity
# Use this launch configuration to define “how” the EC2 instances are to be launched
launch_configuration = aws_launch_configuration.ecs-launch-configuration.name
lifecycle {
create_before_destroy = true
}
# Refer to vpc.tf for more information
# You could use the private subnets here instead,
# if you want the EC2 instances to be hidden from the internet
vpc_zone_identifier = [aws_subnet.motivy_public_sn_01.id, aws_subnet.motivy_public_sn_02.id]
tags = [{
key = "Name",
value = var.ecs_cluster,
# Make sure EC2 instances are tagged with this tag as well
propagate_at_launch = true
}]
}
resource "aws_alb" "motivy_server_alb_load_balancer" {
name = "motivy-alb-load-balancer"
security_groups = [aws_security_group.motivy_public_sg.id]
subnets = [aws_subnet.motivy_public_sn_01.id, aws_subnet.motivy_public_sn_02.id]
tags = {
Name = "motivy_server_alb_load_balancer"
}
}
resource "aws_alb_target_group" "motivy_server_target_group" {
name = "motivy-server-target-group"
port = 5000
protocol = "HTTP"
vpc_id = aws_vpc.motivy_vpc.id
deregistration_delay = "10"
health_check {
healthy_threshold = "2"
unhealthy_threshold = "6"
interval = "30"
matcher = "200,301,302"
path = "/"
protocol = "HTTP"
timeout = "5"
}
stickiness {
type = "lb_cookie"
}
tags = {
Name = "motivy-server-target-group"
}
}
resource "aws_alb_listener" "alb-listener" {
load_balancer_arn = aws_alb.motivy_server_alb_load_balancer.arn
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.motivy_server_target_group.arn
type = "forward"
}
}
resource "aws_autoscaling_attachment" "asg_attachment_motivy_server" {
autoscaling_group_name = aws_autoscaling_group.motivy-server-autoscaling-group.id
alb_target_group_arn = aws_alb_target_group.motivy_server_target_group.arn
}
This is the exact error I get
Error: "motivy-server-autoscaling-group": Waiting up to 10m0s: Need at least 2 healthy instances in ASG, have 0. Most recent activity: {
ActivityId: "a775c531-9496-fdf9-5157-ab2448626293",
AutoScalingGroupName: "motivy-server-autoscaling-group",
Cause: "At 2020-04-05T22:10:28Z an instance was started in response to a difference between desired and actual capacity, increasing the capacity from 0 to 2.",
Description: "Launching a new EC2 instance. Status Reason: The requested configuration is currently not supported. Please check the documentation for supported configurations. Launching EC2 instance failed.",
Details: "{\"Subnet ID\":\"subnet-05de5fc0e994d05fe\",\"Availability Zone\":\"us-east-1a\"}",
EndTime: 2020-04-05 22:10:29 +0000 UTC,
Progress: 100,
StartTime: 2020-04-05 22:10:29.439 +0000 UTC,
StatusCode: "Failed",
StatusMessage: "The requested configuration is currently not supported. Please check the documentation for supported configurations. Launching EC2 instance failed."
}
I'm not sure why it worked two days ago, but recent Amazon ECS-optimized AMIs use gp2 as their root volume type.
You should choose gp2 as root_block_device.volume_type:
resource "aws_launch_configuration" "ecs-launch-configuration" {
# ...
root_block_device {
volume_type = "gp2"
volume_size = 100
delete_on_termination = true
}
# ...
}
data "aws_ami" "latest_ecs" {
most_recent = true # get the latest version
filter {
name = "name"
values = ["amzn2-ami-ecs-hvm-*-x86_64-ebs"] # ECS optimized image
}
owners = [
"amazon" # Only official images
]
}
For me it worked using t3-generation instances instead of t2.
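If you go that route, the only change needed in the launch configuration should be the instance type; a minimal sketch:
resource "aws_launch_configuration" "ecs-launch-configuration" {
  # ... other arguments unchanged ...
  instance_type = "t3.micro" # t3 generation instead of t2
  # ...
}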
AWS EC2 instance creation fails while creating a network interface in the aws_instance section. The configuration follows the Terraform network interfaces configuration documentation.
On removing the network_interface block the configuration works seamlessly. With the block, the following error was logged:
"Error: Error launching source instance: Unsupported: The requested configuration is currently not supported. Please check the documentation for supported configurations."
variable "aws_region" {}
variable "aws_access_key" {}
variable "aws_secret_key" {}
variable "vpc_cidr_block" {}
variable "environment" {}
variable "applicationtype" {}
variable "subnet_cidr_block" {}
variable "amiid" {}
variable "instancetype" {}
variable "bucketname" {}
variable "publickey-fe" {}
variable "publickey-be" {}
provider "aws" {
profile = "default"
region = "${var.aws_region}"
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
}
data "aws_availability_zones" "availability" {
state = "available"
}
resource "aws_vpc" "sitespeed_vpc" {
cidr_block = "${var.vpc_cidr_block}"
instance_tenancy = "dedicated"
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-VPC"
}
}
resource "aws_subnet" "sitespeed_subnet" {
vpc_id = "${aws_vpc.sitespeed_vpc.id}"
cidr_block = "${var.subnet_cidr_block}"
availability_zone = "${data.aws_availability_zones.availability.names[0]}"
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-Subnet"
}
}
resource "aws_network_interface" "sitespeed_frontend_NIC" {
subnet_id = "${aws_subnet.sitespeed_subnet.id}"
private_ips = ["192.168.10.100"]
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-frontend-nic"
}
}
resource "aws_network_interface" "sitespeed_backend_NIC" {
subnet_id = "${aws_subnet.sitespeed_subnet.id}"
private_ips = ["192.168.10.110"]
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-backend-nic"
}
}
resource "aws_key_pair" "sitespeed_front_key" {
key_name = "site_speed_front_key"
public_key = "${var.publickey-fe}"
}
resource "aws_key_pair" "sitespeed_back_key" {
key_name = "site_speed_back_key"
public_key = "${var.publickey-be}"
}
resource "aws_instance" "sitespeed_front" {
ami = "ami-00942d7cd4f3ca5c0"
instance_type = "t2.micro"
key_name = "site_speed_front_key"
availability_zone = "${data.aws_availability_zones.availability.names[0]}"
network_interface {
network_interface_id = "${aws_network_interface.sitespeed_frontend_NIC.id}"
device_index = 0
}
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-frontend-server"
public = "yes"
}
}
resource "aws_instance" "sitespeed_backend" {
ami = "ami-00942d7cd4f3ca5c0"
instance_type = "t2.micro"
key_name = "site_speed_back_key"
network_interface {
network_interface_id = "${aws_network_interface.sitespeed_backend_NIC.id}"
device_index = 0
}
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-backend-server"
public = "No"
}
}
resource "aws_s3_bucket" "b" {
bucket = "${var.bucketname}"
acl = "private"
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
}
}
The issue was due to the Terraform version. Following is the updated script, which works with Terraform v0.12.16, to create an EC2 instance on AWS.
// Variable Definition
variable "aws_region" {}
variable "aws_vpc_cidr_block" {}
variable "aws_subnet_cidr_block" {}
variable "aws_private_ip_fe" {}
variable "aws_Name" {}
variable "aws_Application" {}
variable "aws_ami" {}
variable "aws_instance_type" {}
// Provider Definition
provider "aws" {
version = "~> 2.40"
region = var.aws_region
}
// Adds a VPC
resource "aws_vpc" "aws_ec2_deployment_test-vpc" {
cidr_block = var.aws_vpc_cidr_block
tags = {
Name = join("-", [var.aws_Name, "vpc"])
Application = var.aws_Application
}
}
//Adds a subnet
resource "aws_subnet" "aws_ec2_deployment_test-subnet" {
vpc_id = aws_vpc.aws_ec2_deployment_test-vpc.id
cidr_block = var.aws_subnet_cidr_block
availability_zone = join("", [var.aws_region, "a"])
tags = {
Name = join("-", [var.aws_Name, "subnet"])
Application = var.aws_Application
}
}
//Adds a Network Interface
resource "aws_network_interface" "aws_ec2_deployment_test-fe" {
subnet_id = aws_subnet.aws_ec2_deployment_test-subnet.id
private_ips = [ var.aws_private_ip_fe ]
tags = {
Name = join("-", [var.aws_Name, "network-interface-fe"])
Application = var.aws_Application
}
}
//Adds an EC2 Instance
resource "aws_instance" "aws_ec2_deployment_test-fe"{
ami = var.aws_ami
instance_type = var.aws_instance_type
network_interface {
network_interface_id = aws_network_interface.aws_ec2_deployment_test-fe.id
device_index = 0
}
tags = {
Name = join("-", [var.aws_Name, "fe-ec2"])
Application = var.aws_Application
}
}
// Print Output Values
output "aws_ec2_deployment_test-vpc" {
description = "CIDR Block for the VPC: "
value = aws_vpc.aws_ec2_deployment_test-vpc.cidr_block
}
output "aws_ec2_deployment_test-subnet" {
description = "Subnet Block: "
value = aws_subnet.aws_ec2_deployment_test-subnet.cidr_block
}
output "aws_ec2_deployment_test-private-ip" {
description = "System Private IP: "
value = aws_network_interface.aws_ec2_deployment_test-fe.private_ip
}
output "aws_ec2_deployment_test-EC2-Details" {
description = "EC2 Details: "
value = aws_instance.aws_ec2_deployment_test-fe.public_ip
}
Gist link to the solution