Terraform error creating user pool domain

Following this GitHub repo, the user pool domain farm_users is created, yet terraform apply returns the error below. I've tried terraform destroy, and I've tried deleting the user pool domain in the AWS console and re-running apply.
╷
│ Error: Error creating Cognito User Pool Domain: InvalidParameterException: Domain already associated with another user pool.
│
│ with module.api.aws_cognito_user_pool_domain.farm_users_pool_domain,
│ on modules/api/main.tf line 55, in resource "aws_cognito_user_pool_domain" "farm_users_pool_domain":
│ 55: resource "aws_cognito_user_pool_domain" "farm_users_pool_domain" {
│
After running apply:
$ aws cognito-idp describe-user-pool-domain --domain "fupdomain"
An error occurred (ResourceNotFoundException) when calling the DescribeUserPoolDomain operation: User pool domain fupdomain does not exist in this account.
main.tf
provider "aws" {
version = "~> 2.31"
region = var.region
}
data "aws_caller_identity" "current" {}
resource "random_string" "build_id" {
length = 16
special = false
upper = false
number = false
}
module "network" {
source = "./modules/network"
availability_zone = var.availability_zone
vpc_cidr = var.vpc_cidr
}
module "node_iam_role" {
source = "./modules/node_iam_role"
}
resource "aws_s3_bucket" "render_bucket" {
bucket = "${random_string.build_id.result}-render-data"
acl = "private"
}
# Stores server-side code bundles. i.e. Worker node and lambda layer
resource "aws_s3_bucket" "code_bundles_bucket" {
bucket = "${random_string.build_id.result}-code-bundles"
acl = "private"
}
# Stores and serves javascript client
resource "aws_s3_bucket" "client_bucket" {
bucket = "${random_string.build_id.result}-client-bucket"
acl = "public-read"
website {
index_document = "index.html"
error_document = "error.html"
}
}
# Code bundles
data "archive_file" "worker_node_code" {
type = "zip"
source_dir = "${path.root}/src/farm_worker"
output_path = "${path.root}/src/bundles/farm_worker.zip"
}
resource "aws_s3_bucket_object" "worker_code_bundle" {
bucket = aws_s3_bucket.code_bundles_bucket.id
key = "farm_worker.zip"
source = "${path.root}/src/bundles/farm_worker.zip"
depends_on = [data.archive_file.worker_node_code]
}
# Security groups for the worker nodes
resource "aws_security_group" "ssh" {
name = "allow_ssh"
vpc_id = module.network.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "nfs" {
name = "NFS"
vpc_id = module.network.vpc_id
ingress {
from_port = 2049
to_port = 2049
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# Build queues for project init and frame rendering
resource "aws_sqs_queue" "frame_render_deadletter" {
name = "frame_render_deadletter_queue"
}
resource "aws_sqs_queue" "frame_render_queue" {
name = "frame_render_queue"
visibility_timeout_seconds = 7000
redrive_policy = "{\"deadLetterTargetArn\":\"${aws_sqs_queue.frame_render_deadletter.arn}\",\"maxReceiveCount\":5}"
}
resource "aws_sqs_queue" "project_init_queue" {
name = "project_init_queue"
visibility_timeout_seconds = 7000
}
# EFS for shared storage during baking and rendering
resource "aws_efs_file_system" "shared_render_vol" {
tags = {
Name = "SharedRenderEFS"
}
}
resource "aws_efs_mount_target" "shared_mount" {
file_system_id = aws_efs_file_system.shared_render_vol.id
subnet_id = module.network.subnet_id
security_groups = [aws_security_group.nfs.id]
}
module "worker_node" {
source = "./modules/worker_node"
key_name = var.node_key_name
image_id = var.blender_node_image_id
vpc_security_group_ids = [aws_security_group.ssh.id, aws_security_group.nfs.id]
iam_instance_profile = module.node_iam_role.worker_iam_profile_name
build_id = random_string.build_id.result
region = var.region
render_bucket = aws_s3_bucket.render_bucket.id
code_bucket = aws_s3_bucket.code_bundles_bucket.id
frame_queue_url = aws_sqs_queue.frame_render_queue.id
project_init_queue_url = aws_sqs_queue.project_init_queue.id
shared_file_system_id = aws_efs_file_system.shared_render_vol.id
instance_types = var.instance_types
asg_name = var.worker_asg_name
asg_subnets = [module.network.subnet_id]
asg_max_workers = var.worker_node_max_count
asg_min_workers = 0
cloudwatch_namespace = var.cloudwatch_namespace
}
module "bpi_emitter" {
source = "./modules/bpi_emitter"
cloudwatch_namespace = var.cloudwatch_namespace
asg_name = module.worker_node.asg_name
frame_queue = aws_sqs_queue.frame_render_queue.id
project_init_queue = aws_sqs_queue.project_init_queue.id
frame_queue_bpi = var.frame_queue_bpi
project_init_queue_bpi = var.project_init_queue_bpi
}
# module "bucket_upload_listener" {
# source = "./modules/bucket_upload_listener"
# bucket_name = aws_s3_bucket.render_bucket.id
# bucket_arn = aws_s3_bucket.render_bucket.arn
# project_init_queue = aws_sqs_queue.project_init_queue.id
# }
resource "aws_dynamodb_table" "projects_table" {
name = "FarmProjects"
billing_mode = "PAY_PER_REQUEST"
hash_key = "ProjectId"
attribute {
name = "ProjectId"
type = "S"
}
}
resource "aws_dynamodb_table" "application_settings" {
name = "FarmApplicationSettings"
billing_mode = "PAY_PER_REQUEST"
hash_key = "SettingName"
attribute {
name = "SettingName"
type = "S"
}
}
module "api" {
source = "./modules/api"
region = var.region
bucket = aws_s3_bucket.render_bucket.id
frame_queue = aws_sqs_queue.frame_render_queue.id
project_init_queue = aws_sqs_queue.project_init_queue.id
client_endpoint = "https://${aws_s3_bucket.client_bucket.website_endpoint}"
dynamo_tables = {
projects = aws_dynamodb_table.projects_table.name,
application_settings = aws_dynamodb_table.application_settings.name
}
}

The domain name must be globally unique across all AWS accounts. If the same domain is already in use in another account, you can't use it. Try, for example:
aws cognito-idp create-user-pool-domain --domain fupdomain --user-pool-id <pool-id>
The output will be:
An error occurred (InvalidParameterException) when calling the
CreateUserPoolDomain operation: Domain already associated with another
user pool.
This makes sense, as the domain name is used to build a URL of the form:
https://{domain}.auth.us-east-1.amazoncognito.com
which is the endpoint users authenticate against.
You need to edit the template and pick another name.
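If you want to avoid collisions altogether rather than guessing at a free prefix, one option is to derive the domain from a random suffix, much like the random_string.build_id already used for the bucket names. A minimal sketch, assuming the module's user pool resource is aws_cognito_user_pool.farm_users (its source isn't shown in the question):
resource "random_string" "domain_suffix" {
  length  = 8
  special = false
  upper   = false
}

resource "aws_cognito_user_pool_domain" "farm_users_pool_domain" {
  # "fupdomain-<random>" cannot collide with a prefix taken in another account
  domain       = "fupdomain-${random_string.domain_suffix.result}"
  user_pool_id = aws_cognito_user_pool.farm_users.id # assumed pool resource name
}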

Related

Choosing CIDR blocks

I am struggling to understand CIDR blocks in the way I am using them. My understanding (probably wrong) is that they are a way of reserving a range of IP addresses for your environment, which you can then apportion across applications. But I can't get it working in my case. I am using Terraform to manage a simple environment: a VPC containing a Lambda and an RDS instance. The RDS instance will not be publicly accessible; the Lambda will be invoked by an HTTP trigger. The Lambda and the RDS instance each need their own subnets, and the RDS instance needs two. I have this configuration in Terraform, which keeps failing with this and similar errors:
The new Subnets are not in the same Vpc as the existing subnet group
The terraform set up is:
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "vpc"
}
}
resource "aws_subnet" "rds_subnet_1a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
availability_zone = "eu-west-1a"
tags = {
Name = "rds_subnet_1a"
}
}
resource "aws_subnet" "rds_subnet_1b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.2.0/24"
availability_zone = "eu-west-1b"
tags = {
Name = "rds_subnet_1b"
}
}
resource "aws_subnet" "lambda_subnet_1a" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.3.0/24"
availability_zone = "eu-west-1a"
tags = {
Name = "lambda_subnet_1a"
}
}
resource "aws_db_subnet_group" "default" {
name = "main"
subnet_ids = [aws_subnet.rds_subnet_1a.id, aws_subnet.rds_subnet_1b.id]
tags = {
Name = "My DB subnet group"
}
}
resource "aws_security_group" "rds" {
name = "rds-sg"
vpc_id = aws_vpc.main.id
ingress {
from_port = 5432
to_port = 5432
protocol = "tcp"
cidr_blocks = ["10.0.0.0/16"]
}
egress {
from_port = 5432
to_port = 5432
protocol = "tcp"
cidr_blocks = ["10.0.0.0/16"]
}
tags = {
Name = "rds-sg"
}
}
resource "aws_security_group" "lambda" {
name = "lambda_sg"
vpc_id = aws_vpc.main.id
ingress {
protocol = -1
self = true
from_port = 0
to_port = 0
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["10.0.0.0/16"]
}
tags = {
Name = "lambda_sg"
}
}
I know this is basic, but I think answers specific to my situation may help me understand the concepts better.
EDIT - lambda config:
resource "aws_lambda_function" "api_uprn" {
function_name = "api-uprn"
s3_bucket = aws_s3_bucket.lambdas_bucket.id
s3_key = "api-uprn/function_0.0.8.zip"
runtime = "python3.9"
handler = "app.main.handler"
role = aws_iam_role.lambda_exec.arn
vpc_config {
subnet_ids = [aws_subnet.subnet_1a.id]
security_group_ids = [aws_security_group.lambda.id]
}
}
resource "aws_cloudwatch_log_group" "api_uprn" {
name = "/aws/lambda/${aws_lambda_function.api_uprn.function_name}"
retention_in_days = 30
}
resource "aws_iam_role" "lambda_exec" {
name = "api_uprn"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "lambda.amazonaws.com"
}
}
]
})
}
resource "aws_iam_role_policy_attachment" "lambda_policy" {
role = aws_iam_role.lambda_exec.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
}
resource "aws_iam_role_policy_attachment" "rds_read" {
role = aws_iam_role.lambda_exec.name
policy_arn = "arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess"
}
resource "aws_iam_role_policy_attachment" "lambda_vpc_access" {
role = aws_iam_role.lambda_exec.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"
}
Can you please post the full error here? It will be easier to understand which resource is throwing it.
My tip is that you need to change the subnet_ids in your Lambda configuration: it currently references aws_subnet.subnet_1a, which isn't one of the subnets declared in your VPC config. From what I understand, your Lambda configuration should look like this:
resource "aws_lambda_function" "api_uprn" {
function_name = "api-uprn"
s3_bucket = aws_s3_bucket.lambdas_bucket.id
s3_key = "api-uprn/function_0.0.8.zip"
runtime = "python3.9"
handler = "app.main.handler"
role = aws_iam_role.lambda_exec.arn
vpc_config {
subnet_ids = [aws_subnet.lambda_subnet_1a.id]
security_group_ids = [aws_security_group.lambda.id]
}
}
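On the broader CIDR question: the VPC's 10.0.0.0/16 is simply a pool of 65,536 private addresses, and each /24 subnet carves a non-overlapping 256-address slice out of it. Instead of hard-coding 10.0.1.0/24, 10.0.2.0/24 and so on, you could let Terraform's cidrsubnet() function derive the slices from the VPC block, which makes it harder to pick ranges that overlap or fall outside the VPC. A sketch (not part of the original config; it produces the same addresses as the hard-coded values):
# cidrsubnet(prefix, newbits, netnum): adding 8 bits to a /16 yields /24s,
# and netnum picks which /24 (1 => 10.0.1.0/24, 2 => 10.0.2.0/24, ...).
resource "aws_subnet" "rds_subnet_1a" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = cidrsubnet(aws_vpc.main.cidr_block, 8, 1) # 10.0.1.0/24
  availability_zone = "eu-west-1a"
}

resource "aws_subnet" "rds_subnet_1b" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = cidrsubnet(aws_vpc.main.cidr_block, 8, 2) # 10.0.2.0/24
  availability_zone = "eu-west-1b"
}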

A managed resource has not been declared in module

I have two modules under the modules directory, named prienv and vpc.
nnice#MPL-G8WW7D3:~/terra-test/modules/prienv$ pwd
/home/nnice/terra-test/modules/prienv
nnice#MPL-G8WW7D3:~/terra-test/modules/prienv$ ls
data.tf main.tf variable.tf
Here is my data.tf file under prienv module
nnice#MPL-G8WW7D3:~/terra-test/modules/prienv$ cat data.tf
data "aws_security_groups" "mysg-data" {
filter {
name = "tag:Name"
values = ["MySG"]
}
depends_on = [aws_security_groups.mysg]
}
data "aws_subnet" "myprisn-data" {
filter {
name = "tag:Name"
values = ["MyPriSN"]
}
depends_on = [aws_subnet.myprisn]
}
Here is my variable.tf file under prienv module
variable "pri_av" {
description = "for private availability zone"
type = string
default = "us-east-1b"
}
variable "ami" {
description = "ami for ec2"
type = string
default = "ami-03ededff12e34e59e"
}
variable "instance_type" {
description = "Instance type t2.micro"
type = string
default = "t2.micro" #will use double quotes for string type
}
variable "key_pair" {
description = "key pair for ec2"
type = string
default = "learner_key"
}
variable "instance_count" {
description = "EC2 instance count"
type = number
default = 1 #here we're using it to create two private instances
}
variable "project_environment" {
description = "project name and environment for private instances"
type = map(string)
default = {
project = "private", # it works upon key value pair where project is for key and environment is value
environment = "testing" # you will find them under tag on aws console
}
}
And this is my main.tf file under prienv module
resource "aws_instance" "mypriec2" {
ami = var.ami
instance_type = var.instance_type
count = var.instance_count
availability_zone = var.pri_av
#subnet_id = aws_subnet.myprisn.id
subnet_id = data.aws_subnet.myprisn-data.id
#vpc_security_group_ids = [aws_security_group.mysg.id]
vpc_security_group_ids = data.aws_security_groups.mysg-data.ids
key_name = var.key_pair
# disable_api_termination = true
tags = var.project_environment
}
resource "aws_key_pair" "mykeypair" {
key_name = var.key_pair
public_key = "ssh-rsa AAAAB3NaffrWscf59juCakElys9F3+zVuz0ta4gRUtKgWVPIj6ACr00VNDzsTTW2/sSjYtE5zWolVKCITlhqiIhgRKUDLKoxclxUKnK6IGIafdaefafaheiufa;fdaeoasfdkQvNtGrrHzY5/dbZhIUTxDUyvT5O5U= nnice#MPL-G8WW7D3"
}
And here is my vpc module
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ pwd
/home/nnice/terra-test/modules/vpc
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ ls
data.tf main.tf variable.tf
This is my data.tf file under vpc module
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ cat data.tf
data "aws_vpc" "myvpc-data" {
filter {
name = "tag:Name"
values = ["MyVPC"]
}
depends_on = [aws_vpc.myvpc]
}
data "aws_subnet" "mypubsn-data" {
filter {
name = "tag:Name"
values = ["MyPubSN"]
}
depends_on = [aws_subnet.mypubsn]
}
data "aws_subnet" "myprisn-data" {
filter {
name = "tag:Name"
values = ["MyPriSN"]
}
depends_on = [aws_subnet.myprisn]
}
This is my main.tf file under vpc module
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ cat main.tf
##################################################################
############################## VPC ###############################
##################################################################
resource "aws_vpc" "myvpc" {
cidr_block = var.vpc_cidr
instance_tenancy = var.vpc_tenancy
tags = {
Name = var.vpc_tag
}
}
##################################################################
############################# Subnet #############################
##################################################################
#PUBLIC SUBNET
resource "aws_subnet" "mypubsn" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
cidr_block = var.mypubsn_cidr
availability_zone = var.pub_av
map_public_ip_on_launch = var.map_public_ip_on_launch
tags = {
Name = var.mypubsn_tag
}
}
#PRIVATE SUBNET
resource "aws_subnet" "myprisn" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
cidr_block = var.myprisn_cidr
availability_zone = var.pri_av
tags = {
Name = var.myprisn_tag
}
}
##################################################################
############################### IGW ##############################
##################################################################
resource "aws_internet_gateway" "myigw" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
tags = {
Name = var.igw_tag
}
}
##################################################################
############################ Route Table #########################
##################################################################
#PUBLIC RT
resource "aws_route_table" "mypubrt" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
tags = {
Name = var.mypubsn_tag
}
}
#PRIVATE RT
resource "aws_route_table" "myprirt" {
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
tags = {
Name = var.myprisn_tag
}
}
####################################################################
######################## Route Table Associate #####################
####################################################################
#PUBLIC RT association
resource "aws_route_table_association" "pub" {
#subnet_id = aws_subnet.mypubsn.id
subnet_id = data.aws_subnet.mypubsn-data.id
route_table_id = aws_route_table.mypubrt.id
}
#PRIVATE RT association
resource "aws_route_table_association" "pri" {
#subnet_id = aws_subnet.myprisn.id
subnet_id = data.aws_subnet.myprisn-data.id
route_table_id = aws_route_table.myprirt.id
}
###################################################################
########################### Route #################################
###################################################################
#PUBLIC Route
resource "aws_route" "mypubroute" {
route_table_id = aws_route_table.mypubrt.id
destination_cidr_block = var.pubroute
gateway_id = aws_internet_gateway.myigw.id
depends_on = [aws_route_table.mypubrt]
}
#PRIVATE Route
#resource "aws_route" "mypriroute" {
# route_table_id = aws_route_table.myprirt.id
# destination_cidr_block = "0.0.0.0/0"
# gateway_id = aws_internet_gateway.myigw.id
# depends_on = [aws_route_table.myprirt]
#}
###################################################################
############################ SG ###################################
###################################################################
resource "aws_security_group" "mysg" {
name = "MySecurityGroup"
description = "Allow TLS inbound traffic"
#vpc_id = aws_vpc.myvpc.id
vpc_id = data.aws_vpc.myvpc-data.id
ingress {
description = "TLS from VPC"
from_port = 22
to_port = 22
protocol = "tcp"
# cidr_blocks = [aws_vpc.myvpc.cidr_block]
cidr_blocks = ["0.0.0.0/0"]
# ipv6_cidr_blocks = [aws_vpc.main.ipv6_cidr_block]
}
ingress {
description = "TLS from VPC"
from_port = 80
to_port = 80
protocol = "tcp"
# cidr_blocks = [aws_vpc.myvpc.cidr_block]
cidr_blocks = ["0.0.0.0/0"]
# ipv6_cidr_blocks = [aws_vpc.main.ipv6_cidr_block]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
tags = {
Name = var.SG_tag
}
}
And this is my variable.tf file under vpc module
nnice#MPL-G8WW7D3:~/terra-test/modules/vpc$ cat variable.tf
variable "vpc_tenancy" {
description = "vpc instance tenancy"
type = string
default = "default"
}
variable "pub_av" {
description = "for public availability zone"
type = string
default = "us-east-1a"
}
variable "pri_av" {
description = "for private availability zone"
type = string
default = "us-east-1b"
}
variable "vpc_tag" {
description = "Tag for VPC"
type = string
default = "MyVPC"
}
variable "vpc_cidr" {
description = "for vpc cidr"
type = string
default = "10.0.0.0/16"
}
variable "mypubsn_cidr" {
description = "for public subnet cidr"
type = string
default = "10.0.1.0/24"
}
variable "myprisn_cidr" {
description = "for private subnet cidr"
type = string
default = "10.0.2.0/24"
}
variable "mypubsn_tag" {
description = "tag for public subnet"
type = string
default = "MyPubSN"
}
variable "myprisn_tag" {
description = "tag for private subnet"
type = string
default = "MyPriSN"
}
variable "igw_tag" {
description = "tag for IGW subnet"
type = string
default = "MyIGW"
}
variable "pubrt_tag" {
description = "tag for private subnet"
type = string
default = "MyPubRT"
}
variable "prirt_tag" {
description = "tag for IGW subnet"
type = string
default = "MyPriRT"
}
variable "pubroute" {
description = "cidr for public route"
type = string
default = "0.0.0.0/0"
}
variable "SG_tag" {
description = "tag for SG"
type = string
default = "MySG"
}
variable "map_public_ip_on_launch" {
description = "auto enable public ip to public subnet"
type = bool
default = true
}
And there is an env directory where I have my main.tf file
nnice#MPL-G8WW7D3:~/terra-test$ ls
env modules
nnice#MPL-G8WW7D3:~/terra-test$ cd env/private-ec2/
nnice#MPL-G8WW7D3:~/terra-test/env/private-ec2$ pwd
/home/nnice/terra-test/env/private-ec2
nnice#MPL-G8WW7D3:~/terra-test/env/private-ec2$ ls
main.tf
nnice#MPL-G8WW7D3:~/terra-test/env/private-ec2$ cat main.tf
#Provider
provider "aws" {
region = "us-east-1"
}
#VPC
module "vpc" {
source = "../../modules/vpc"
}
#EC2
module "prienv" {
source = "../../modules/prienv"
}
When I try to run terraform plan, I get the following errors:
Error: Reference to undeclared resource
│
│ on ../../modules/prienv/data.tf line 6, in data "aws_security_groups" "mysg-data":
│ 6: depends_on = [aws_security_groups.mysg]
│
│ A managed resource "aws_security_groups" "mysg" has not been declared in module.prienv.
╵
╷
│ Error: Reference to undeclared resource
│
│ on ../../modules/prienv/data.tf line 14, in data "aws_subnet" "myprisn-data":
│ 14: depends_on = [aws_subnet.myprisn]
│
│ A managed resource "aws_subnet" "myprisn" has not been declared in module.prienv.
Could anyone please let me know the solution? I am already using the vpc module in my main.tf file.
The issue you are running into is that the security group (aws_security_group.mysg) and the subnet (aws_subnet.myprisn) are defined in the vpc module, but the depends_on statements in the prienv module reference them as if they were declared locally.
Also, the Terraform documentation suggests that the depends_on argument
should be used only as a last resort.
So the way to go is to define the security group and the subnet as outputs of the vpc module and as variables of the prienv module, and then pass the outputs of vpc as parameter values to prienv.
This way Terraform recognizes the dependencies between the resources from the dependency graph it builds, and you won't need the depends_on argument.
So in consequence, the diffs to your config could look like:
modules/vpc/outputs.tf
output "sec_group" {
value = aws_security_group.mysg
description = "..."
}
output "subnet" {
value = aws_subnet.myprisn
description = "..."
}
modules/prienv/variable.tf
variable "sec_group" {
description = "..."
}
variable "subnet" {
description = "..."
}
modules/prienv/main.tf
resource "aws_instance" "mypriec2" {
...
subnet_id = var.subnet.id
vpc_security_group_ids = [var.sec_group.id]
...
}
env/main.tf
module "vpc" {
source = "../../modules/vpc"
}
#EC2
module "prienv" {
source = "../../modules/prienv"
sec_group = module.vpc.sec_group
subnet = module.vpc.subnet
}
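A small design note on my part (not from the original answer): the aws_security_group resource exports id, while the plural ids attribute only exists on the aws_security_groups data source, so you could also expose just the attributes the consumer needs and keep the module interface narrower:
modules/vpc/outputs.tf
output "sec_group_id" {
  value = aws_security_group.mysg.id
}
output "subnet_id" {
  value = aws_subnet.myprisn.id
}
modules/prienv/main.tf
resource "aws_instance" "mypriec2" {
...
subnet_id              = var.subnet_id
vpc_security_group_ids = [var.sec_group_id]
...
}
with matching variable declarations in modules/prienv/variable.tf and the same wiring in env/main.tf as above.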

My Lambda can't connect to my RDS instance

I'm trying to create both services within the same VPC and give them the appropriate security groups, but I can't make it work.
variable "vpc_cidr_block" {
default = "10.1.0.0/16"
}
variable "cidr_block_subnet_public" {
default = "10.1.1.0/24"
}
variable "cidr_block_subnets_private" {
default = ["10.1.2.0/24", "10.1.3.0/24", "10.1.4.0/24"]
}
data "aws_availability_zones" "available" {
state = "available"
}
resource "aws_vpc" "vpc" {
cidr_block = var.vpc_cidr_block
}
resource "aws_subnet" "private" {
count = length(var.cidr_block_subnets_private)
cidr_block = var.cidr_block_subnets_private[count.index]
vpc_id = aws_vpc.vpc.id
availability_zone = data.aws_availability_zones.available.names[count.index]
}
resource "aws_security_group" "lambda" {
vpc_id = aws_vpc.vpc.id
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "rds" {
vpc_id = aws_vpc.vpc.id
ingress {
description = "PostgreSQL"
from_port = 5432
protocol = "tcp"
to_port = 5432
// security_groups = [aws_security_group.lambda.id]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_lambda_function" "event" {
function_name = "ServerlessExampleEvent"
timeout = 30
s3_bucket = "mn-lambda"
s3_key = "mn/v1.0.0/lambda-1.0.0-all.jar"
handler = "dk.fitfit.handler.EventRequestHandler"
runtime = "java11"
memory_size = 256
role = aws_iam_role.event.arn
vpc_config {
security_group_ids = [aws_security_group.lambda.id]
subnet_ids = [for s in aws_subnet.private: s.id]
}
environment {
variables = {
JDBC_DATABASE_URL = "jdbc:postgresql://${aws_db_instance.rds.address}:${aws_db_instance.rds.port}/${aws_db_instance.rds.identifier}"
DATABASE_USERNAME = aws_db_instance.rds.username
DATABASE_PASSWORD = aws_db_instance.rds.password
}
}
}
resource "aws_db_subnet_group" "db" {
subnet_ids = aws_subnet.private.*.id
}
resource "aws_db_instance" "rds" {
allocated_storage = 10
engine = "postgres"
engine_version = "11.5"
instance_class = "db.t2.micro"
username = "postgres"
password = random_password.password.result
skip_final_snapshot = true
apply_immediately = true
vpc_security_group_ids = [aws_security_group.rds.id]
db_subnet_group_name = aws_db_subnet_group.db.name
}
resource "random_password" "password" {
length = 32
special = false
}
To avoid cluttering the question I've only posted the relevant parts of my HCL. Please let me know if I missed anything important.
The biggest issue is the commented-out security_groups parameter in the ingress block of the rds security group. Uncommenting it allows PostgreSQL traffic from the Lambda security group:
resource "aws_security_group" "rds" {
vpc_id = aws_vpc.vpc.id
ingress {
description = "PostgreSQL"
from_port = 5432
protocol = "tcp"
to_port = 5432
security_groups = [aws_security_group.lambda.id]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
As well as that, your JDBC string resolves to something like jdbc:postgresql://terraform-20091110230000000000000001.xxxx.us-east-1.rds.amazonaws.com:5432/terraform-20091110230000000000000001, because you aren't specifying an identifier for the RDS instance, so Terraform generates one prefixed with terraform- plus a timestamp and a counter. The important part is that the RDS instance doesn't contain a database named terraform-20091110230000000000000001 for your application to connect to, because you never asked for one to be created.
You can have RDS create a database on the instance by using the name parameter, and then update your JDBC connection string to reference that database name as well:
resource "aws_db_instance" "rds" {
allocated_storage = 10
engine = "postgres"
engine_version = "11.5"
instance_class = "db.t2.micro"
username = "postgres"
password = random_password.password.result
skip_final_snapshot = true
apply_immediately = true
name = "foo"
vpc_security_group_ids = [aws_security_group.rds.id]
db_subnet_group_name = aws_db_subnet_group.db.name
}
resource "aws_lambda_function" "event" {
function_name = "ServerlessExampleEvent"
timeout = 30
s3_bucket = "mn-lambda"
s3_key = "mn/v1.0.0/lambda-1.0.0-all.jar"
handler = "dk.fitfit.handler.EventRequestHandler"
runtime = "java11"
memory_size = 256
role = aws_iam_role.event.arn
vpc_config {
security_group_ids = [aws_security_group.lambda.id]
subnet_ids = [for s in aws_subnet.private : s.id]
}
environment {
variables = {
JDBC_DATABASE_URL = "jdbc:postgresql://${aws_db_instance.rds.address}:${aws_db_instance.rds.port}/${aws_db_instance.rds.name}"
DATABASE_USERNAME = aws_db_instance.rds.username
DATABASE_PASSWORD = aws_db_instance.rds.password
}
}
}

Unable to register ec2 instance into ECS using terraform

I am unable to register the EC2 instances into the ECS cluster. I have created the cluster and the service and registered the task definition, but the EC2 instances never register with the cluster. I pass user data that should register them, yet it has no effect: the instances provision fine, they just never appear in the cluster. I am using a module-wise structure; the relevant files are below and a screenshot is at the end of the question.
Autoscaling:
resource "aws_launch_configuration" "ec2" {
image_id = var.image_id
instance_type = var.instance_type
name = "ec2-${terraform.workspace}"
user_data = <<EOF
#!/bin/bash
echo 'ECS_CLUSTER=${var.cluster_name.name}' >> /etc/ecs/ecs.config
echo 'ECS_DISABLE_PRIVILEGED=true' >> /etc/ecs/ecs.config
EOF
key_name = var.key_name
iam_instance_profile = var.instance_profile
security_groups = [aws_security_group.webserver.id]
}
resource "aws_autoscaling_group" "asg" {
vpc_zone_identifier = var.public_subnet
desired_capacity = 2
max_size = 2
min_size = 2
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.ec2.name
target_group_arns = [var.tg.arn]
}
resource "aws_security_group" "webserver" {
name = "webserver-${terraform.workspace}"
description = "Allow internet traffic"
vpc_id = var.vpc_id
ingress {
description = "incoming for ec2-instance"
from_port = 0
to_port = 0
protocol = -1
security_groups = [var.alb_sg]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "webserver-sg"
}
}
output "ec2_sg" {
value = aws_security_group.webserver.id
}
Cluster:
resource "aws_ecs_cluster" "cluster" {
name = "wordpress-${terraform.workspace}"
}
output "cluster" {
value = aws_ecs_cluster.cluster.id
}
output "cluster1" {
value = aws_ecs_cluster.cluster
}
Service:
resource "aws_ecs_service" "wordpress" {
name = "Wordpress-${terraform.workspace}"
cluster = var.cluster
task_definition = var.task.id
desired_count = 2
scheduling_strategy = "REPLICA"
load_balancer {
target_group_arn = var.tg.arn
container_name = "wordpress"
container_port = 80
}
deployment_controller {
type = "ECS"
}
}
Task:
data "template_file" "init" {
template = "${file("${path.module}/template/containerdef.json")}"
vars = {
rds_endpoint = "${var.rds_endpoint}"
name = "${var.name}"
username = "${var.username}"
password = "${var.password}"
}
}
resource "aws_ecs_task_definition" "task" {
family = "wordpress"
container_definitions = "${data.template_file.init.rendered}"
network_mode = "bridge"
requires_compatibilities = ["EC2"]
memory = "1GB"
cpu = "1 vCPU"
task_role_arn = var.task_execution.arn
}
main.tf
data "aws_availability_zones" "azs" {}
data "aws_ssm_parameter" "name" {
name = "Dbname"
}
data "aws_ssm_parameter" "password" {
name = "db_password"
}
module "my_vpc" {
source = "./modules/vpc"
vpc_cidr = var.vpc_cidr
public_subnet = var.public_subnet
private_subnet = var.private_subnet
availability_zone = data.aws_availability_zones.azs.names
}
module "db" {
source = "./modules/rds"
ec2_sg = "${module.autoscaling.ec2_sg}"
allocated_storage = var.db_allocated_storage
storage_type = var.db_storage_type
engine = var.db_engine
engine_version = var.db_engine_version
instance_class = var.db_instance_class
name = data.aws_ssm_parameter.name.value
username = data.aws_ssm_parameter.name.value
password = data.aws_ssm_parameter.password.value
vpc_id = "${module.my_vpc.vpc_id}"
public_subnet = "${module.my_vpc.public_subnets_ids}"
}
module "alb" {
source = "./modules/alb"
vpc_id = "${module.my_vpc.vpc_id}"
public_subnet = "${module.my_vpc.public_subnets_ids}"
}
module "task" {
source = "./modules/task"
name = data.aws_ssm_parameter.name.value
username = data.aws_ssm_parameter.name.value
password = data.aws_ssm_parameter.password.value
rds_endpoint = "${module.db.rds_endpoint}"
task_execution = "${module.role.task_execution}"
}
module "autoscaling" {
source = "./modules/autoscaling"
vpc_id = "${module.my_vpc.vpc_id}"
#public_subnet = "${module.my_vpc.public_subnets_ids}"
tg = "${module.alb.tg}"
image_id = var.image_id
instance_type = var.instance_type
alb_sg = "${module.alb.alb_sg}"
public_subnet = "${module.my_vpc.public_subnets_ids}"
instance_profile = "${module.role.instance_profile}"
key_name = var.key_name
cluster_name = "${module.cluster.cluster1}"
}
module "role" {
source = "./modules/Iam_role"
}
module "cluster" {
source = "./modules/Ecs-cluster"
}
module "service" {
source = "./modules/services"
cluster = "${module.cluster.cluster}"
tg = "${module.alb.tg}"
task = "${module.task.task}"
}
ec2-instance role:
resource "aws_iam_role" "container_instance" {
name = "container_instance-${terraform.workspace}"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow"
}
]
}
EOF
tags = {
tag-key = "tag-value"
}
}
resource "aws_iam_instance_profile" "ec2_instance_role" {
name = "iam_instance_profile-${terraform.workspace}"
role = aws_iam_role.container_instance.name
}
resource "aws_iam_role_policy_attachment" "ec2_instance" {
role = aws_iam_role.container_instance.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
Screenshot: (not reproduced here)
Based on the chat discussion, the issue was caused by passing the wrong value for the instance profile. The launch configuration should reference the profile by name:
iam_instance_profile = var.instance_profile.name
With that change, the two instances register correctly with the cluster.
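For reference, a sketch of the corrected launch configuration with that single change applied (assuming, as the question implies, that var.instance_profile is the whole aws_iam_instance_profile object exported by the role module):
resource "aws_launch_configuration" "ec2" {
  image_id      = var.image_id
  instance_type = var.instance_type
  name          = "ec2-${terraform.workspace}"
  key_name      = var.key_name
  # Pass the profile's name, not the whole object.
  iam_instance_profile = var.instance_profile.name
  security_groups      = [aws_security_group.webserver.id]
  user_data = <<EOF
#!/bin/bash
echo 'ECS_CLUSTER=${var.cluster_name.name}' >> /etc/ecs/ecs.config
echo 'ECS_DISABLE_PRIVILEGED=true' >> /etc/ecs/ecs.config
EOF
}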

AWS SG self reference resolving different environments

I want to make this more modular for different environments (Dev, UAT, PROD). In that case, should I reference the security group by its name ("App${local.environment}sec_group") or just by sec_group? Will it be able to resolve source_security_group_id here? main.tf file:
resource "aws_security_group" "sec_group" {
name = "App${local.environment}sec_group"
vpc_id = "${local.vpc_id}"
} resource "aws_security_group_rule" "sec_group_allow_1865" {
type = "ingress"
from_port = 1865
to_port = 1865
protocol = "tcp"
security_group_id = "${aws_security_group.sec_group.id}"
source_security_group_id = "${aws_security_group.App${local.environment}sec_group.id}" '''
}
Variable.tf file:
environment = "${lookup(var.ws_to_environment_map, terraform.workspace, var.default_environment)}"
vpc_id = "${lookup(var.ws_to_vpc_map, terraform.workspace, var.default_environment)}"
variable "default_environment" {
default = "dev"
}
variable "ws_to_vpc_map" {
type = "map"
default = {
dev = "vpc-03a05d67831e1ff035"
uat = ""
prod = ""
}
}
variable "ws_to_environment_map" {
type = "map"
default = {
dev = "DEV"
uat = "UAT"
prod = "PROD"
}
}
Here you could use
source_security_group_id = "${aws_security_group.sec_group.id}"
instead of
source_security_group_id = "${aws_security_group.App${local.environment}sec_group.id}"
aws_security_group.sec_group refers to the security group resource declared with the label "sec_group" (resource "aws_security_group" "sec_group"), and aws_security_group.sec_group.id returns its id. Terraform references always use this resource label, not the AWS name argument, so the same reference resolves correctly in every environment without interpolating the name.
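Putting it together, a sketch of the self-referencing rule in the question's interpolation style; both sides point at the same resource label, so nothing environment-specific needs to be built into the reference:
resource "aws_security_group_rule" "sec_group_allow_1865" {
  type                     = "ingress"
  from_port                = 1865
  to_port                  = 1865
  protocol                 = "tcp"
  # Allow traffic on 1865 from members of the same security group.
  security_group_id        = "${aws_security_group.sec_group.id}"
  source_security_group_id = "${aws_security_group.sec_group.id}"
}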