I have the following variables:
variable "policies" {
  type        = list(string)
  description = "List of policy documents to attach to the IAM Role."
  default     = []
}

variable "policy_name" {
  type        = string
  description = "Name of the policy attached to the IAM Role."
  default     = null
}

variable "policy_description" {
  type        = string
  description = "Description of the policy attached to the IAM Role."
  default     = ""
}
These are used by the following Terraform resources:
resource "aws_iam_role" "this" {
  name               = var.role_name
  assume_role_policy = var.assume_role_policy
}

data "aws_iam_policy_document" "this" {
  count                   = var.policies != [] ? 1 : 0
  source_policy_documents = var.policies
}

resource "aws_iam_policy" "this" {
  count       = var.policies != [] ? 1 : 0
  name        = var.policy_name
  description = var.policy_description
  policy      = data.aws_iam_policy_document.this[count.index].json
}

resource "aws_iam_role_policy_attachment" "this" {
  count      = var.policies != [] ? 1 : 0
  policy_arn = aws_iam_policy.this[count.index].arn
  role       = aws_iam_role.this.name
}
Now, my understanding is that aws_iam_policy_document, aws_iam_policy and aws_iam_role_policy_attachment are to be created only when var.policies is not empty.
However, these resources are still planned for creation when the module is called like this:
module "iam_role_batch" {
  source             = "./resources/iam/role"
  role_name          = local.iam_role_batch_service_name
  assume_role_policy = data.aws_iam_policy_document.batch_service.json
}
# module.iam_role_batch.aws_iam_policy.this[0] will be created
+ resource "aws_iam_policy" "this" {
+ arn = (known after apply)
+ id = (known after apply)
+ name = (known after apply)
+ path = "/"
+ policy = jsonencode(
{
+ Statement = null
+ Version = "2012-10-17"
}
)
+ policy_id = (known after apply)
+ tags_all = (known after apply)
}
# module.iam_role_batch.aws_iam_role_policy_attachment.this[0] will be created
+ resource "aws_iam_role_policy_attachment" "this" {
+ id = (known after apply)
+ policy_arn = (known after apply)
+ role = "xxxxxxx"
}
Plan: 2 to add, 0 to change, 0 to destroy.
Why? AFAIK, policies is by default set to [], so the resources should not be planned for creation.
What am I missing?
"is by default set to []"
Actually, it is declared with type list(string). In Terraform, [] is an empty tuple, not an empty list(string), so your condition var.policies != [] compares values of two different types and always evaluates to true; that is why the resources are always created. [] is not the same as list(string).
Usually you would do the following instead:
count = length(var.policies) > 0 ? 1 : 0
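Applied to the module above, the three conditional resources would read as follows (only the count expressions change):

data "aws_iam_policy_document" "this" {
  # Only build the combined document when at least one policy was passed in.
  count                   = length(var.policies) > 0 ? 1 : 0
  source_policy_documents = var.policies
}

resource "aws_iam_policy" "this" {
  count       = length(var.policies) > 0 ? 1 : 0
  name        = var.policy_name
  description = var.policy_description
  policy      = data.aws_iam_policy_document.this[count.index].json
}

resource "aws_iam_role_policy_attachment" "this" {
  count      = length(var.policies) > 0 ? 1 : 0
  policy_arn = aws_iam_policy.this[count.index].arn
  role       = aws_iam_role.this.name
}

With var.policies left at its default [], length(var.policies) is 0 and none of the three objects are planned.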
Related
I am new to Terraform. While creating a Terraform AWS DynamoDB module, it tries to replace the existing table instead of creating a new table every time. But if we use a new Terraform state file, it creates another DynamoDB table without replacing the first.
Terraform Version: 0.15
locals {
  table_name_from_env = var.dynamodb_table
  table_name          = join("-", [local.table_name_from_env, lower(var.Component)])
  kinesis_name        = join("-", [local.table_name, "kinesis"])
}

resource "aws_dynamodb_table" "non_autoscaled" {
  count          = !var.autoscaling_enabled ? 1 : 0
  name           = local.table_name
  read_capacity  = var.read_capacity
  write_capacity = var.write_capacity
  billing_mode   = var.billing_mode
  hash_key       = var.hash_key
  range_key      = var.range_key

  dynamic "attribute" {
    for_each = var.attributes
    content {
      name = attribute.value.name
      type = attribute.value.type
    }
  }

  ttl {
    enabled        = var.ttl_enabled
    attribute_name = var.ttl_attribute_name
  }

  # tags = tomap({ "organization" = var.organization, "businessunit" = var.businessunit })
  tags = tomap({ "Component" = var.Component })
  # tags = local.common_tags
}

resource "aws_dynamodb_table" "autoscaled" {
  count          = var.autoscaling_enabled ? 1 : 0
  name           = local.table_name
  read_capacity  = var.read_capacity
  write_capacity = var.write_capacity
  billing_mode   = var.billing_mode
  hash_key       = var.hash_key
  range_key      = var.range_key

  dynamic "attribute" {
    for_each = var.attributes
    content {
      name = attribute.value.name
      type = attribute.value.type
    }
  }

  ttl {
    enabled        = var.ttl_enabled
    attribute_name = var.ttl_attribute_name
  }
}

resource "aws_kinesis_stream" "dynamodb_table_kinesis" {
  count       = var.kinesis_enabled ? 1 : 0
  name        = local.kinesis_name
  shard_count = var.shard_count

  stream_mode_details {
    stream_mode = var.kinesis_stream_mode
  }
}

resource "aws_dynamodb_kinesis_streaming_destination" "dynamodb_table_kinesis_dest_non_autoscaled" {
  count      = var.kinesis_enabled && !var.autoscaling_enabled ? 1 : 0
  stream_arn = aws_kinesis_stream.dynamodb_table_kinesis[0].arn
  table_name = aws_dynamodb_table.non_autoscaled[0].name
}

resource "aws_dynamodb_kinesis_streaming_destination" "dynamodb_table_kinesis_dest_autoscaled" {
  count      = var.kinesis_enabled && var.autoscaling_enabled ? 1 : 0
  stream_arn = aws_kinesis_stream.dynamodb_table_kinesis[0].arn
  table_name = aws_dynamodb_table.autoscaled[0].name
}
Can anybody suggest what is missing in my approach?
Terraform Plan output:
+ terraform plan
module.aws_managed_dynamodb_table.aws_kinesis_stream.dynamodb_table_kinesis[0]: Refreshing state... [id=arn:aws:kinesis:stream/dynamodb-testing12345-coms-kinesis]
module.aws_managed_dynamodb_table.aws_dynamodb_table.non_autoscaled[0]: Refreshing state... [id=dynamodb-testing12345-coms]
module.aws_managed_dynamodb_table.aws_dynamodb_kinesis_streaming_destination.dynamodb_table_kinesis_dest_non_autoscaled[0]: Refreshing state... [id=dynamodb-testing12345-coms,arn:aws:kinesis:ap-south-1:stream/dynamodb-testing12345-coms-kinesis]
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
-/+ destroy and then create replacement
Terraform will perform the following actions:
# module.aws_managed_dynamodb_table.aws_dynamodb_kinesis_streaming_destination.dynamodb_table_kinesis_dest_non_autoscaled[0] must be replaced
-/+ resource "aws_dynamodb_kinesis_streaming_destination" "dynamodb_table_kinesis_dest_non_autoscaled" {
~ id = "dynamodb-testing12345-coms,arn:aws:kinesis:ap-south-1:stream/dynamodb-testing12345-coms-kinesis" -> (known after apply)
~ stream_arn = "arn:aws:kinesis:ap-south-1:stream/dynamodb-testing12345-coms-kinesis" -> (known after apply) # forces replacement
~ table_name = "dynamodb-testing12345-coms" -> "dynamodb-testing123456-coms" # forces replacement
}
# module.aws_managed_dynamodb_table.aws_dynamodb_table.non_autoscaled[0] must be replaced
-/+ resource "aws_dynamodb_table" "non_autoscaled" {
~ arn = "arn:aws:dynamodb:ap-south-1:table/dynamodb-testing12345-coms" -> (known after apply)
~ id = "dynamodb-testing12345-coms" -> (known after apply)
~ name = "dynamodb-testing12345-coms" -> "dynamodb-testing123456-coms" # forces replacement
+ stream_arn = (known after apply)
- stream_enabled = false -> null
+ stream_label = (known after apply)
+ stream_view_type = (known after apply)
tags = {
"Component" = "XXX"
}
# (6 unchanged attributes hidden)
~ point_in_time_recovery {
~ enabled = false -> (known after apply)
}
+ server_side_encryption {
+ enabled = (known after apply)
+ kms_key_arn = (known after apply)
}
# (3 unchanged blocks hidden)
}
# module.aws_managed_dynamodb_table.aws_kinesis_stream.dynamodb_table_kinesis[0] must be replaced
-/+ resource "aws_kinesis_stream" "dynamodb_table_kinesis" {
~ arn = (known after apply)
~ id = (known after apply)
~ name = "dynamodb-testing12345-coms-kinesis" -> "dynamodb-testing123456-coms-kinesis" # forces replacement
- shard_level_metrics = [] -> null
- tags = {} -> null
~ tags_all = {} -> (known after apply)
# (4 unchanged attributes hidden)
# (1 unchanged block hidden)
}
Plan: 3 to add, 0 to change, 3 to destroy.
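For what it's worth, the plan above already shows the trigger: each -/+ is driven by a "# forces replacement" on name ("dynamodb-testing12345-coms" to "dynamodb-testing123456-coms"), and renaming a DynamoDB table or Kinesis stream always forces replacement because the name is the resource's identifier. If the intent is that Terraform should never destroy an existing table, one option is a lifecycle guard; a minimal sketch, not part of the original module:

resource "aws_dynamodb_table" "non_autoscaled" {
  count = !var.autoscaling_enabled ? 1 : 0
  name  = local.table_name

  # Make the plan fail with an error instead of destroying the table
  # whenever a change (such as a new name) would force replacement.
  lifecycle {
    prevent_destroy = true
  }

  # ... remaining configuration as in the original resource ...
}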
I need to create multiple DNS records with their respective IPs. I need to assign the first IP to the first DNS name and the second IP to the second, something like dns1 - 10.1.20.70 and dns2 - 10.1.20.40. But both of the IPs are getting assigned to both DNS names (dns1 and dns2). Any suggestions?
Code:
resource "aws_route53_record" "onprem_api_record" {
  for_each = toset(local.vm_fqdn)
  zone_id  = data.aws_route53_zone.dns_zone.zone_id
  name     = each.value
  type     = "A"
  records  = var.api_ips[terraform.workspace]
  ttl      = "300"
}

locals {
  vm_fqdn = flatten(["dns1-${terraform.workspace}.${local.domain}", "dns2-${terraform.workspace}.${local.domain}"])
}

variable "api_ips" {
  type = map(list(string))
  default = {
    "dev"  = ["10.1.20.70", "10.1.20.140"]
    "qa"   = ["10.1.22.180", "10.1.22.150"]
    "test" = ["10.1.23.190", "10.1.23.160"]
  }
}
Output:
# aws_route53_record.onprem_api_record["dns1.dev.ciscodcloud.com"] will be created
+ resource "aws_route53_record" "onprem_api_record" {
+ allow_overwrite = (known after apply)
+ fqdn = (known after apply)
+ id = (known after apply)
+ name = "dns1.dev.ciscodcloud.com"
+ records = [
+ "10.1.20.40",
+ "10.1.20.70",
]
+ ttl = 300
+ type = "A"
+ zone_id = "Z30HW9VL6PYDXQ"
}
# aws_route53_record.onprem_api_record["dns2.dev.cisco.com"] will be created
+ resource "aws_route53_record" "onprem_api_record" {
+ allow_overwrite = (known after apply)
+ fqdn = (known after apply)
+ id = (known after apply)
+ name = "dns2.dev.cisco.com"
+ records = [
+ "10.1.20.40",
+ "10.1.20.70",
]
+ ttl = 300
+ type = "A"
+ zone_id = "Z30HW9VL6PYDXQ"
}
Plan: 2 to add, 0 to change, 1 to destroy.
Both IPs land on both records because records = var.api_ips[terraform.workspace] passes the whole list to every record; the for_each over toset(local.vm_fqdn) only varies the name. You may want to use zipmap. Here is a terse example showing its use in for_each (with a for expression) as it could be used in your case.
resource "aws_route53_record" "onprem_api_record" {
  for_each = { for fqdn, ip in zipmap(local.vm_fqdn, local.ips["dev"]) : fqdn => ip }
  zone_id  = "x"
  name     = each.key
  type     = "A"
  records  = [each.value]
  ttl      = "300"
}

locals {
  ips = {
    "dev"  = ["10.1.20.70", "10.1.20.140"]
    "qa"   = ["10.1.22.180", "10.1.22.150"]
    "test" = ["10.1.23.190", "10.1.23.160"]
  }
  vm_fqdn = ["dns1-dev.domain", "dns2-dev.domain"]
}
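For clarity, zipmap pairs the two lists positionally, so zipmap(local.vm_fqdn, local.ips["dev"]) evaluates to { "dns1-dev.domain" = "10.1.20.70", "dns2-dev.domain" = "10.1.20.140" }. The wrapping for expression is only there to show the pattern; since for_each accepts a map directly, for_each = zipmap(local.vm_fqdn, local.ips["dev"]) would behave the same.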
And the plan looks like:
# aws_route53_record.onprem_api_record["dns1-dev.domain"] will be created
+ resource "aws_route53_record" "onprem_api_record" {
+ allow_overwrite = (known after apply)
+ fqdn = (known after apply)
+ id = (known after apply)
+ name = "dns1-dev.domain"
+ records = [
+ "10.1.20.70",
]
+ ttl = 300
+ type = "A"
+ zone_id = "x"
}
# aws_route53_record.onprem_api_record["dns2-dev.domain"] will be created
+ resource "aws_route53_record" "onprem_api_record" {
+ allow_overwrite = (known after apply)
+ fqdn = (known after apply)
+ id = (known after apply)
+ name = "dns2-dev.domain"
+ records = [
+ "10.1.20.140",
]
+ ttl = 300
+ type = "A"
+ zone_id = "x"
}
Plan: 2 to add, 0 to change, 0 to destroy.
You can do this as follows with count:
resource "aws_route53_record" "onprem_api_record" {
  count   = length(local.vm_fqdn)
  zone_id = data.aws_route53_zone.dns_zone.zone_id
  name    = local.vm_fqdn[count.index]
  type    = "A"
  records = [var.api_ips[terraform.workspace][count.index]]
  ttl     = "300"
}
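One caveat with the count form: records are tracked by numeric index, so inserting or reordering entries in local.vm_fqdn shifts the indices and makes Terraform destroy and recreate existing records. The zipmap/for_each variant above keys each record by its FQDN and avoids that churn.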
I have several domains and I want to create subdomains with as much DRY as possible. This is the original structure:
variable "domain1" {
  type    = list(string)
  default = ["www", "www2"]
}

variable "domain2" {
  type    = list(string)
  default = ["www3", "www1"]
}

resource "aws_route53_record" "domain1" {
  for_each = toset(var.domain1)
  type     = "A"
  name     = "${each.key}.domain1.com"
  zone_id  = ""
}

resource "aws_route53_record" "domain2" {
  for_each = toset(var.domain2)
  type     = "A"
  name     = "${each.key}.domain2.com"
  zone_id  = ""
}
that I want to combine to one variable and one resource block:
variable "subdomains" {
  type = map(list(string))
  default = {
    "domain1.com" = ["www", "www2"]
    "domain2.com" = ["www3", "www1"]
  }
}

resource "aws_route53_record" "domain1" {
  for_each = var.subdomains // make magic happen here...
  type     = "A"
  name     = "${each.subdomain_part}.${each.domain_part}" // ...and here
  zone_id  = ""
}
Is there a way to achieve this?
You can flatten your var.subdomains as follows:
locals {
  subdomains_flat = flatten([for domain, subdomains in var.subdomains :
    [for subdomain in subdomains :
      {
        domain_part    = domain
        subdomain_part = subdomain
      }
    ]
  ])
}
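With the example default above, local.subdomains_flat becomes a list of four objects of the shape { domain_part = "domain1.com", subdomain_part = "www" }.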
then:
resource "aws_route53_record" "domain1" {
  for_each = { for idx, val in local.subdomains_flat : idx => val }
  type     = "A"
  name     = "${each.value.subdomain_part}.${each.value.domain_part}"
  zone_id  = ""
}
Following up on the comment about a messy state: I would not say messy, but there are certainly some downsides. The index in that answer is numeric, so a plan shows that the resources end up as:
# aws_route53_record.domain1["0"] will be created
+ resource "aws_route53_record" "domain1" {
# aws_route53_record.domain1["1"] will be created
+ resource "aws_route53_record" "domain1" {
That can create problems when we add or remove subdomains from the list: the order can change, and that would cause the resources to be destroyed and recreated, which is not ideal for Route 53 records.
Here is another approach that creates a different index in the resource name.
We still use flatten to extract the subdomains, but in this case I'm concatenating right away, so the local variable is ready for the aws_route53_record resource to consume.
provider "aws" {
  region = "us-east-2"
}

variable "subdomains" {
  type = map(list(string))
  default = {
    "domain1.com" = ["www", "www2"]
    "domain2.com" = ["www3", "www1"]
  }
}

locals {
  records = flatten([for d, subs in var.subdomains : [for s in subs : "${s}.${d}"]])
}

resource "aws_route53_record" "domain1" {
  for_each = toset(local.records)
  type     = "A"
  name     = each.value
  zone_id  = "us-east-1"
}
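With the example variable, local.records evaluates to ["www.domain1.com", "www2.domain1.com", "www3.domain2.com", "www1.domain2.com"], so each record is keyed by a stable string rather than a positional index.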
A terraform plan of that looks like:
Terraform will perform the following actions:
# aws_route53_record.domain1["www.domain1.com"] will be created
+ resource "aws_route53_record" "domain1" {
+ allow_overwrite = (known after apply)
+ fqdn = (known after apply)
+ id = (known after apply)
+ name = "www.domain1.com"
+ type = "A"
+ zone_id = "us-east-1"
}
# aws_route53_record.domain1["www1.domain2.com"] will be created
+ resource "aws_route53_record" "domain1" {
+ allow_overwrite = (known after apply)
+ fqdn = (known after apply)
+ id = (known after apply)
+ name = "www1.domain2.com"
+ type = "A"
+ zone_id = "us-east-1"
}
...
I am using the AWS provider. I've added transition blocks in my lifecycle_rule block with the appropriate days and storage_class properties. Besides that change, I've also increased the expiry_days from 30 to 180.
The variable looks like this:
variable "bucket_details" {
  type = map(object({
    bucket_name                   = string
    purpose                       = string
    infrequent_transition_days    = number
    infrequent_transition_storage = string
    archive_transition_days       = number
    archive_transition_storage    = string
    expiry_days                   = number
    versioning                    = bool
  }))
}
The resource looks like this: (I've removed unrelated configs)
resource "aws_s3_bucket" "bucket-s3" {
  for_each = var.bucket_details
  bucket   = "${each.key}-${var.region}-${var.environment}"

  lifecycle_rule {
    id      = "clear"
    enabled = true

    transition {
      days          = each.value.infrequent_transition_days
      storage_class = each.value.infrequent_transition_storage
    }

    transition {
      days          = each.value.archive_transition_days
      storage_class = each.value.archive_transition_storage
    }

    expiration {
      days = each.value.expiry_days
    }
  }
}
I've followed this transition example for reference.
When I run terraform plan I get the following output:
~ lifecycle_rule {
abort_incomplete_multipart_upload_days = 0
enabled = true
id = "clear"
tags = {}
+ expiration {
+ days = 180
}
- expiration {
- days = 30 -> null
- expired_object_delete_marker = false -> null
}
}
No transition changes listed. Could it be because transition is AWS-specific and thus Terraform does not catch it?
I tried your code as is and here is the response:
provider "aws" {
  region = "us-west-2"
}

variable "region" {
  default = "us-west-2"
}

variable "environment" {
  default = "dev"
}

variable "bucket_details" {
  type = map(object({
    bucket_name                   = string
    infrequent_transition_days    = number
    infrequent_transition_storage = string
    archive_transition_days       = number
    archive_transition_storage    = string
    expiry_days                   = number
  }))
  default = {
    hello_world = {
      bucket_name                   = "demo-001"
      infrequent_transition_days    = 10
      infrequent_transition_storage = "STANDARD_IA"
      archive_transition_days       = 10
      archive_transition_storage    = "GLACIER"
      expiry_days                   = 30
    }
  }
}

resource "aws_s3_bucket" "bucket-s3" {
  for_each = var.bucket_details
  bucket   = "${each.key}-${var.region}-${var.environment}"

  lifecycle_rule {
    id      = "clear"
    enabled = true

    transition {
      days          = each.value.infrequent_transition_days
      storage_class = each.value.infrequent_transition_storage
    }

    transition {
      days          = each.value.archive_transition_days
      storage_class = each.value.archive_transition_storage
    }

    expiration {
      days = each.value.expiry_days
    }
  }
}
Response of Terraform plan:
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
# aws_s3_bucket.bucket-s3["hello_world"] will be created
+ resource "aws_s3_bucket" "bucket-s3" {
+ acceleration_status = (known after apply)
+ acl = "private"
+ arn = (known after apply)
+ bucket = "hello_world-us-west-2-dev"
+ bucket_domain_name = (known after apply)
+ bucket_regional_domain_name = (known after apply)
+ force_destroy = false
+ hosted_zone_id = (known after apply)
+ id = (known after apply)
+ region = (known after apply)
+ request_payer = (known after apply)
+ tags_all = (known after apply)
+ website_domain = (known after apply)
+ website_endpoint = (known after apply)
+ lifecycle_rule {
+ enabled = true
+ id = "clear"
+ expiration {
+ days = 30
}
+ transition {
+ days = 10
+ storage_class = "GLACIER"
}
+ transition {
+ days = 10
+ storage_class = "STANDARD_IA"
}
}
+ versioning {
+ enabled = (known after apply)
+ mfa_delete = (known after apply)
}
}
Plan: 1 to add, 0 to change, 0 to destroy.
──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply"
now.
As you can see, there are transition changes. Can you try setting default vars and checking the response?
I'm trying to add a new AWS account using Terraform, but when I run terraform plan it says the existing aliases of existing accounts must be replaced, referencing our master account.
I'm trying to figure out whether this would cause issues once deployed; I don't want any account numbers to change.
Here’s the plan output:
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
-/+ destroy and then create replacement
Terraform will perform the following actions:
# aws_iam_account_alias.project-dev must be replaced
-/+ resource "aws_iam_account_alias" "project-dev" {
~ account_alias = "project-master" -> "project-dev" # forces replacement
~ id = "project-master" -> (known after apply)
}
# aws_iam_account_alias.project-platform must be replaced
-/+ resource "aws_iam_account_alias" "project-platform" {
~ account_alias = "project-master" -> "project-platform" # forces replacement
~ id = "project-master" -> (known after apply)
}
# aws_iam_account_alias.project-prod must be replaced
-/+ resource "aws_iam_account_alias" "project-prod" {
~ account_alias = "project-master" -> "project-prod" # forces replacement
~ id = "project-master" -> (known after apply)
}
# aws_iam_account_alias.project-new will be created
+ resource "aws_iam_account_alias" "project-new" {
+ account_alias = "project-new"
+ id = (known after apply)
}
# aws_iam_account_alias.project-stage must be replaced
-/+ resource "aws_iam_account_alias" "project-stage" {
~ account_alias = "project-master" -> "project-stage" # forces replacement
~ id = "project-master" -> (known after apply)
}
# aws_organizations_account.project-new will be created
+ resource "aws_organizations_account" "project-new" {
+ arn = (known after apply)
+ email = "aws-admins+project-new#project.io"
+ id = (known after apply)
+ joined_method = (known after apply)
+ joined_timestamp = (known after apply)
+ name = "PROJECT-NEW"
+ parent_id = (known after apply)
+ status = (known after apply)
+ tags = {
+ "env" = "new"
}
}
Plan: 6 to add, 0 to change, 4 to destroy.
Here’s the terraform code:
# ./providers.tf
terraform {
  required_version = "0.12.12"

  backend "s3" {
    bucket  = "{redacted-acc-no}-tfstate"
    key     = "core/accounts"
    region  = "eu-west-1"
    profile = "PROJECT-MASTER"
  }
}

provider "aws" {
  region  = "eu-west-1"
  profile = "PROJECT-MASTER"
}

# ./accounts.tf
# dev
resource "aws_organizations_account" "project-dev" {
  name  = "PROJECT-DEV"
  email = "aws-admins+project-dev#project.io"
  tags = {
    env = "dev"
  }
}

resource "aws_iam_account_alias" "project-dev" {
  account_alias = "project-dev"
}

# stage
resource "aws_organizations_account" "project-stage" {
  name  = "PROJECT-STAGE"
  email = "aws-admins+project-stage#project.io"
  tags = {
    env = "stage"
  }
}

resource "aws_iam_account_alias" "project-stage" {
  account_alias = "project-stage"
}

# project-prod
resource "aws_organizations_account" "project-prod" {
  name  = "PROJECT-PROD"
  email = "aws-admins+project-prod#project.io"
  tags = {
    env = "prod"
  }
}

resource "aws_iam_account_alias" "project-prod" {
  account_alias = "project-prod"
}

# project-new
resource "aws_organizations_account" "project-new" {
  name  = "PROJECT-NEW"
  email = "aws-admins+project-new#project.io"
  tags = {
    env = "new"
  }
}

resource "aws_iam_account_alias" "project-pepelatz" {
  account_alias = "project-new"
}

# project-platform
resource "aws_organizations_account" "project-platform" {
  name  = "PROJECT-PLATFORM"
  email = "aws-admins+project-platform#project.io"
  tags = {
    env = "shared"
  }
}

resource "aws_iam_account_alias" "project-platform" {
  account_alias = "project-platform"
}
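In case it helps to know why this happens: aws_iam_account_alias always manages the alias of whichever account the provider authenticates as, and an AWS account can hold only one alias. Every resource here runs against the single PROJECT-MASTER provider, so all the alias resources are fighting over the master account's one alias, which is exactly what the "project-master" -> "project-dev" replacement lines in the plan show. The usual fix is one aliased provider per member account; a minimal sketch, assuming a cross-account role such as OrganizationAccountAccessRole exists in each member account (the role name and account id placeholder are illustrative):

provider "aws" {
  alias   = "dev"
  region  = "eu-west-1"
  profile = "PROJECT-MASTER"

  assume_role {
    # Hypothetical role ARN; substitute the member account id and
    # whatever cross-account role your organization provisions.
    role_arn = "arn:aws:iam::<dev-account-id>:role/OrganizationAccountAccessRole"
  }
}

resource "aws_iam_account_alias" "project-dev" {
  provider      = aws.dev
  account_alias = "project-dev"
}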