Using a conditional (if) expression to skip null values in nested Terraform for expressions

locals {
# Flattens var.appr (map of product => { RO, WO } id lists) into a flat list of
# ACL objects: each "RO" id expands to READ+DESCRIBE entries, each "WO" id to a
# single WRITE entry.
acl_out = flatten([
for opi, clsan in var.appr : [
for co, coo in clsan : [
# NOTE(review): coo is null when RO or WO is omitted (optional() with no
# default), and a null collection here fails with "A null value cannot be
# used as the collection in a 'for' expression" — this is the bug being
# asked about; the corrected version appears further down.
for applo in coo : [
for op in ( co == "RO" ? ["READ","DESCRIBE"] : ["WRITE"]) : {
oper = op
appid_lo = applo
opic-name = opi
process = co
}
]
]
]
])
}
variable "appr" {
description = "A complex object "
# Map of product name => object with optional RO (read) and WO (write) lists
# of app ids. Each optional() attribute has no default value, so an omitted
# attribute evaluates to null — which is what triggers the for-expression
# error described in this thread.
type = map(object({
#displayname = string
RO = optional(list(string))
WO = optional(list(string))
}))
}
If co == "RO" then use ["READ","DESCRIBE"]
If co == "WO" then use ["WRITE"]
The expression above fails with a null value: when WO is not defined/unset (as in product2), the conditional expression needs to be corrected.
Input :
Working
appr = {
product1 = { WO = ["6470"], RO = ["6471","5538"] },
product2 = { WO = ["5555"], RO = ["6472"]},
product3 = { WO = ["6473"], RO = ["6474"] },
}
Not Working
appr = {
product1 = { WO = ["6470"], RO = ["6471","5538"] },
product2 = { RO = ["6472"]},
product3 = { WO = ["6473"], RO = ["6474"] },
}
Error:
A null value cannot be used as the collection in a 'for' expression.
Tried this way also fails
for op in ( co == "RO" = ["READ","DESCRIBE"] || co == "WO" = ["WRITE"] ) : {
Desired Result :
{
opic-name = "product1"
oper = "WRITE"
appid_lo = 6470
},
{
opic-name = "product1"
oper = "READ"
appid_lo = 6471
},
{
opic-name = "product1"
oper = "DESCRIBE"
appid_lo = 6471
}
and so on

This question was a fun one!
Check if it is null before running the next for
locals {
# Flatten var.appr into a list of ACL objects: each "RO" app id yields
# READ + DESCRIBE entries, each "WO" app id yields a WRITE entry.
# The null guard is folded directly into the innermost collection
# expression, so an unset RO/WO attribute (null) simply contributes
# nothing — no separate conditional wrapper or extra nesting needed.
acl_out = flatten([
for opi, clsan in var.appr : [
for co, coo in clsan : [
for applo in (coo == null ? [] : coo) : [
for op in (co == "RO" ? ["READ","DESCRIBE"] : ["WRITE"]) : {
oper      = op
appid_lo  = applo
opic-name = opi
process   = co
}
]
]
]
])
}
Output for acl_out
acl_out = [
+ {
+ appid_lo = "6471"
+ oper = "READ"
+ opic-name = "product1"
+ process = "RO"
},
+ {
+ appid_lo = "6471"
+ oper = "DESCRIBE"
+ opic-name = "product1"
+ process = "RO"
},
+ {
+ appid_lo = "5538"
+ oper = "READ"
+ opic-name = "product1"
+ process = "RO"
},
+ {
+ appid_lo = "5538"
+ oper = "DESCRIBE"
+ opic-name = "product1"
+ process = "RO"
},
+ {
+ appid_lo = "6470"
+ oper = "WRITE"
+ opic-name = "product1"
+ process = "WO"
},
+ {
+ appid_lo = "6472"
+ oper = "READ"
+ opic-name = "product2"
+ process = "RO"
},
+ {
+ appid_lo = "6472"
+ oper = "DESCRIBE"
+ opic-name = "product2"
+ process = "RO"
},
+ {
+ appid_lo = "6474"
+ oper = "READ"
+ opic-name = "product3"
+ process = "RO"
},
+ {
+ appid_lo = "6474"
+ oper = "DESCRIBE"
+ opic-name = "product3"
+ process = "RO"
},
+ {
+ appid_lo = "6473"
+ oper = "WRITE"
+ opic-name = "product3"
+ process = "WO"
},
]
Cheers!

Related

Terraform for_each nested solution

I need to access the elements of a nested map but I can't do it and I get an error Can't access attributes on a primitive-typed value (string).
# GLUE
locals {
# Glue table definitions, iterated with for_each in the resource below.
# NOTE(review): `columns` is a *list* of objects here; iterating one of these
# objects with a single-variable `for` yields its attribute *values* (strings),
# which is why `row.name` below fails with "Can't access attributes on a
# primitive-typed value (string)".
glue_catalog_resources = {
uni = {
name = "mst_business_units",
description = "Unidades de negocios"
columns = [
{
name = "codigouni"
type = "int"
comment = "Code"
},{
name = "descuni"
type = "varchar(256)"
comment = "Description"
},{
name = "estado"
type = "varchar(256)"
comment = "Current status"
}
]
}
}
}
# Creates one external parquet Glue catalog table per entry in
# local.glue_catalog_resources.
resource "aws_glue_catalog_table" "glue_catalogs_redshift" {
for_each = local.glue_catalog_resources
name = each.value.name
database_name = aws_glue_catalog_database.cl_sales.name
description = each.value.description
retention = 0
table_type = "EXTERNAL_TABLE"
parameters = {
EXTERNAL = "TRUE"
"classification" = "parquet"
}
storage_descriptor {
location = ""
input_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"
output_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"
number_of_buckets = 0
compressed = "false"
stored_as_sub_directories = "false"
ser_de_info {
serialization_library = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
parameters = {
"serialization.format" = 1
}
}
parameters = {
"typeOfData": "kinesis"
}
# NOTE(review): this is the failing part of the question. A `columns` block's
# name/type/comment each take a single string, but these expressions build
# nested lists — and the inner `for row in column` iterates an object's
# attribute values (strings), so `row.name` errors. The corrected approach
# (a dynamic "columns" block, one per column) is shown in the answer below.
columns {
name = [ for column in each.value.columns: [ for row in column: row.name ] ]
type = [ for column in each.value.columns: [ for row in column: row.type ] ]
comment = [ for column in each.value.columns: [ for row in column: row.comment ] ]
}
}
}
I need to include in the columns tag columns name, type and comment reading it from the map above and I can't do it, what would be the correct way to read it
columns {
name = [ for column in each.value.columns: [ for row in column: row.name ] ]
type = [ for column in each.value.columns: [ for row in column: row.type ] ]
comment = [ for column in each.value.columns: [ for row in column: row.comment ] ]
}
In this case, columns is a list of key value pairs, so you need to adjust the second for a bit:
name = [for column in local.columns : [for k, v in column : v if k == "name"]]
type = [for column in local.columns : [for k, v in column : v if k == "type"]]
comment = [for column in local.columns : [for k, v in column : v if k == "comment"]]
EDIT: The first part of the answer resolves the issue with accessing the list elements. However, in order for this to work as you would want to, I would suggest a couple of changes. In the local variable, you can change columns to be a map of objects:
locals {
# Glue table definitions with `columns` restructured as a map of objects so
# each column can be iterated with for_each in a dynamic "columns" block.
glue_catalog_resources = {
uni = {
name = "mst_business_units",
description = "Unidades de negocios"
columns = {
"codigouni" = {
name = "codigouni"
type = "int"
comment = "Code"
},
"descuni" = {
name = "descuni"
type = "varchar(256)"
comment = "Description"
},
# Fixed: the key was "estado " with a trailing space, silently creating a
# key that does not match the column name.
"estado" = {
name = "estado"
type = "varchar(256)"
comment = "Current status"
}
}
}
}
}
Then, in the columns block you would do the following:
# Same table resource as in the question, but with the columns emitted via a
# dynamic block — one `columns` block per entry of each.value.columns.
resource "aws_glue_catalog_table" "glue_catalogs_redshift" {
for_each = local.glue_catalog_resources
name = each.value.name
database_name = aws_glue_catalog_database.cl_sales.name
description = each.value.description
retention = 0
table_type = "EXTERNAL_TABLE"
parameters = {
EXTERNAL = "TRUE"
"classification" = "parquet"
}
storage_descriptor {
location = ""
input_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"
output_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"
number_of_buckets = 0
compressed = "false"
stored_as_sub_directories = "false"
ser_de_info {
serialization_library = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
parameters = {
"serialization.format" = 1
}
}
parameters = {
"typeOfData" : "kinesis"
}
# Each map entry in each.value.columns becomes one columns { ... } block;
# `columns.value` is the per-column object with name/type/comment strings.
dynamic "columns" {
for_each = each.value.columns
content {
name = columns.value.name
type = columns.value.type
comment = columns.value.comment
}
}
}
}
This approach is using dynamic [1] and for_each [2] to iterate over all of the columns and assign the wanted values to arguments.
[1] https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks
[2] https://developer.hashicorp.com/terraform/language/meta-arguments/for_each

Output list of maps in parent module from child module in Terraform

I have three child modules and one parent module for ssm patching. One module in particular is different than the rest (RHEL) it will never need to be changed, so the child module was written differently.
I have my baseline approval rules written as a dynamic object and have prepared the variable for the object.
# SSM patch baseline: one approval_rule block per entry of
# var.baseline_approval_rules, each with nested patch_filter blocks.
resource "aws_ssm_patch_baseline" "baseline" {
name = format("%s-%s-%s-baseline", var.patch_baseline_label, var.env, lower(local.operating_system))
description = var.description
operating_system = local.operating_system
approved_patches = var.approved_patches
rejected_patches = var.rejected_patches
approved_patches_compliance_level = var.compliance_level
dynamic "approval_rule" {
for_each = var.baseline_approval_rules
content {
approve_after_days = approval_rule.value.approve_after_days
compliance_level = approval_rule.value.compliance_level
enable_non_security = approval_rule.value.enable_non_security
# patch filter values : https://docs.aws.amazon.com/cli/latest/reference/ssm/describe-patch-properties.html
dynamic "patch_filter" {
# Note: variable attribute `name` maps to provider attribute `key`.
for_each = approval_rule.value.patch_baseline_filters
content {
key = patch_filter.value.name
values = patch_filter.value.values
}
}
}
}
tags = merge(var.tags, { Name = format("%s-%s-%s", var.patch_baseline_label, var.env, lower(local.operating_system)) })
}
The variable for the approval rules is written as such:
variable "baseline_approval_rules" {
description = "list of approval rules defined in the patch baseline (Max 10 rules). For compliance_level, it means that if an approved patch is reported as missing, this is the severity of the compliance violation. Valid compliance levels include the following: CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, UNSPECIFIED. The default value is UNSPECIFIED."
type = list(object({
approve_after_days     = number
compliance_level       = string
enable_non_security    = bool
patch_baseline_filters = list(object({
name   = string
values = list(string)
}))
}))
# Default: a single rule that approves critical security patches immediately
# (0 days) for the listed RHEL product versions.
default = [
{
approve_after_days = 0
# The compliance level of a patch will default to unspecified if a patch isn't applied
compliance_level    = "CRITICAL"
enable_non_security = false
patch_baseline_filters = [
{
name   = "PRODUCT"
values = ["RedhatEnterpriseLinux6.10", "RedhatEnterpriseLinux6.5", "RedhatEnterpriseLinux6.6", "RedhatEnterpriseLinux6.7", "RedhatEnterpriseLinux6.8", "RedhatEnterpriseLinux6.9", "RedhatEnterpriseLinux7", "RedhatEnterpriseLinux7.0", "RedhatEnterpriseLinux7.1", "RedhatEnterpriseLinux7.2", "RedhatEnterpriseLinux7.3", "RedhatEnterpriseLinux7.4", "RedhatEnterpriseLinux7.5", "RedhatEnterpriseLinux7.6", "RedhatEnterpriseLinux7.7", "RedhatEnterpriseLinux7.8", "RedhatEnterpriseLinux8", "RedhatEnterpriseLinux8.0", "RedhatEnterpriseLinux8.1", "RedhatEnterpriseLinux8.2"]
},
{
name   = "CLASSIFICATION"
values = ["Security"]
},
{
name   = "SEVERITY"
values = ["Critical"]
}
]
}
]
}
In the parent module, I would like an output that generates a map for the baseline approval rules.. something similar to
[
{
name = "product",
valueFrom = <VALUE FROM PRODUCT IN patch_baseline_filters>,
},
{
name = "severity",
valueFrom = <VALUE FROM SEVERITY IN patch_baseline_filters>,
},
{
name = <etc>,
valueFrom = <ETC>
}
]
I'm trying to use functions like zipmap and sort to output the values to a map, but I have had no success.
Thanks!
EDIT:
Entire RHEL resource output looks like this:
+ entire_rhel_resource = [
+ {
+ approval_rule = [
+ {
+ approve_after_days = 0
+ approve_until_date = null
+ compliance_level = "CRITICAL"
+ enable_non_security = false
+ patch_filter = [
+ {
+ key = "PRODUCT"
+ values = [
+ "RedhatEnterpriseLinux6.10",
+ "RedhatEnterpriseLinux6.5",
+ "RedhatEnterpriseLinux6.6",
+ "RedhatEnterpriseLinux6.7",
+ "RedhatEnterpriseLinux6.8",
+ "RedhatEnterpriseLinux6.9",
+ "RedhatEnterpriseLinux7",
+ "RedhatEnterpriseLinux7.0",
+ "RedhatEnterpriseLinux7.1",
+ "RedhatEnterpriseLinux7.2",
+ "RedhatEnterpriseLinux7.3",
+ "RedhatEnterpriseLinux7.4",
+ "RedhatEnterpriseLinux7.5",
+ "RedhatEnterpriseLinux7.6",
+ "RedhatEnterpriseLinux7.7",
+ "RedhatEnterpriseLinux7.8",
+ "RedhatEnterpriseLinux8",
+ "RedhatEnterpriseLinux8.0",
+ "RedhatEnterpriseLinux8.1",
+ "RedhatEnterpriseLinux8.2",
]
},
+ {
+ key = "CLASSIFICATION"
+ values = [
+ "Security",
]
},
+ {
+ key = "SEVERITY"
+ values = [
+ "Critical",
]
},
]
},
]
+ approved_patches = null
+ approved_patches_compliance_level = "UNSPECIFIED"
+ approved_patches_enable_non_security = null
+ arn = (known after apply)
+ description = "RedHat Enterprise Linux Default Patch Baseline"
+ global_filter = []
+ id = (known after apply)
+ name = "SENSITIVEredhat_enterprise_linux-baseline"
+ operating_system = "REDHAT_ENTERPRISE_LINUX"
+ rejected_patches = null
+ rejected_patches_action = (known after apply)
+ source = []
+ tags = {
+ "Name" = "SENSITIVEredhat_enterprise_linux"
+ "owner" = "SENSITIVE"
+ "team" = "SENSITIVE"
+ "terraform" = "true"
}
+ tags_all = {
+ "Name" = "SENSITIVEredhat_enterprise_linux"
+ "owner" = "SENSITIVE"
+ "team" = "SENSITIVE"
+ "terraform" = "true"
}
},
]
Here is the best I could come up with:
output "rhel_server_types" {
description = "types of patches applied for rhel systems"
# One { name, valueFrom } object per patch_filter of the first approval rule;
# filter keys are lower-cased for the consumer.
value = [
for pf in aws_ssm_patch_baseline.baseline.approval_rule[0].patch_filter : {
name      = lower(pf.key)
valueFrom = pf.values
}
]
}

Terraform module s3 lifecycle rule not not working

I have a s3 lifecycle rule that should delete the failed multipart upload after n number of days by using lifecycle rules. I want to use lookup instead of try
# Lifecycle configuration built from var.lifecycle_rule (accepts either a JSON
# string or a native list, via try/jsondecode).
resource "aws_s3_bucket_lifecycle_configuration" "default" {
count = length(var.lifecycle_rule) != 0 ? 1 : 0
bucket = aws_s3_bucket.bucket.bucket
dynamic "rule" {
for_each = try(jsondecode(var.lifecycle_rule), var.lifecycle_rule)
content {
id = lookup(rule.value, "id", "default")
status = lookup(rule.value, "status", "Enabled")
dynamic "abort_incomplete_multipart_upload" {
# NOTE(review): this is the bug in the question — the caller's map uses the
# key "abort_incomplete_multipart_upload_days", so this lookup (and the
# rule.value.* access below) never finds it and the block is silently
# skipped. The corrected version appears later in this thread.
for_each = lookup(rule.value, "abort_incomplete_multipart_upload", null) != null ? [rule.value.abort_incomplete_multipart_upload] : []
content {
days_after_initiation = abort_incomplete_multipart_upload.value.days_after_initiation
}
}
}
}
}
When I try to use this module resource in my child module, it does not work
# Caller of the s3 module: two lifecycle rules, the second carrying the
# abort-incomplete-multipart-upload settings under the key
# "abort_incomplete_multipart_upload_days".
module "test" {
source = "./s3"
bucket_name = "test"
lifecycle_rule = [
{
expiration = {
days = 7
}
},
{
id = "abort-incomplete-multipart-upload-lifecyle-rule"
abort_incomplete_multipart_upload_days = {
days_after_initiation = 6
}
}
]
}
terraform plan gives me
+ rule {
+ id = "abort-incomplete-multipart-upload-lifecyle-rule"
+ status = "Enabled"
+ filter {
}
}
expected output:
+ rule {
+ id = "abort-incomplete-multipart-upload-lifecyle-rule"
+ status = "Enabled"
+ abort_incomplete_multipart_upload {
+ days_after_initiation = 8
}
+ filter {
}
}
Here's the code that works:
# Corrected version: the lookup key and the value access both use
# "abort_incomplete_multipart_upload_days", matching the caller's map.
resource "aws_s3_bucket_lifecycle_configuration" "default" {
count = length(var.lifecycle_rule) != 0 ? 1 : 0
bucket = aws_s3_bucket.bucket.bucket
dynamic "rule" {
# Accepts var.lifecycle_rule either as a JSON string or as a native list.
for_each = try(jsondecode(var.lifecycle_rule), var.lifecycle_rule)
content {
id = lookup(rule.value, "id", "default")
status = lookup(rule.value, "status", "Enabled")
dynamic "abort_incomplete_multipart_upload" {
# Emit the block only when the rule actually defines the setting.
for_each = lookup(rule.value, "abort_incomplete_multipart_upload_days", null) != null ? [rule.value.abort_incomplete_multipart_upload_days] : []
content {
days_after_initiation = abort_incomplete_multipart_upload.value.days_after_initiation
}
}
}
}
}
There are basically two issues:
The lookup was looking for a non-existing key in your map, abort_incomplete_multipart_upload, instead of abort_incomplete_multipart_upload_days
Because of the first error, it was propagated to the value you wanted, i.e., rule.value.abort_incomplete_multipart_upload instead of rule.value.abort_incomplete_multipart_upload_days
This code yields the following output:
# aws_s3_bucket_lifecycle_configuration.default[0] will be created
+ resource "aws_s3_bucket_lifecycle_configuration" "default" {
+ bucket = (known after apply)
+ id = (known after apply)
+ rule {
+ id = "default"
+ status = "Enabled"
}
+ rule {
+ id = "abort-incomplete-multipart-upload-lifecyle-rule"
+ status = "Enabled"
+ abort_incomplete_multipart_upload {
+ days_after_initiation = 6
}
}
}
However, if you want it to be one rule (i.e., the example output you want), you need to make a change to your lifecycle_rule variable:
# Single combined rule: expiration and the multipart-upload abort settings
# merged into one list entry so the plan shows one rule block.
lifecycle_rule = [
{
expiration = {
days = 7
}
id = "abort-incomplete-multipart-upload-lifecyle-rule"
abort_incomplete_multipart_upload_days = {
days_after_initiation = 6
}
}
]
This gives:
+ resource "aws_s3_bucket_lifecycle_configuration" "default" {
+ bucket = (known after apply)
+ id = (known after apply)
+ rule {
+ id = "abort-incomplete-multipart-upload-lifecyle-rule"
+ status = "Enabled"
+ abort_incomplete_multipart_upload {
+ days_after_initiation = 6
}
}
}

Filtering variables map+list

I have a data structure and I need to extract a list out of a map of lists based on the maps key. Here is the sample data:
locals {
# NOTE(review): typo — declared as "firwall_rules" but referenced below as
# local.firewall_rules.
firwall_rules = ["first", "third"] # this is the filter used on firewall_rules_lookup
firewall_rules_lookup = {
# NOTE(review): this line is a stray map entry, not a type constraint —
# locals take no `type` argument; it should be removed (see the corrected
# version below).
type = map
"first" = [ { name ="rule1.1", start_ip="0.0.0.1" , end_ip = "0.0.0.2" },
{ name ="rule1.2", start_ip="0.0.0.4" , end_ip = "0.0.0.5" },
],
"second"= [ { name ="rule2.1", start_ip="0.0.1.1" , end_ip = "0.0.1.2" } ],
"third" = [ { name ="rule3.1", start_ip="0.0.3.1" , end_ip = "0.0.3.2" },
{ name ="rule3.2", start_ip="0.0.3.4" , end_ip = "0.0.3.5" },
]
}
fw_rules = flatten([
# NOTE(review): this is the failing expression from the question — the outer
# `for ... : { for ... }` tries to build an object from a list comprehension;
# the outer result should be a tuple (square brackets), as in the answer below.
for rule_name in local.firewall_rules : {
for r in local.firewall_rules_lookup[rule_name] : {
name = r.name
start_ip = r.start_ip
end_ip = r.end_ip
}
}
])
}
Expected result:
fw_rules=
[ { name ="rule1.1", start_ip="0.0.0.1" , end_ip = "0.0.0.2" },
{ name ="rule1.2", start_ip="0.0.0.4" , end_ip = "0.0.0.5" },
{ name ="rule3.1", start_ip="0.0.3.1" , end_ip = "0.0.3.2" },
{ name ="rule3.2", start_ip="0.0.3.4" , end_ip = "0.0.3.5" }
]
The inner for loop is not working. Terraform gives me an error. I think the for loops
work only with maps. Is there a different solution to this problem?
It should be as follows:
locals {
firewall_rules = ["first", "third"] # this is the filter used on firewall_rules_lookup
# Lookup table: rule-set name => list of firewall rule objects.
firewall_rules_lookup = {
"first" = [ { name ="rule1.1", start_ip="0.0.0.1" , end_ip = "0.0.0.2" },
{ name ="rule1.2", start_ip="0.0.0.4" , end_ip = "0.0.0.5" },
],
"second"= [ { name ="rule2.1", start_ip="0.0.1.1" , end_ip = "0.0.1.2" } ],
"third" = [ { name ="rule3.1", start_ip="0.0.3.1" , end_ip = "0.0.3.2" },
{ name ="rule3.2", start_ip="0.0.3.4" , end_ip = "0.0.3.5" },
]
}
# Select only the rule sets named in firewall_rules and flatten their rules
# into one list. Both for levels produce tuples, so flatten yields a flat
# list of { name, start_ip, end_ip } objects.
fw_rules = flatten([
for key in local.firewall_rules : [
for entry in local.firewall_rules_lookup[key] : {
name     = entry.name
start_ip = entry.start_ip
end_ip   = entry.end_ip
}
]
])
}

Adding Extra Routes to route table using terraform module

I am adding routes to route table using module. Below is my code. It runs successfully but routes don't get added.
module.tf: (This checks if the publicRoute & privateRoute has more than one item, it will add that many routes to route table)
# NOTE(review): the `> 1` condition is the bug — a list with exactly one route
# yields count = 0, so nothing is created. A non-empty check should use > 0
# (see the answer at the end of this section).
resource "aws_route" "public_routes" {
count = length(var.ExtraRoutes.publicRoute) > 1 ? length(var.ExtraRoutes.publicRoute) : 0
route_table_id = aws_route_table.VPCPublicSubnetRouteTable[0].id
# NOTE(review): the dots in this regex are unescaped, so it matches more than a
# dotted-quad CIDR — presumably "^[0-9].*\\.[0-9].*" was intended; confirm.
destination_cidr_block = length(regexall("^[0-9].*.[0-9].*",var.ExtraRoutes.publicRoute[count.index].destination)) != 0 ? var.ExtraRoutes.publicRoute[count.index].destination : null
gateway_id = length(regexall("^igw-.*",var.ExtraRoutes.publicRoute[count.index].target)) != 0 ? var.ExtraRoutes.publicRoute[count.index].target : null
}
# Same pattern for the private route table; shares the same > 1 defect.
resource "aws_route" "private_routes" {
count = length(var.ExtraRoutes.privateRoute) > 1 ? length(var.ExtraRoutes.privateRoute) : 0
route_table_id = aws_route_table.VPCPrivateSubnetRouteTable[0].id
destination_cidr_block = length(regexall("^[0-9].*.[0-9].*",var.ExtraRoutes.privateRoute[count.index].destination)) != 0 ? var.ExtraRoutes.privateRoute[count.index].destination : null
gateway_id = length(regexall("^igw-.*",var.ExtraRoutes.privateRoute[count.index].target)) != 0 ? var.ExtraRoutes.privateRoute[count.index].target : null
}
module_var.tf (I am keeping it only a map)
# Module-side variable: loosely-typed map holding publicRoute/privateRoute
# lists of { destination, target } entries; both default to empty.
variable "ExtraRoutes" {
type = map
default = {
publicRoute = []
privateRoute = []
}
}
main.tf (As I need the first item in ExtraRoutes for something else I want from count.index + 1)
# Instantiates the VPC module once per entry of var.ExtraRoutes, skipping the
# first item (the author wants index 0 reserved for something else).
module "ExtraVPCs" {
source = "./modules/VPC"
count = length(var.ExtraRoutes)
ExtraRoutes = {
# NOTE(review): with count = length(var.ExtraRoutes), the last iteration
# evaluates var.ExtraRoutes[length] — an out-of-range index. Presumably
# count should be length - 1; verify against the intended behavior.
publicRoute = var.ExtraRoutes[count.index + 1].publicRoute
privateRoute = var.ExtraRoutes[count.index + 1].privateRoute
}
}
main_var.tf
# Root-side variable: strongly-typed list of route groups, each with public
# and private { destination, target } route lists.
variable "ExtraRoutes" {
type = list(object({
publicRoute = list(object({
destination = string
target = string
})
)
privateRoute = list(object({
destination = string
target = string
}))
}))
}
init.tfvars (There are 2 items in ExtraRoutes. It should add the 2nd item to the route table, but it's not working as expected.)
# Two route groups; each target is an internet gateway id, matching the
# "^igw-.*" regexall check in the aws_route resources above.
ExtraRoutes = [
{
publicRoute = [
{
destination = "10.0.0.0/32"
target = "igw-092aba6c187183f48"
}
]
privateRoute = [
{
destination = "10.0.0.0/32"
target = "igw-092aba6c187183f48"
}
]
},
{
publicRoute = [
{
destination = "10.0.0.0/32"
target = "igw-0acf4f7ac1e7eba47"
}
]
privateRoute = [
{
destination = "10.0.0.0/32"
target = "igw-0acf4f7ac1e7eba47"
}
]
}
]
You check the length of a list using >0, not >1:
count = length(var.ExtraRoutes.publicRoute) > 0 ? length(var.ExtraRoutes.publicRoute) : 0
TF counts items from 0. When you use >1, in your case you end up with count = 0.