Terraform create weird for_each - google-cloud-platform

I wanted to create a for_each loop that loops only over objects in array that have some specific key : value pair.
My input variables are:
inputs = {
names = ["first", "second"]
lifecycle_rules = [
{
name = first
condition = {
age = "1"
}
action = {
type = "Delete"
}
},{
condition = {
age = "2"
}
action = {
type = "Delete"
}
},
{
name = second
condition = {
age = "3"
}
action = {
type = "Delete"
}
},{
condition = {
age = "4"
}
action = {
type = "Delete"
}
}
]
and in my main.tf (btw for deploying gcp bucket for reference), I wanted to separate the lifecycle per bucket and wanted to apply only the rules that have the buckets name in it.
So if anyone has an idea how to modify the for_each code below to work, I would highly appreciate it. I believe only the for_each needs to be changed to loop over the right elements from the var.lifecycle_rules set (let's say only objects in that list that have name = first).
resource "google_storage_bucket" "buckets" {
count = length(var.names)
name = "${lower(element(var.names, count.index))}"
...
dynamic "lifecycle_rule" {
#for_each = length(lookup(lifecycle_rules[lookup(element(var.names, count.index))])
for_each = lifecycle_rules
content {
action {
type = lifecycle_rule.value.action.type
storage_class = lookup(lifecycle_rule.value.action, "storage_class", null)
}
condition {
#age = lifecycle_rule.value.name == element(var.names, count.index) ? lookup(lifecycle_rule.value.condition, "age", null) : null
age = lookup(lifecycle_rule.value.condition, "age", null) : null
...

I think that this "weird" loop can be obtained in two stages.
Reorganize lifecycle_rules into a map based on names
variable "input" {
default = {
names = ["first", "second"],
lifecycle_rules = [
{
name = "first",
condition = {
age = "1"
},
action = {
type = "Delete"
}
},
{
condition = {
age = "2"
},
action = {
type = "Delete"
}
},
{
name = "second",
condition = {
age = "3"
},
action = {
type = "Delete"
}
},
{
condition = {
age = "4"
},
action = {
type = "Delete"
}
}
]
}
}
locals {
new = {
for name in var.input.names:
name => [for rule in var.input.lifecycle_rules:
contains(keys(rule), "name" ) ?
rule.name == name ? rule: null :
null ]
}
}
which will give local.new in the form of:
{
"first" = [
{
"action" = {
"type" = "Delete"
}
"condition" = {
"age" = "1"
}
"name" = "first"
},
null,
null,
null,
]
"second" = [
null,
null,
{
"action" = {
"type" = "Delete"
}
"condition" = {
"age" = "3"
}
"name" = "second"
},
null,
]
}
Perform the for_each
resource "google_storage_bucket" "buckets" {
for_each = toset(var.input.names)
name = each.key
dynamic "lifecycle_rule" {
# iterate for each name skipping null values
for_each = [for v in local.new[each.key]: v if v != null]
content {
action {
type = lifecycle_rule.value["action"].type
storage_class = lookup(lifecycle_rule.value["action"], "storage_class", null)
}
condition {
age = lookup(lifecycle_rule.value["condition"], "age", null)
}
}
}
}
I could only verify the first step and partially the second (using aws_autoscaling_group and its multiple tag components). I don't have access to Google Cloud to fully test the code.

Related

Terraform for_each nested solution

I need to access the elements of a nested map but I can't do it and I get an error Can't access attributes on a primitive-typed value (string).
# GLUE
locals {
glue_catalog_resources = {
uni = {
name = "mst_business_units",
description = "Unidades de negocios"
columns = [
{
name = "codigouni"
type = "int"
comment = "Code"
},{
name = "descuni"
type = "varchar(256)"
comment = "Description"
},{
name = "estado"
type = "varchar(256)"
comment = "Current status"
}
]
}
}
}
resource "aws_glue_catalog_table" "glue_catalogs_redshift" {
for_each = local.glue_catalog_resources
name = each.value.name
database_name = aws_glue_catalog_database.cl_sales.name
description = each.value.description
retention = 0
table_type = "EXTERNAL_TABLE"
parameters = {
EXTERNAL = "TRUE"
"classification" = "parquet"
}
storage_descriptor {
location = ""
input_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"
output_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"
number_of_buckets = 0
compressed = "false"
stored_as_sub_directories = "false"
ser_de_info {
serialization_library = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
parameters = {
"serialization.format" = 1
}
}
parameters = {
"typeOfData": "kinesis"
}
columns {
name = [ for column in each.value.columns: [ for row in column: row.name ] ]
type = [ for column in each.value.columns: [ for row in column: row.type ] ]
comment = [ for column in each.value.columns: [ for row in column: row.comment ] ]
}
}
}
I need to include in the columns tag columns name, type and comment reading it from the map above and I can't do it, what would be the correct way to read it
columns {
name = [ for column in each.value.columns: [ for row in column: row.name ] ]
type = [ for column in each.value.columns: [ for row in column: row.type ] ]
comment = [ for column in each.value.columns: [ for row in column: row.comment ] ]
}
In this case, columns is a list of key value pairs, so you need to adjust the second for a bit:
name = [for column in local.columns : [for k, v in column : v if k == "name"]]
type = [for column in local.columns : [for k, v in column : v if k == "type"]]
comment = [for column in local.columns : [for k, v in column : v if k == "comment"]]
EDIT: The first part of the answer resolves the issue with accessing the list elements. However, in order for this to work as you would want to, I would suggest a couple of changes. In the local variable, you can change columns to be a map of objects:
locals {
glue_catalog_resources = {
uni = {
name = "mst_business_units",
description = "Unidades de negocios"
columns = {
"codigouni" = {
name = "codigouni"
type = "int"
comment = "Code"
},
"descuni" = {
name = "descuni"
type = "varchar(256)"
comment = "Description"
},
"estado " = {
name = "estado"
type = "varchar(256)"
comment = "Current status"
}
}
}
}
}
Then, in the columns block you would do the following:
resource "aws_glue_catalog_table" "glue_catalogs_redshift" {
for_each = local.glue_catalog_resources
name = each.value.name
database_name = aws_glue_catalog_database.cl_sales.name
description = each.value.description
retention = 0
table_type = "EXTERNAL_TABLE"
parameters = {
EXTERNAL = "TRUE"
"classification" = "parquet"
}
storage_descriptor {
location = ""
input_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"
output_format = "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"
number_of_buckets = 0
compressed = "false"
stored_as_sub_directories = "false"
ser_de_info {
serialization_library = "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
parameters = {
"serialization.format" = 1
}
}
parameters = {
"typeOfData" : "kinesis"
}
dynamic "columns" {
for_each = each.value.columns
content {
name = columns.value.name
type = columns.value.type
comment = columns.value.comment
}
}
}
}
This approach is using dynamic [1] and for_each [2] to iterate over all of the columns and assign the wanted values to arguments.
[1] https://developer.hashicorp.com/terraform/language/expressions/dynamic-blocks
[2] https://developer.hashicorp.com/terraform/language/meta-arguments/for_each

Terraform trouble with list in a map using for_each

I'm trying to take each value from the list in the map and iterate over it with the description in a map for a prefix list but can't work out how.
Variable:
users = {
"user1" = {
description = ""
secret_key_value = {
username = "user1"
home_directory = "/user1/"
}
allowlist = ["200.0.0.1/32"]
},
"user2" = {
description = ""
secret_key_value = {
username = "user2"
home_directory = "/user2/"
}
allowlist = ["200.0.0.5/32", "200.0.0.10/32"]
}
Resource:
resource "aws_ec2_managed_prefix_list" "sftp" {
count = local.prefix_list_enabled ? 1 : 0
name = "User Whitelist"
address_family = "IPv4"
max_entries = 10
dynamic "entry" {
for_each = {
for k, v in var.users : k => v
if v.allowlist != "" || v.description != ""
}
content {
cidr = entry.value.allowlist
description = entry.value.description
}
}
}
With the above, I'm getting "Inappropriate value for attribute "cidr": string required.". I need to break up the list values in the allowlist variable key and iterate through them with the description. Does anyone know how I can achieve this?
You have to flatten your users:
locals {
users_flat = merge([
for k,v in var.users: {
for cidr in v.allowlist:
"${k}-${cidr}" => {
description = v.description
secret_key_value = v.secret_key_value
"cidr" = cidr
}
}
]...)
}
then
dynamic "entry" {
for_each = local.users_flat
content {
cidr = entry.value.cidr
description = entry.value.description
}
}

Terraform Argument Block as a List of Maps

I am trying to pick up both elements in my query_string tuple. However, only the last element is being picked up. Below, is my test/main.tf file:
terraform {
required_version = ">= 0.13.0"
required_providers {
aws = ">= 3.58"
}
}
provider "aws" {
region = "us-east-1"
}
module "test_module" {
source = "../"
lb = [
{
name = "lb-name"
enable_deletion_protection = true
internal = false
load_balancer_type = "application"
lb_listener = [
{
port = "8080"
protocol = "HTTP"
ssl_policy = "ELBSecurityPolicy-2016-08"
default_action = {
type = "redirect"
order = 1
redirect = {
status_code = "HTTP_302"
}
}
lb_listener_rule = [
{
action = {
type = "fixed-response"
fixed_response = {
content_type = "text/plain"
message_body = "This is a message body."
}
}
condition = {
query_string = [
{
key = "health"
value = "check"
},
{
value = "bar"
}
]
}
}
]
}
]
}
]
}
Here is my lb.tf file:
variable "lb" {
description = <<DESCRIPTION
The AWS Elastic Load Balancing v2 module.
DESCRIPTION
type = any
default = []
}
locals {
lb_listener_flat_list = flatten([
for lb in var.lb : [
for lb_listener in lookup(lb, "lb_listener", []) : {
lb_name = lb.name
lb_listener_load_balancer_arn = aws_lb.lb[lb.name].arn
lb_listener_port = lookup(lb, "load_balancer_type", "application") != "gateway" ? lookup(lb_listener, "port", null) : null
lb_listener_protocol = lookup(lb, "load_balancer_type", "application") != "gateway" || lookup(lb, "ip_address_type", null) != "dualstack" ? lookup(lb_listener, "protocol", null) : null
lb_listener_ssl_policy = lookup(lb_listener, "protocol", null) != "HTTPS" || lookup(lb_listener, "protocol", null) != "TLS" ? lb_listener.ssl_policy : lookup(lb_listener, "ssl_policy", null)
lb_listener_default_action_type = lb_listener.default_action.type
lb_listener_default_action_order = lookup(lb_listener.default_action, "order", null)
lb_listener_default_action_redirect = lookup(lb_listener.default_action, "redirect", {}) != {} ? {
lb_listener_default_action_redirect_status_code = lb_listener.default_action.redirect.status_code
} : null
}
]
])
lb_listener_rule_flat_list = flatten([
for lb in var.lb : [
for lb_listener in lookup(lb, "lb_listener", []) : [
for lb_listener_rule in lookup(lb_listener, "lb_listener_rule", []) : [
# I've recently added the line below in an attempte to loop through the list of maps for query_string
for index in range(length(lookup(lb_listener_rule.condition, "query_string", []))) : {
lb_name = lb.name
lb_listener_rule_index = index
lb_listener_rule_listener_arn = aws_lb_listener.lb_listener[lb.name].arn
lb_listener_rule_action_type = lb_listener_rule.action.type
lb_listener_rule_action_fixed_response = lookup(lb_listener_rule.action, "fixed_response", {}) != {} ? {
lb_listener_rule_action_fixed_response_content_type = lb_listener_rule.action.fixed_response.content_type
lb_listener_rule_action_fixed_response_message_body = lookup(lb_listener_rule.action.fixed_response, "message_body", null)
} : null
lb_listener_rule_condition_query_string = lookup(lb_listener_rule.condition, "query_string", {}) != {} ? {
lb_listener_rule_condition_query_string_key = lookup(lb_listener_rule.condition.query_string[index], "key", null)
lb_listener_rule_condition_query_string_value = lb_listener_rule.condition.query_string[index].value
} : null
}
]
]
]
])
lb_map = { for lb in var.lb : lb.name => lb }
lb_listener_map = { for lb_listener in local.lb_listener_flat_list : lb_listener.lb_name => lb_listener }
lb_listener_rule_map = { for lb_listener_rule in local.lb_listener_rule_flat_list : "${lb_listener_rule.lb_name}-[${lb_listener_rule.lb_listener_rule_index}]" => lb_listener_rule }
}
resource "aws_lb_listener" "lb_listener" {
for_each = local.lb_listener_map
load_balancer_arn = each.value.lb_listener_load_balancer_arn
port = each.value.lb_listener_port
protocol = each.value.lb_listener_protocol
ssl_policy = each.value.lb_listener_ssl_policy
default_action {
type = each.value.lb_listener_default_action_type
order = each.value.lb_listener_default_action_order
dynamic "redirect" {
for_each = each.value.lb_listener_default_action_redirect != null ? [{}] : []
content {
status_code = each.value.lb_listener_default_action_redirect.lb_listener_default_action_redirect_status_code
}
}
}
}
resource "aws_lb_listener_rule" "lb_listener_rule" {
for_each = local.lb_listener_rule_map
listener_arn = each.value.lb_listener_rule_listener_arn
action {
type = each.value.lb_listener_rule_action_type
dynamic "fixed_response" {
for_each = each.value.lb_listener_rule_action_fixed_response != null ? [{}] : []
content {
content_type = each.value.lb_listener_rule_action_fixed_response.lb_listener_rule_action_fixed_response_content_type
message_body = each.value.lb_listener_rule_action_fixed_response.lb_listener_rule_action_fixed_response_message_body
}
}
}
condition {
dynamic "query_string" {
for_each = each.value.lb_listener_rule_condition_query_string != null ? [{}] : []
content {
key = each.value.lb_listener_rule_condition_query_string.lb_listener_rule_condition_query_string_key
value = each.value.lb_listener_rule_condition_query_string.lb_listener_rule_condition_query_string_value
}
}
}
}
resource "aws_lb" "lb" {
for_each = local.lb_map
name = lookup(each.value, "name", null)
enable_deletion_protection = lookup(each.value, "enable_deletion_protection", null)
internal = lookup(each.value, "internal", null)
ip_address_type = lookup(each.value, "ip_address_type", null)
load_balancer_type = lookup(each.value, "load_balancer_type", null)
}
Here is how the condition block looks when I run a terraform plan:
condition {
query_string {
value = "bar"
}
}
Here is how I would like the condition block to look:
condition {
query_string {
key = "health"
value = "check"
}
query_string {
value = "bar"
}
}
Your code, as complex as it is creates the two query_string correctly:
The code, in the form posted in the question, has a number of issues and it will not work. But then, as you wrote that "My original files are very long.", I guess that your real code actually works. Thus I only focused on its verification, rather than fixing all possible errors, as I don't know what your factual code is.

How to define GCP log based metric with multiple labels using terraform , not able to define label_extractors block

resource "google_logging_metric" "logging_metric" {
for_each = { for inst in var.log_based_metrics : inst.name => inst }
name = each.value.name
filter = each.value.filter
metric_descriptor {
metric_kind = each.value.metric_kind
value_type = each.value.value_type
dynamic "labels" {
for_each = each.value.labels
content {
key = labels.value["label_key"]
value_type = labels.value["label_value_type"]
description = labels.value["label_description"]
}
}
display_name = each.value.display_name
}
label_extractors = {
<< How to define multiple label_keys with label extractors >>
}
}
I have tried for_each but it was referring to the first for_each in the resource, and we can't use dynamic as it creates multiple label_extractors blocks, which is not intended.
My variables file :
log_based_metrics = [
{
name = "name1",
filter = "something"
metric_kind = "DELTA",
value_type = "INT64",
labels = [
{
label_key = "deployment",
label_value_type = "STRING",
label_description = "deployment",
label_extractor = "REGEXP_EXTRACT(jsonPayload.involvedObject.name, \"(.*)-[^-]*-[^-]*$\")"
}
]
},
{
name = "name2",
filter = "something",
metric_kind = "DELTA",
value_type = "INT64",
labels = [
{
label_key = "deployment",
label_value_type = "STRING",
label_description = "deployment",
label_extractor = "REGEXP_EXTRACT(jsonPayload.involvedObject.name, \"(.*)-[^-]*-[^-]*$\")"
}
]
},
{
name = "name3",
filter = "something",
metric_kind = "DELTA",
value_type = "INT64",
labels = [
{
label_key = "deployment",
label_value_type = "STRING",
label_description = "deployment",
label_extractor = "REGEXP_EXTRACT(jsonPayload.involvedObject.name, \"(.*)-[^-]*-[^-]*$\")"
}
]
}
]
label_extractor should be something like this :(label_extractor ,label_key from each label block)
for example :
labels {
key = "mass"
value_type = "STRING"
description = "amount of matter"
}
labels {
key = "sku"
value_type = "INT64"
description = "Identifying number for item"
}
display_name = "My metric"
}
value_extractor = "EXTRACT(jsonPayload.request)"
label_extractors = {
"mass" = "EXTRACT(jsonPayload.request)"
"sku" = "EXTRACT(jsonPayload.id)"
}
label_extractors is an attribute, not a block. So you have to just create a map that you want. I don't know exact structure of your input data (not shown in the question), but it would be something as follows (rough example):
label_extractors = { for val in each.value.labels: val.label_key => val.label_extractor }

Terraform dynamodb error - all attributes must be indexed

I am trying to create a simple dynamodb table using following reource modules of terraform.
Get the following error while running terraform:
All attributes must be indexed. Unused attributes: ["pactitle" "ipadress" "Timestamp"].
why do we need to index all attributes ?
How to solve this ?
resource "aws_dynamodb_table" "this" {
count = var.create_table ? 1 : 0
name = var.name
billing_mode = var.billing_mode
hash_key = var.hash_key
range_key = var.range_key
read_capacity = var.read_capacity
write_capacity = var.write_capacity
//stream_enabled = var.stream_enabled
//stream_view_type = var.stream_view_type
dynamic "attribute" {
for_each = var.attributes
content {
name = attribute.value.name
type = attribute.value.type
}
}
server_side_encryption {
enabled = var.server_side_encryption_enabled
kms_key_arn = var.server_side_encryption_kms_key_arn
}
tags = merge(
var.tags,
{
"Name" = format("%s", var.name)
},
)
timeouts {
create = lookup(var.timeouts, "create", null)
delete = lookup(var.timeouts, "delete", null)
update = lookup(var.timeouts, "update", null)
}
}
calling module
module "dynamodb_table" {
source = "./../../../modules/dynamodb"
name = "pack-audit-cert"
hash_key = "id"
create_table= true
read_capacity=5
write_capacity=5
billing_mode = "PROVISIONED"
range_key = "pacid"
attributes = [
{
name = "id"
type = "N"
},
{
name = "pacid"
type = "S"
},
{
name = "pactitle"
type = "S"
},
{
name = "ipadress"
type = "S"
},
{
name = "Timestamp"
type = "S"
}
]
}
Thank you
That error message is a bit misleading. You should only define the indexed attributes when you are creating the table. Since DynamoDB is a schemaless database, it doesn't care about the other attributes at table creation time.