Terraform Multilevel Maps with list throwing error - google-cloud-platform

I am getting the below error when I execute terraform plan. I don't see any error when I use a single volume name in volume_name, but I am facing this error when I specify multiple volume names in volume_name = ["test-terraform-0", "test-terraform-1", "test-terraform-2"]. I request you to help me correct my issue or suggest an alternate idea/solution to achieve my goal.
terraform plan -var-file=customers/nike.tfvars
│ Error: Incorrect attribute value type
│
│ on gcp_compute_disk/gcp_compute_disk.tf line 4, in resource "google_compute_disk" "disk":
│ 4: name = each.value.volume_name
│ ├────────────────
│ │ each.value.volume_name is list of string with 3 elements
│
│ Inappropriate value for attribute "name": string required.
╵
│ Error: Incorrect attribute value type
│
│ on gcp_compute_disk/gcp_compute_disk.tf line 5, in resource "google_compute_disk" "disk":
│ 5: size = each.value.volume_size
│ ├────────────────
│ │ each.value.volume_size is list of number with 3 elements
│
│ Inappropriate value for attribute "size": number required.
╷
│ Error: Incorrect attribute value type
│
│ on gcp_compute_disk/gcp_compute_disk.tf line 6, in resource "google_compute_disk" "disk":
│ 6: type = each.value.volume_type
│ ├────────────────
│ │ each.value.volume_type is list of string with 3 elements
│
│ Inappropriate value for attribute "type": string required.
folder structure
├── gcp_compute_disk
│   ├── gcp_compute_disk.tf
│   └── variables.tf
├── gcp_instance
│   ├── gcp_instance.tf
│   └── variables.tf
├── main.tf
├── customers
│   └── nike.tfvars
└── variables.tf
variables.tf
variable "instance_config" {
type = map(object({
name = string
image = string
type = string
tags = list(string)
deletion_protection = bool
startup_script = string
hostname = string
volume_name = list(string)
volume_size = list(number)
volume_type = list(string)
}))
default = {
test_vm = {
name = "test_vm"
image = "debian-cloud/debian-9"
type = "n1-standard-4"
tags = ["test_vm"]
deletion_protection = false
startup_script = "start-up.sh"
hostname = "test_vm"
volume_name = ["test-terraform-0", "test-terraform-1", "test-terraform-2"]
volume_size = [50, 50, 50]
volume_type = ["pd-standard", "pd-standard", "pd-standard", ]
}
}
}
.tfvars
instance_config = {
testvm1 = {
name = "solr"
image = "debian-cloud/debian-9"
type = "n1-standard-4"
tags = ["testvm1"]
deletion_protection = false
startup_script = "../scripts/start-up.sh"
hostname = "testvm1.terraform.test"
volume_name = ["testvm1-test-terraform-0", "testvm1-test-terraform-1", "testvm1-test-terraform-2"]
volume_size = [50, 50, 50]
volume_type = ["pd-standard", "pd-standard", "pd-standard", ]
},
testvm2 = {
name = "testvm2"
image = "debian-cloud/debian-9"
type = "f1-micro"
tags = ["testvm2"]
deletion_protection = false
startup_script = "../scripts/start-up.sh"
hostname = "testvm2.terraform.test"
volume_name = ["testvm2-test-terraform-0", "testvm2-test-terraform-1", "testvm2-test-terraform-2"]
volume_size = [50, 50, 50]
volume_type = ["pd-standard", "pd-standard", "pd-standard", ]
}
}
gcp_compute_disk.tf
resource "google_compute_disk" "disk" {
for_each = var.instance_config
name = each.value.volume_name
size = each.value.volume_size
type = each.value.volume_type
}
gcp_instance.tf
resource "google_compute_instance" "vm_instance" {
for_each = var.instance_config
name = each.value.name
machine_type = each.value.type
tags = each.value.tags
deletion_protection = each.value.deletion_protection
hostname = each.value.hostname
boot_disk {
initialize_params {
image = each.value.image
}
}
metadata_startup_script = file(each.value.startup_script)
attached_disk {
source = each.value.volume_name
}
network_interface {
network = var.gcp_network
}
}

You can check the following:
locals {
instance_volume_map = merge([for key, val in var.instance_config:
{
for idx in range(length(val.volume_size)):
"${key}-${idx}" => {
volume_name = val.volume_name[idx]
volume_size = val.volume_size[idx]
volume_type = val.volume_type[idx]
}
}
]...)
}
resource "google_compute_disk" "disk" {
for_each = local.instance_volume_map
name = each.value.volume_name
size = each.value.volume_size
type = each.value.volume_type
}
resource "google_compute_instance" "vm_instance" {
for_each = var.instance_config
name = each.value.name
machine_type = each.value.type
tags = each.value.tags
deletion_protection = each.value.deletion_protection
hostname = each.value.hostname
boot_disk {
initialize_params {
image = each.value.image
}
}
metadata_startup_script = file(each.value.startup_script)
dynamic "attached_disk" {
for_each = {for idx, val in range(length(each.value.volume_name)):
idx => val}
content {
source = local.instance_volume_map["${each.key}-${attached_disk.key}"].volume_name
}
}
network_interface {
network = var.gcp_network
}
}

Related

Facing Issue with same variable name "node_config" for two different resource while creating a module

Module giving "Reference to undeclared resource" when I am creating a Google Container cluster and I am using two resources with same dynamic input value "node_config"
This is my main.tf file
resource "google_container_cluster" "primary" {
name = var.name
location = var.location
description = var.description
project = var.project
# We can't create a cluster with no node pool defined, but we want to only use
# separately managed node pools. So we create the smallest possible default
# node pool and immediately delete it.
# default_max_pods_per_node = var.default_max_pods_per_node
enable_tpu = var.enable_tpu
enable_shielded_nodes = var.enable_shielded_nodes
enable_legacy_abac = var.enable_legacy_abac
enable_kubernetes_alpha = var.enable_kubernetes_alpha
enable_intranode_visibility = var.enable_intranode_visibility
node_locations = var.node_locations
resource_labels = var.resource_labels
remove_default_node_pool = var.remove_default_node_pool
initial_node_count = var.initial_node_count
dynamic "node_config" {
for_each = var.node_config
content {
disk_size_gb = node_config.value["disk_size_gb"]
disk_type = node_config.value["disk_type"]
image_type = node_config.value["image_type"]
labels = node_config.value["labels"]
local_ssd_count = node_config.value["local_ssd_count"]
machine_type = node_config.value["machine_type"]
metadata = node_config.value["metadata"]
min_cpu_platform = node_config.value["min_cpu_platform"]
oauth_scopes = node_config.value["oauth_scopes"]
preemptible = node_config.value["preemptible"]
dynamic "shielded_instance_config" {
for_each = node_config.value.shielded_instance_config
content {
enable_integrity_monitoring = shielded_instance_config.value["enable_integrity_monitoring"]
enable_secure_boot = shielded_instance_config.value["enable_secure_boot"]
}
}
tags = node_config.value["tags"]
}
}
}
resource "google_container_node_pool" "primary_preemptible_nodes" {
name = var.nodepool_name
location = var.location
project = var.project
cluster = google_container_cluster.primary.name
node_count = var.nodepool_node_count
node_locations = var.node_locations
dynamic "node_config" {
for_each = var.ndpool_node_config
content {
disk_size_gb = ndpool_node_config.value["disk_size_gb"]
disk_type = ndpool_node_config.value["disk_type"]
preemptible = ndpool_node_config.value["preemptible"]
image_type = ndpool_node_config.value["image_type"]
machine_type = ndpool_node_config.value["machine_type"]
oauth_scopes = ndpool_node_config.value["oauth_scopes"]
}
}
}
This my module definition inside module folder
module "google_container_cluster" {
source = "../"
name = var.name
location = var.location
description = var.description
project = var.project
# We can't create a cluster with no node pool defined, but we want to only use
# separately managed node pools. So we create the smallest possible default
# node pool and immediately delete it.
# default_max_pods_per_node = var.default_max_pods_per_node
enable_tpu = var.enable_tpu
enable_shielded_nodes = var.enable_shielded_nodes
enable_legacy_abac = var.enable_legacy_abac
enable_kubernetes_alpha = var.enable_kubernetes_alpha
enable_intranode_visibility = var.enable_intranode_visibility
node_locations = var.node_locations
resource_labels = var.resource_labels
remove_default_node_pool = var.remove_default_node_pool
initial_node_count = var.initial_node_count
node_config = var.node_config
nodepool_name = var.nodepool_name
nodepool_node_count = var.nodepool_node_count
}
My terraform.tfvars are as follows
name = "tf-gcp-cluster"
location = "us-central1-c"
description = "Cluster Creation using TF"
project = "gcp-terraform-prjt"
#default_max_pods_per_node = ""
enable_tpu = false
enable_shielded_nodes = false
enable_legacy_abac = false
enable_kubernetes_alpha = false
enable_intranode_visibility = false
node_locations = []
resource_labels = {
"test" = "tftestgcp"
}
remove_default_node_pool = true
initial_node_count = 1
node_config = [{
disk_size_gb = "10"
disk_type = "pd-standard"
image_type = "cos_containerd"
labels = {
"test" = "tf-container-cluster"
}
local_ssd_count = "0"
machine_type = "e2-micro"
metadata = {
"key" = "value"
}
min_cpu_platform = ""
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform"
]
preemptible = true
shielded_instance_config = [{
enable_integrity_monitoring = false
enable_secure_boot = false
}]
tags = ["value"]
}]
nodepool_name = "tf-nodepool"
nodepool_node_count = 1
ndpool_node_config = [{
disk_size_gb = 10
disk_type = "pd-standard"
image_type = "cos_container"
machine_type = "e2-micro"
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform"
]
preemptible = false
}]
I have supplied a value for both "node_config" and "ndpool_node_config", but for some reason it is giving me the following error when I run the terraform plan command:
╷
│ Error: Reference to undeclared resource
│
│ on ../main.tf line 57, in resource "google_container_node_pool" "primary_preemptible_nodes":
│ 57: disk_size_gb = ndpool_node_config.value["disk_size_gb"]
│
│ A managed resource "ndpool_node_config" "value" has not been declared in module.google_container_cluster.
╵
╷
│ Error: Reference to undeclared resource
│
│ on ../main.tf line 58, in resource "google_container_node_pool" "primary_preemptible_nodes":
│ 58: disk_type = ndpool_node_config.value["disk_type"]
│
│ A managed resource "ndpool_node_config" "value" has not been declared in module.google_container_cluster.
╵
╷
│ Error: Reference to undeclared resource
│
│ on ../main.tf line 59, in resource "google_container_node_pool" "primary_preemptible_nodes":
│ 59: preemptible = ndpool_node_config.value["preemptible"]
│
│ A managed resource "ndpool_node_config" "value" has not been declared in module.google_container_cluster.
╵
╷
│ Error: Reference to undeclared resource
│
│ on ../main.tf line 60, in resource "google_container_node_pool" "primary_preemptible_nodes":
│ 60: image_type = ndpool_node_config.value["image_type"]
│
│ A managed resource "ndpool_node_config" "value" has not been declared in module.google_container_cluster.
╵
╷
│ Error: Reference to undeclared resource
│
│ on ../main.tf line 61, in resource "google_container_node_pool" "primary_preemptible_nodes":
│ 61: machine_type = ndpool_node_config.value["machine_type"]
│
│ A managed resource "ndpool_node_config" "value" has not been declared in module.google_container_cluster.
╵
╷
│ Error: Reference to undeclared resource
│
│ on ../main.tf line 62, in resource "google_container_node_pool" "primary_preemptible_nodes":
│ 62: oauth_scopes = ndpool_node_config.value["oauth_scopes"]
│
│ A managed resource "ndpool_node_config" "value" has not been declared in module.google_container_cluster.
I want the values to be passed down as I have defined them; if I remove them, it creates the node pool with default values rather than the values defined by me.

Terraform loop over list of objects in dynamic block issue

I am trying to create a storage bucket in GCP using Terraform. Please see the below implementation and the .tfvars snippet for the same.
implementation logic
`
resource "google_storage_bucket" "cloud_storage" {
for_each = {for gcs in var.storage_buckets : gcs.name => gcs}
name = each.value.name
location = lookup(each.value, "location", "AUSTRALIA-SOUTHEAST1")
project = data.google_project.existing_projects[each.value.project].project_id
force_destroy = lookup(each.value, "force_destroy", false)
storage_class = lookup(each.value, "storage_class", "STANDARD")
labels = merge(
lookup(each.value, "labels", {}),
{
managed_by = "terraform"
}
)
dynamic "versioning" {
for_each = [for version in [lookup(each.value, "versioning", null)] : version if version != null]
content {
enabled = lookup(versioning.value, "enabled", true)
}
}
dynamic "lifecycle_rule" {
for_each = [for rule in [lookup(each.value, "lifecycle_rule", toset([]))] : rule if length(rule) != 0]
content {
action {
type = lifecycle_rule.value.action.type
storage_class = lookup(lifecycle_rule.value.action, "storage_class", null)
}
condition {
# matches_suffix = lookup(lifecycle_rule.value["condition"], "matches_suffix", null)
age = lookup(lifecycle_rule.value.condition, "age", null)
}
}
}
uniform_bucket_level_access = lookup(each.value, "uniform_bucket_level_access", false)
depends_on = [
data.google_project.existing_projects
]
}
.tfvars snippet
storage_buckets = [
# this 1st bucket is only defined in DEV tf vars. reason: this bucket is a onetime creation for all DWH cloud artifacts under ecx-cicd-tools project.
{
name = "ecx-dwh-artefacts"
localtion = "AUSTRALIA-SOUTHEAST1"
force_destroy = false
project = "ecx-cicd-tools"
storage_class = "STANDARD"
versioning = {
enabled = false
}
labels = {
app = "alation"
project = "resetx"
team = "dwh"
}
uniform_bucket_level_access = false
folders = ["alation/","alation/packages/","alation/packages/archive/",
"alation/backups/","alation/backups/data/","alation/backups/data/DEV/","alation/backups/data/PROD/"]
lifecycle_rule = [
{
action = {
type = "Delete"
}
condition = {
age = "10"
}
},
]
}
,
{
name = "eclipx-dwh-dev"
localtion = "AUSTRALIA-SOUTHEAST1"
force_destroy = false
project = "eclipx-dwh-dev"
storage_class = "STANDARD"
versioning = {}
labels = {
app = "dataflow"
project = "resetx"
team = "dwh"
}
uniform_bucket_level_access = false
folders = ["Data/","Data/stagingCustomDataFlow/","Data/temp/","Data/templatesCustomDataFlow/"]
lifecycle_rule = []
}
]
`
Somehow I am unable to make the dynamic block work in the bucket provisioning logic for the lifecycle_rule section. I am passing a list of objects from .tfvars, as I need to be able to add many rules to the same bucket.
It looks like the for_each loop is not iterating over the list of objects in the lifecycle_rule of the .tfvars.
Below are the errors it is throwing. Can someone please assist?
Error: Unsupported attribute
│
│ on storage.tf line 56, in resource "google_storage_bucket" "cloud_storage":
│ 56: type = lifecycle_rule.value.action.type
│ ├────────────────
│ │ lifecycle_rule.value is list of object with 1 element
│
│ Can't access attributes on a list of objects. Did you mean to access attribute "action" for a specific element of the list, or across all elements of the list?
╵
╷
│ Error: Unsupported attribute
│
│ on storage.tf line 57, in resource "google_storage_bucket" "cloud_storage":
│ 57: storage_class = lookup(lifecycle_rule.value.action, "storage_class", null)
│ ├────────────────
│ │ lifecycle_rule.value is list of object with 1 element
│
│ Can't access attributes on a list of objects. Did you mean to access attribute "action" for a specific element of the list, or across all elements of the list?
╵
╷
│ Error: Unsupported attribute
│
│ on storage.tf line 61, in resource "google_storage_bucket" "cloud_storage":
│ 61: age = lookup(lifecycle_rule.value.condition, "age", null)
│ ├────────────────
│ │ lifecycle_rule.value is list of object with 1 element
│
│ Can't access attributes on a list of objects. Did you mean to access attribute "condition" for a specific element of the list, or across all elements of the list?
Thank you.
I am expecting it that the dynamic block loop over lifecycle_rule
Your for_each is incorrect. It should be:
dynamic "lifecycle_rule" {
for_each = length(each.value["lifecycle_rule"]) != 0 ? each.value["lifecycle_rule"] : []
content {
action {
type = lifecycle_rule.value.action.type
storage_class = lookup(lifecycle_rule.value.action, "storage_class", null)
}
condition {
# matches_suffix = lookup(lifecycle_rule.value["condition"], "matches_suffix", null)
age = lookup(lifecycle_rule.value.condition, "age", null)
}
}
}

read_replicas_mode & replica_count variables are not expected in google_redis_instance

We have used the memory store module for spinning up a redis instance on GCP. However, at a later stage, when we try to enable READ REPLICA via terraform it doesn't accept.
References:
https://github.com/terraform-google-modules/terraform-google-memorystore
module "memorystore_replica" {
source = "terraform-google-modules/memorystore/google"
version = "4.0.0"
redis_version = "REDIS_6_X"
name = "${module.project.project_name}-redisreadreplica-${module.locations.region_codes[var.region]}-instance"
project = module.project.project_id
region = var.region
authorized_network = local.vpc
connect_mode = var.redis_connection_type
memory_size_gb = var.redis_memory_size_gb
transit_encryption_mode = "DISABLED"
labels = var.labels
replica_count = 2
read_replicas_mode = "READ_REPLICAS_ENABLED"
}
resource "google_redis_instance" "default" {
depends_on = [module.enable_apis]
project = var.project
name = var.name
tier = var.tier
memory_size_gb = var.memory_size_gb
connect_mode = var.connect_mode
region = var.region
location_id = var.location_id
alternative_location_id = var.alternative_location_id
authorized_network = var.authorized_network
redis_version = var.redis_version
redis_configs = var.redis_configs
display_name = var.display_name
reserved_ip_range = var.reserved_ip_range
labels = var.labels
auth_enabled = var.auth_enabled
transit_encryption_mode = var.transit_encryption_mode
}
Error: Unsupported argument
│
│ on main.tf line 357, in module "memorystore_replica":
│ 357: replica_count = 2
│
│ An argument named "replica_count" is not expected here.
╵
╷
│ Error: Unsupported argument
│
│ on main.tf line 358, in module "memorystore_replica":
│ 358: read_replicas_mode = "READ_REPLICAS_ENABLED"
│
│ An argument named "read_replicas_mode" is not expected here
Any help appreciated!

Declaring nested variables in Terraform for aws_instance resource

I am having issues declaring my nested variables. I have a .tfvars for my variables, then I declare it in variables.tf, and lastly, I have my aws_instance resource looping through my variables. My issue is that the nested volume blocks in my .tfvars are not being declared and are coming up as an error.
terraform.tfvars
instances = {
instance-01a = {
ami = "ami-abcdefghijk1234",
type = "t3.medium"
key_name = "caws-dc-01"
private_ip = "10.0.0.5"
subnet_id = "subnet-abcdefghijk1234" #Temp
vpc_security_group_ids = ["sg-abcdefghijk1234"] #Temp
root_block_device = {
device_name = "/dev/sda1"
volume_size = 100
volume_type = "gp2"
tags = {
Name = "instance-01a /dev/sda1"
}
}
ebs_block_device = {
device_name = "/dev/sdf"
volume_size = 50
volume_type = "gp2"
tags = {
Name = "instance-01a /dev/sdf"
}
}
disable_api_termination = true
tags = {
Name = "instance-01a"
}
}
}
variable.tf
variable "instances" {}
instance.tf
resource "aws_instance" "specific_instances" {
for_each = var.instances
ami = each.value["ami"]
instance_type = each.value["type"]
key_name = each.value["key_name"]
private_ip = each.value["private_ip"]
subnet_id = each.value["subnet_id"]
vpc_security_group_ids = each.value["vpc_security_group_ids"]
root_block_device = each.value["root_block_device"]
ebs_block_device = each.value["ebs_block_device"]
disable_api_termination = each.value["disable_api_termination"]
tags = each.value["tags"]
}
Error Message
╷
│ Error: Unsupported argument
│
│ on instance.tf line 13, in resource "aws_instance" "specific_instances":
│ 13: root_block_device = each.value["root_block_device"]
│
│ An argument named "root_block_device" is not expected here. Did you mean to define a block of type
│ "root_block_device"?
╵
╷
│ Error: Unsupported argument
│
│ on instance.tf line 14, in resource "aws_instance" "specific_instances":
│ 14: ebs_block_device = each.value["ebs_block_device"]
│
│ An argument named "ebs_block_device" is not expected here. Did you mean to define a block of type
│ "ebs_block_device"?
╵
ebs_block_device is a block not an argument. So it means you have to do the following:
resource "aws_instance" "specific_instances" {
for_each = var.instances
# rest
# ....
ebs_block_device {
device_name = each.value["ebs_block_device"]["device_name"]
volume_size = each.value["ebs_block_device"]["volume_size"]
volume_type = each.value["ebs_block_device"]["volume_type"]
tags = each.value["ebs_block_device"]["tags"]
}
}

Terraform workspaces creation

I am trying to write Terraform code for creating workspaces and will be using the same for future creation as well. I am facing an issue while referencing the bundle_ids, since there are multiple bundles available and the required bundle changes each time. Please suggest a better approach to this if possible.
resource "aws_workspaces_workspace" "this" {
directory_id = var.directory_id
for_each = var.workspace_user_names
user_name = each.key
bundle_id = [local.bundle_ids["${each.value}"]]
root_volume_encryption_enabled = true
user_volume_encryption_enabled = true
volume_encryption_key = var.volume_encryption_key
workspace_properties {
user_volume_size_gib = 50
root_volume_size_gib = 80
running_mode = "AUTO_STOP"
running_mode_auto_stop_timeout_in_minutes = 60
}
tags = var.tags
}
terraform.tfvars
directory_id = "d-xxxxxxx"
##Add the Workspace Username & bundle_id;
workspace_user_names = {
"User1" = "n"
"User2" = "y"
"User3" = "k"
}
locals.tf
locals {
bundle_ids = {
"n" = "wsb-nn"
"y" = "wsb-yy"
"k" = "wsb-kk"
}
}
Terraform plan
Error: Incorrect attribute value type
│
│ on r_aws_workspaces.tf line 8, in resource "aws_workspaces_workspace" "this":
│ 8: bundle_id = [local.bundle_ids["${each.value}"]]
│ ├────────────────
│ │ each.value will be known only after apply
│ │ local.bundle_ids is object with 3 attributes
│
│ Inappropriate value for attribute "bundle_id": string required.
At the moment you have a list, but it should be a string. Assuming everything else is correct, the following should address your error:
bundle_id = local.bundle_ids[each.value]