Add values to deep nested map in Terraform - amazon-web-services

I'm using the AWS EKS module 18.20.5 and I'm trying to add values to a deeply nested map. The map is
variable "eks_managed_node_groups" {
description = "Map of managed node group definitions to create"
type = any
default = {
management_cluster_on_demand = {
desired_capacity = 3
max_capacity = 10
min_capacity = 3
instance_types = ["c5.2xlarge"]
capacity_type = "ON_DEMAND"
k8s_labels = {
Environment = "testing"
GithubRepo = "infrastructure-modules-kubernetes-cluster"
GithubSource = "terraform-aws-modules"
}
additional_tags = {
cluster = "management_cluster_new"
}
block_device_mappings = {
xvda = {
device_name = "/dev/xvda"
ebs = {
volume_size = 50
volume_type = "gp2"
delete_on_termination = true
}
}
}
}
}
}
What I am aiming to do is add some extra values into the ebs section, specifically
encrypted = true
kms_key_id = module.kms.arn
This would force any volumes added to a node group to have their EBS volume encrypted with a KMS key.
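To make the goal concrete, this is roughly the shape I want the ebs block to end up with (a sketch; module.kms.arn stands for the ARN output of my KMS module):
ebs = {
  volume_size           = 50
  volume_type           = "gp2"
  delete_on_termination = true
  encrypted             = true
  kms_key_id            = module.kms.arn
}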
I've tried using locals to add the values, but the issue is that when I get to the xvda section, the loop tries to iterate over the strings inside it and fails:
locals {
  managed_nodes = flatten([
    for group in var.eks_managed_node_groups : [
      for vol in group.block_device_mappings : [
        for settings in vol : [
          for values in settings : values
        ]
      ]
    ]
  ])
}
which, when running terraform plan, results in the following error:
│ Error: Iteration over non-iterable value
│
│ on main.tf line 9, in locals:
│ 8: for settings in vol: [
│ 9: for values in settings: values
│ 10: ]
│
│ A value of type string cannot be used as the collection in a 'for' expression.
Is this even possible to accomplish?
Thanks.

I think the following should do the job:
locals {
  eks_managed_node_groups = {
    for group_name, group in var.eks_managed_node_groups :
    group_name => merge(group, {
      block_device_mappings = {
        for device_name, device in group.block_device_mappings :
        device_name => merge(device, {
          ebs = merge(device.ebs, {
            encrypted  = true
            kms_key_id = "module.kms.arn"
          })
        })
      }
    })
  }
}
resulting in:
{
  "management_cluster_on_demand" = {
    "additional_tags" = {
      "cluster" = "management_cluster_new"
    }
    "block_device_mappings" = {
      "xvda" = {
        "device_name" = "/dev/xvda"
        "ebs" = {
          "delete_on_termination" = true
          "encrypted" = true
          "kms_key_id" = "module.kms.arn"
          "volume_size" = 50
          "volume_type" = "gp2"
        }
      }
    }
    "capacity_type" = "ON_DEMAND"
    "desired_capacity" = 3
    "instance_types" = [
      "c5.2xlarge",
    ]
    "k8s_labels" = {
      "Environment" = "testing"
      "GithubRepo" = "infrastructure-modules-kubernetes-cluster"
      "GithubSource" = "terraform-aws-modules"
    }
    "max_capacity" = 10
    "min_capacity" = 3
  }
}
I don't have your module.kms.arn, so I used the literal string "module.kms.arn" as a placeholder; you will have to change it back to the module.kms.arn reference.
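To use it, pass the transformed map to the module in place of the raw variable. A sketch, assuming the usual terraform-aws-modules call for version 18.20.5 (other required arguments such as cluster_name are omitted):
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "18.20.5"
  # ... cluster_name, cluster_version, vpc_id, subnet_ids, etc. ...

  # Use the local with the encryption settings merged in, not var.eks_managed_node_groups
  eks_managed_node_groups = local.eks_managed_node_groups
}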

Related

How can I create several ebs volumes each of a different size using a map in terraform?

I have a terraform tfvars file with a map of values that looks like this:
name_map = [
  {
    name          = "devbox"
    device_names  = ["/dev/xvdg", "/dev/xvdh"]
    volume_size   = ["900", "200"]
    group         = "hosts"
    instance_type = "m5a.2xlarge"
  },
  {
    name          = "devbox2"
    device_names  = ["/dev/xvdg", "/dev/xvdh"]
    volume_size   = ["300", "200"]
    group         = "hosts"
    instance_type = "m5a.2xlarge"
  }
]
My tf file looks like this:
resource "aws_instance" "node" {
count = length(var.name_map)
dynamic "ebs_block_device" {
for_each = [for device in var.name_map.*.device_names[count.index] : {
device_name = device,
volume_size = var.name_map.*.volume_size[count.index]
}]
content {
device_name = ebs_block_device.value.device_name
volume_type = "gp2"
volume_size = ebs_block_device.value.volume_size
delete_on_termination = true
}
}
So basically for the "devbox" instance I'd like "/dev/xvdg" to be 900 GB and "/dev/xvdh" to be 200 GB. The current setup works to iterate through the device names of each mapping and get a single volume size, but I'm trying to expand it to use a different volume size for each device.
How would I do this?
I've tried a nested for_each statement but I keep getting errors. Would a flattened structure be the solution here? I'd love to see an example of what this would look like.
I think what you want to do is the following:
resource "aws_instance" "node" {
count = length(var.name_map)
instance_type = var.name_map[count.index].instance_type
ami = "..." # Fill in a valid AMI here
dynamic "ebs_block_device" {
for_each = [for i, device in var.name_map[count.index].device_names : {
device_name = device,
volume_size = var.name_map[count.index].volume_size[i]
}]
content {
device_name = ebs_block_device.value.device_name
volume_type = "gp2"
volume_size = ebs_block_device.value.volume_size
delete_on_termination = true
}
}
}
While this works, I suggest you do the following:
variable "name_map" {
default = [
{
name = "devbox"
devices = [
{
device_name = "/dev/xvdg",
volume_size = 900
},
{
device_name = "/dev/xvdh",
volume_size = 200
}
]
group = "hosts"
instance_type = "m5a.2xlarge"
},
{
name = "devbox2"
devices = [
{
device_name = "/dev/xvdg",
volume_size = 900
},
{
device_name = "/dev/xvdh",
volume_size = 200
}
]
group = "hosts"
instance_type = "m5a.2xlarge"
}
]
}
Note that the device_name and the volume_size are grouped together. Now we can use a simple for loop where we don't have to rely on indexing:
resource "aws_instance" "node" {
count = length(var.name_map)
instance_type = var.name_map[count.index].instance_type
ami = "..." # fill in a valid AMI name
dynamic "ebs_block_device" {
# Notice the i variable (index) was dropped here
for_each = [for device in var.name_map[count.index].devices : {
device_name = device.device_name,
volume_size = device.volume_size
}]
content {
device_name = ebs_block_device.value.device_name
volume_type = "gp2"
volume_size = ebs_block_device.value.volume_size
delete_on_termination = true
}
}
}
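As a side note (not part of the original answer), if you want Terraform to validate that restructured shape instead of relying on the implied any type, a sketch of an explicit type constraint for name_map:
variable "name_map" {
  type = list(object({
    name = string
    devices = list(object({
      device_name = string
      volume_size = number
    }))
    group         = string
    instance_type = string
  }))
}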
I would nest your map further to create something like this:
name_map = [
  {
    name = "devbox"
    root_block_device = {
      ...settings
    }
    ebs_block_devices = toset([
      {
        name = "/dev/xvdg"
        size = "900"
      },
      {
        name = "/dev/xvdh"
        size = "200"
      }
    ])
    group         = "hosts"
    instance_type = "m5a.2xlarge"
  },
  ...
]
and then in your resource code you can loop over the set for each instance:
resource "aws_instance" "instance" {
count = length(var.name_map)
...
root_block_device {
...settings from var.name_map[count.index].root_block_device
}
dynamic "ebs_block_device" {
for_each = var.name_map[count.index].ebs_block_devices
content {
device_name = ebs_block_device.value.name
volume_size = ebs_block_device.value.size
}
}
}
If you want the root volume to persist after termination, I would suggest adding an explicit EBS root volume; otherwise you can ignore the root_block_device and it will create an ephemeral device that contains the image.
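For example, a minimal sketch of such a root_block_device (the size is a placeholder; delete_on_termination = false is what keeps the root volume around after the instance is terminated):
resource "aws_instance" "instance" {
  count = length(var.name_map)
  # ... ami, instance_type, ebs_block_device blocks as above ...

  root_block_device {
    volume_type           = "gp2"
    volume_size           = 50    # placeholder size, not from the question
    delete_on_termination = false # keep the root EBS volume after termination
  }
}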

Terraform, dynamic attach multiple disk to 1 vps

My problem is that I can't dynamically attach the created disks to the VPS. The google_compute_disk_attach module cannot be used here.
Here is my code. What is the correct way to handle this situation?
resource "google_compute_instance" "vps" {
name = var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = { for vol in var.volumes : vol.volume_name => vol }
content {
source = element(var.volumes[*].volume_name, 0)
}
}
network_interface {
subnetwork = var.server_network
access_config {
nat_ip = google_compute_address.static_ip.address
}
}
resource "google_compute_disk" "volume" {
for_each = { for vol in var.volumes : vol.volume_name => vol }
name = each.value.volume_name
type = each.value.volume_type
size = each.value.volume_size
zone = var.server_datacenter
labels = each.value.volume_labels
}
The volumes variable:
volumes = [{
  volume_name = "v3-postgres-saga-import-test-storage"
  volume_size = "40"
  volume_type = "pd-ssd"
  volume_labels = {
    environment = "production"
    project     = "v3"
    type        = "storage"
  }
}, {
  volume_name = "volume-vpstest2"
  volume_size = "20"
  volume_type = "pd-ssd"
  volume_labels = {
    environment = "production"
    project     = "v2"
    type        = "storage"
  }
}]
If I do something like this, I get an error:
source = google_compute_disk.volume[*].self_link
This object does not have an attribute named "self_link".
Since you've used for_each in google_compute_disk.volume, it will be a map, not a list. Thus you can list all self_link as follows:
source = values(google_compute_disk.volume)[*].self_link
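Alternatively (a sketch, not from the original code), you can iterate over the disk resources themselves in the dynamic block, so each attached_disk gets a single self_link and an implicit dependency on its google_compute_disk:
dynamic "attached_disk" {
  # google_compute_disk.volume is a map of disk objects keyed by volume_name
  for_each = google_compute_disk.volume
  content {
    source = attached_disk.value.self_link
  }
}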
You can also use the volumes variable directly as a map instead of an array.
variables.tf file:
variable "volumes" {
default = {
postgres_saga = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_vpstest2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
Instead of a variable, you can also use a local value loaded from a JSON configuration file. Example structure of the Terraform module:
project
  module
    main.tf
    locals.tf
    resource
      volumes.json
volumes.json file
{
  "volumes": {
    "postgres_saga": {
      "volume_name": "v3-postgres-saga-import-test-storage",
      "volume_size": "40",
      "volume_type": "pd-ssd",
      "volume_labels": {
        "environment": "production",
        "project": "v3",
        "type": "storage"
      }
    },
    "volume_vpstest2": {
      "volume_name": "volume-vpstest2",
      "volume_size": "20",
      "volume_type": "pd-ssd",
      "volume_labels": {
        "environment": "production",
        "project": "v2",
        "type": "storage"
      }
    }
  }
}
locals.tf file
locals {
  volumes = jsondecode(file("${path.module}/resource/volumes.json"))["volumes"]
}
main.tf file:
resource "google_compute_instance" "vps" {
name = var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = [
var.volumes
# local.volumes
]
content {
source = attached_disk.value["volume_name"]
}
}
network_interface {
subnetwork = var.server_network
access_config {
nat_ip = google_compute_address.static_ip.address
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
# local.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
With a map, you can use for_each directly on the google_compute_disk.volume resource without any transformation.
You can also use this map in a dynamic block.

Access all resource created by for_each in other for loop Terraform

I want to create multiple ACM certificates and the Route 53 records for their validation. I just can't figure out how to reference all of the acm resources created by for_each inside the route53 resource block, which loops over the acm resource to get all the DNS validation attributes. The code works fine if I set one cert in the variable and reference it directly by name, but how can I loop over all domain names to reference them in the for loop?
The issue is this line:
for dvo in aws_acm_certificate.web[for i in keys(var.certificates) : i]
which is returning
The index operator must end with a closing bracket ("]").
Adding a second bracket like this
for dvo in aws_acm_certificate.web[[for i in keys(var.certificates) : i]]
returns this error:
│
│ on main.tf line 21, in resource "aws_route53_record" "domain_validation":
│ 21: for dvo in aws_acm_certificate.web[[for i in keys(var.certificates) : i]].domain_validation_options : dvo.domain_name => {
│ ├────────────────
│ │ aws_acm_certificate.web is object with 2 attributes
│ │ var.certificates is object with 2 attributes
│
│ The given key does not identify an element in this collection value: string
│ required.
resource "aws_acm_certificate" "web" {
for_each = var.certificates
domain_name = "${replace(each.key, var.search_period, var.replace_period)}"
subject_alternative_names = each.value.subject_alternative_names
validation_method = "DNS"
}
resource "aws_route53_record" "domain_validation" {
for_each = var.dns_validation ? {
for dvo in aws_acm_certificate.web[[for i in keys(var.certificates) : i]].domain_validation_options : dvo.domain_name => {
name = dvo.resource_record_name
record = dvo.resource_record_value
type = dvo.resource_record_type
}
} : {}
allow_overwrite = true
name = each.value.name #aws_acm_certificate.web[each.key].domain_validation_options.0.resource_record_name
records = [each.value.record] #aws_acm_certificate.web[each.key].domain_validation_options.0.resource_record_value
ttl = 60
type = each.value.type #aws_acm_certificate.web[each.key].domain_validation_options.0.resource_record_type
zone_id = data.aws_route53_zone.selected[[for i in keys(var.certificates) : i]].zone_id
}
var.certificates looks like this:
certificates = {
  "xxx.com" = {
    subject_alternative_names = ["*.xxx.com", "*.dev.xxx.com", "*.stage.xxx.com", "*.preprod.xxx.com"]
  },
  "zzz.com" = {
    subject_alternative_names = ["*.dev.zzz.com", "*.zzz.com"]
  },
}
aws_acm_certificate.web in console looks like this:
> aws_acm_certificate.web
{
  "test.com" = {
    "arn" = "arn:aws:acm:eu-west-1:584637875403:certificate/a6fed-01c6-4f2c-ad87-59c04877bd0b"
    "certificate_authority_arn" = ""
    "certificate_body" = tostring(null)
    "certificate_chain" = tostring(null)
    "domain_name" = "test.com"
    "domain_validation_options" = toset([
      {
        "domain_name" = "*.test.com"
        "resource_record_name" = "_d5e2266fa07c911501b806f3d19e.test.com."
        "resource_record_type" = "CNAME"
        "resource_record_value" = "_fcc7913c9269201f77625b7f71ec.ltyvprtsjl.acm-validations.aws."
      },
      {
        "domain_name" = "*.dev.test.com"
        "resource_record_name" = "_f2aa63aabaae8cd721bf0143dee6.dev.test.com."
        "resource_record_type" = "CNAME"
        "resource_record_value" = "_f461eca5849d2a3e218dea91955.ltyvprtsjl.acm-validations.aws."
      },
      {
        "domain_name" = "*.preprod.test.com"
        "resource_record_name" = "_2747174805245587c6f9811a1180.preprod.test.com."
        "resource_record_type" = "CNAME"
        "resource_record_value" = "_7d3ccdf1006b12074ebcbc9c3d1.ltyvprtsjl.acm-validations.aws."
      },
      {
        "domain_name" = "*.stage.test.com"
        "resource_record_name" = "_6f571d29f334dcccfe098a2371c.stage.test.com."
        "resource_record_type" = "CNAME"
        "resource_record_value" = "_9c9657a4839d827d1ff6db0ffd0.ltyvprtsjl.acm-validations.aws."
      },
      {
        "domain_name" = "test.com"
        "resource_record_name" = "_d5e2266fa07c91bad71501bd19e.test.com."
        "resource_record_type" = "CNAME"
        "resource_record_value" = "_fcc7913c926926efc05b7f71ec.ltyvprtsjl.acm-validations.aws."
      },
    ])
    "id" = "arn:aws:acm:eu-west-1:584637875403:certificate/a6fed5981c6-4f2c-ad87-59c04877bd0b"
    "options" = tolist([
      {
        "certificate_transparency_logging_preference" = "ENABLED"
      },
    ])
    "private_key" = (sensitive)
    "status" = "ISSUED"
    "subject_alternative_names" = toset([
      "*.test.com",
      "*.dev.test.com",
      "*.preprod.test.com",
      "*.stage.test.com",
    ])
    "tags" = tomap({})
    "tags_all" = tomap({})
    "validation_emails" = tolist([])
    "validation_method" = "DNS"
  }
}
You have to flatten your aws_acm_certificate.web first. For example:
locals {
  certificate_web_flat = merge([
    for hzone, certs in aws_acm_certificate.web : {
      for domain_validation_option in certs.domain_validation_options :
      "${hzone}-${domain_validation_option.domain_name}" => {
        hzone                 = hzone
        domain_name           = domain_validation_option.domain_name
        resource_record_name  = domain_validation_option.resource_record_name
        resource_record_value = domain_validation_option.resource_record_value
        resource_record_type  = domain_validation_option.resource_record_type
      }
    }
  ]...)
}
then
resource "aws_route53_record" "domain_validation" {
for_each = var.dns_validation ? local.certificate_web_flat : {}
allow_overwrite = true
name = each.value.resource_record_name
records = [each.value.resource_record_value]
ttl = 60
type = each.value.resource_record_type
zone_id = data.aws_route53_zone.selected[each.value.hzone].zone_id
}
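As a side note, the merge([ ... ]...) pattern works because the inner for expression produces one map per certificate and the ... expansion passes those maps to merge() as separate arguments, collapsing them into a single flat map keyed by "<hosted zone>-<domain>". A toy illustration with hypothetical values:
locals {
  # merge([{ a = 1 }, { b = 2 }]...) is equivalent to merge({ a = 1 }, { b = 2 })
  flat_example = merge([
    { "xxx.com-*.xxx.com" = { resource_record_name = "_abc.xxx.com." } },
    { "zzz.com-*.zzz.com" = { resource_record_name = "_def.zzz.com." } },
  ]...)
}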

terraform multiple aws route53 alias records under locals

I have a set of alias record sets under Terraform locals and want to map them into Terraform's resource "aws_route53_record". Below is the locals value:
locals {
  AWSAliasRecordSets = [
    {
      Name = "api-dev.example.com.",
      Type = "A",
      AliasTarget = {
        HostedZoneId         = "EXAMPLE",
        DNSName              = "kjhskdjhf.cloudfront.net.",
        EvaluateTargetHealth = false
      }
    },
    {
      Name = "api.example.com.",
      Type = "A",
      AliasTarget = {
        HostedZoneId         = "EXAMPLE",
        DNSName              = "jsdhgfjkdshf.cloudfront.net.",
        EvaluateTargetHealth = false
      }
    }
  ]
}
What I am doing is:
locals {
  FlatAWSAliasRecordSets = merge([
    for idx, AWSAliasRecordSet in local.AWSAliasRecordSets :
    {
      for AliasTarget in AWSAliasRecordSet.AliasTarget :
      "${idx}-${AliasTarget}" => {
        HostedZoneId         = AliasTarget["HostedZoneId"]
        DNSName              = AliasTarget["DNSName"]
        EvaluateTargetHealth = AliasTarget["EvaluateTargetHealth"]
      }
    }
  ]...)
}

resource "aws_route53_record" "alias_records" {
  for_each = local.FlatAWSAliasRecordSets

  zone_id = each.value["HostedZoneId"]
  name    = each.value["AliasTarget"].Name
  type    = each.value["AliasTarget"].Type

  alias {
    zone_id                = each.value["HostedZoneId"]
    name                   = each.value["AliasTarget"].Name
    evaluate_target_health = each.value["EvaluateTargetHealth"]
  }
}
and when pushing to AWS (terraform apply), it fails with the error below:
│ on main.tf line 508, in locals:
│ 508: EvaluateTargetHealth = AliasTarget["EvaluateTargetHealth"]
│ This value does not have any indices.
Your AWSAliasRecordSets does not require flattening, as it is already flat. Thus you can just use a regular count for it:
resource "aws_route53_record" "alias_records" {
count = length(local.AWSAliasRecordSets)
zone_id = local.AWSAliasRecordSets[count.index]["AliasTarget"].HostedZoneId
name = local.AWSAliasRecordSets[count.index].Name
type = local.AWSAliasRecordSets[count.index].Type
alias {
zone_id = local.AWSAliasRecordSets[count.index]["AliasTarget"].HostedZoneId
name = local.AWSAliasRecordSets[count.index].DNSName
evaluate_target_health = each.value["AliasTarget"].EvaluateTargetHealth
}
}
You also have to double check your use of Name and DNSName. Your current usage does not seem right to me, but this would be a new issue if this is really the case.
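If you prefer for_each over count (so each record is tracked by its Name instead of its position in the list), a sketch assuming every Name in AWSAliasRecordSets is unique:
resource "aws_route53_record" "alias_records" {
  for_each = { for rs in local.AWSAliasRecordSets : rs.Name => rs }

  # zone_id mirrors the answer above; double-check it is the record's hosted zone,
  # not the alias target's hosted zone.
  zone_id = each.value.AliasTarget.HostedZoneId
  name    = each.value.Name
  type    = each.value.Type

  alias {
    zone_id                = each.value.AliasTarget.HostedZoneId
    name                   = each.value.AliasTarget.DNSName
    evaluate_target_health = each.value.AliasTarget.EvaluateTargetHealth
  }
}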

EKS Terraform scale using cpu

I'm using Terraform to instantiate an EKS cluster. How do I configure when the nodes scale? For example, I would like them to scale when CPU reaches 40%.
My eks module:
module "eks" {
......
worker_groups = [
{
name = "worker-group-1"
instance_type = "t3a.medium"
root_volume_size = "20"
asg_desired_capacity = 1
asg_max_size = 1
asg_recreate_on_change = true
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=normal,instance_type=normal"
tags = [.....
]
}
]
worker_groups_launch_template = [
{
name = "spot-family-t-low"
override_instance_types = ["t3a.medium", "t2.medium","t3.medium"]
spot_instance_pools = 3
root_volume_size = "8"
asg_recreate_on_change = true
autoscaling_enabled = true
asg_max_size = 2
asg_desired_capacity = 1
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot,type=t-low"
public_ip = false
},
]
workers_additional_policies = [aws_iam_policy.worker_policy.id]
}