I am writing a Terraform script to automate provisioning ACM certificates for domains. The issue I am facing is how to merge domain_name and subject_alternative_names: it should pick the first domain from domain_name, merge it with the first block in subject_alternative_names, and so on.
variable.tf
variable "domain_name" {
description = "Configuration for alb settings"
default = [
"domain.com",
"helloworld.com",
"helloworld2.com",
]
}
variable "subject_alternative_names" {
description = "subject_alternative_names"
default = [ {
domain.com = {
"domain.com",
"domain2.com",
"domain3.com",
},
helloworld.com = {
"helloworld1.com",
"helloworld2.com"
},
hiworld.com = {
"hiworld1.com",
"hiworld2.com"
}
}]
}
variable "region" {
description = "name of the region"
default = "us-east-1"
}
variable "validation_method" {
description = "name of the region"
default = "DNS"
}
variable "tags" {
description = "name of the region"
default = "Test"
}
working variable.tf
variable "domain_name" {
description = "Configuration for alb settings"
default = [
"domain.com",
"helloworld.com",
"helloworld2.com",
"helloworld1.com",
"helloworld3.com",
]
}
variable "subject_alternative_names"{
description = "subject_alternative_names"
default = [
"domain.com",
"helloworld.com",
"helloworld2.com",
"helloworld1.com",
"helloworld3.com",
]
}
variable "region" {
description = "name of the region"
default = "us-east-1"
}
variable "validation_method" {
description = "name of the region"
default = "DNS"
}
variable "tags" {
description = "name of the region"
default = "Test"
}
main.tf
module "acm" {
count = length(var.domain_name)
source = "./modules/acm"
domain_name = var.domain_name[count.index]
validation_method = var.validation_method
tags = var.tags
subject_alternative_names = var.subject_alternative_names
}
resource.tf
variable "domain_name" {
default = ""
description = "Nmae of the domain"
}
variable "validation_method" {
default = ""
description = "Validation method DNS or EMAIL"
}
variable "tags" {
default = ""
description = "tags for the ACM certificate"
}
variable "subject_alternative_names" {
default = ""
description = "subject_alternative_names"
}
resource "aws_acm_certificate" "acm_cert" {
domain_name = var.domain_name
validation_method = var.validation_method
subject_alternative_names = var.subject_alternative_names
lifecycle {
create_before_destroy = true
}
tags = {
Name = var.tags
}
}
The easiest way would be to use a single map:
variable "domain_name_with_alternate_names" {
default = {
"domain.com" = [
"domain.com",
"domain2.com",
"domain3.com",
],
"helloworld.com" = [
"helloworld1.com",
"helloworld2.com"
],
"hiworld.com" = [
"hiworld1.com",
"hiworld2.com"
],
"hiwodd4.com" = []
}
}
module "acm" {
for_each = var.domain_name_with_alternate_names
source = "./modules/acm"
domain_name = each.key
validation_method = var.validation_method
tags = var.tags
subject_alternative_names = each.value
}
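For this to work, the module's own variable declarations should be typed to match: domain_name as a string and subject_alternative_names as a list of strings rather than "". A minimal sketch of the module-side variables, assuming the rest of resource.tf stays the same:
variable "domain_name" {
  type        = string
  description = "Name of the domain"
}
variable "subject_alternative_names" {
  type        = list(string)
  default     = []
  description = "subject_alternative_names"
}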
My problem is that I can't dynamically attach the created disks to the VPS; the google_compute_disk_attach module cannot be used.
Here is my code.
What is the correct way to handle this situation?
resource "google_compute_instance" "vps" {
name = var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = { for vol in var.volumes : vol.volume_name => vol }
content {
source = element(var.volumes[*].volume_name, 0)
}
}
network_interface {
subnetwork = var.server_network
access_config {
nat_ip = google_compute_address.static_ip.address
}
}
resource "google_compute_disk" "volume" {
for_each = { for vol in var.volumes : vol.volume_name => vol }
name = each.value.volume_name
type = each.value.volume_type
size = each.value.volume_size
zone = var.server_datacenter
labels = each.value.volume_labels
}
The volumes variable:
volumes = [{
  volume_name = "v3-postgres-saga-import-test-storage"
  volume_size = "40"
  volume_type = "pd-ssd"
  volume_labels = {
    environment = "production"
    project     = "v3"
    type        = "storage"
  }
}, {
  volume_name = "volume-vpstest2"
  volume_size = "20"
  volume_type = "pd-ssd"
  volume_labels = {
    environment = "production"
    project     = "v2"
    type        = "storage"
  }
}]
If I do something like this, I get an error:
source = google_compute_disk.volume[*].self_link
This object does not have an attribute named "self_link".
Since you've used for_each in google_compute_disk.volume, it will be a map, not a list. Thus you can get all the self_link values as follows:
source = values(google_compute_disk.volume)[*].self_link
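If the goal is to attach every created disk rather than picking one by index, you can iterate over the resource map itself inside the dynamic block, which also gives Terraform an explicit dependency on the disks. A minimal sketch under that assumption:
dynamic "attached_disk" {
  # google_compute_disk.volume is a map keyed by volume_name
  for_each = google_compute_disk.volume
  content {
    source = attached_disk.value.self_link
  }
}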
You can also define the volumes variable directly as a map instead of a list:
variables.tf file:
variable "volumes" {
default = {
postgres_saga = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_vpstest2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
Instead of a variable, you can also use a local value loaded from a JSON configuration file. Example structure of the Terraform module:
project
  module
    main.tf
    locals.tf
    resource
      volumes.json
volumes.json file
{
  "volumes": {
    "postgres_saga": {
      "volume_name": "v3-postgres-saga-import-test-storage",
      "volume_size": "40",
      "volume_type": "pd-ssd",
      "volume_labels": {
        "environment": "production",
        "project": "v3",
        "type": "storage"
      }
    },
    "volume_vpstest2": {
      "volume_name": "volume-vpstest2",
      "volume_size": "20",
      "volume_type": "pd-ssd",
      "volume_labels": {
        "environment": "production",
        "project": "v2",
        "type": "storage"
      }
    }
  }
}
locals.tf file:
locals {
  volumes = jsondecode(file("${path.module}/resource/volumes.json"))["volumes"]
}
main.tf file:
resource "google_compute_instance" "vps" {
name = var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = [
var.volumes
# local.volumes
]
content {
source = attached_disk.value["volume_name"]
}
}
network_interface {
subnetwork = var.server_network
access_config {
nat_ip = google_compute_address.static_ip.address
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
# local.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
With a map, you can use for_each directly on the google_compute_disk.volume resource without any transformation.
You can also use the same map in a dynamic block.
I am trying to pass the S3 bucket name and create_user flag into a locals block in main.tf so that both end up as lists. I then pass list_of_buckets to the s3 module to create the buckets, and loop over users_to_create in the s3_user module to create a user whenever the boolean is set to true. All of these values are passed to variable.tf and then to main.tf.
dev.tfvars
wea-nonprod = {
  services = {
    s3 = {
      sthree = {
        create_user = true
      }
      sfour = {
        create_user = true
      }
      sfive = {
        create_user = true
      }
    }
  }
}
variable.tf
variable "s3_buckets" {
type = list(map)
}
main.tf
locals {
  users_to_create = ""
  list_of_buckets = ""
}
module "s3" {
source = "../../s3"
name = join("-", [var.name_prefix, "s3"])
tags = merge(var.tags, {Name = join("-", [var.name_prefix, "s3"])})
buckets = list_of_buckets
sse_algorithm = "AES256"
access_log_bucket_name = var.access_log_bucket_name
}
module "s3_user" {
for_each = local.users_to_create
source = "./service-s3-bucket-user"
name = join("-", [var.name_prefix, each.key])
tags = var.tags
bucket_arn = module.s3.bucket_arns[each.key]
depends_on = [module.s3]
}
Just iterate over your wea-nonprod map:
locals {
  users_to_create = [for name, config in var.wea-nonprod.services.s3 : name if config.create_user == true]
  list_of_buckets = [for name, config in var.wea-nonprod.services.s3 : name]
}
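Given the dev.tfvars above, where every create_user is true, both comprehensions evaluate to the same list of keys:
users_to_create = ["sthree", "sfour", "sfive"]
list_of_buckets = ["sthree", "sfour", "sfive"]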
And a few changes to your module blocks:
module "s3" {
source = "../../s3"
name = "${var.name_prefix}-s3"
tags = merge(var.tags, { Name = "${var.name_prefix}-s3" })
buckets = local.list_of_buckets
sse_algorithm = "AES256"
access_log_bucket_name = var.access_log_bucket_name
}
module "s3_user" {
count = length(local.users_to_create)
source = "./service-s3-bucket-user"
name = "${var.name_prefix}${local.users_to_create[count.index]}"
tags = var.tags
bucket_arn = module.s3.bucket_arns[local.users_to_create[count.index]]
depends_on = [module.s3]
}
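If you prefer for_each over count here, so that each user instance is keyed by bucket name instead of list position, a sketch under the same assumptions:
module "s3_user" {
  for_each   = toset(local.users_to_create)
  source     = "./service-s3-bucket-user"
  name       = "${var.name_prefix}-${each.key}"
  tags       = var.tags
  bucket_arn = module.s3.bucket_arns[each.key]
  depends_on = [module.s3]
}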
Error: Error creating WAFv2 WebACL: WAFInvalidParameterException: Error reason: You have used none or multiple values for a field that requires exactly one value., field: RULE_ACTION, parameter: RuleAction(block=null, allow=null, count=null)
{
  RespMetadata: {
    StatusCode: 400,
    RequestID: "24106754-b0db-4497-8e19-e72f8908dc19"
  },
  Field: "RULE_ACTION",
  Message_: "Error reason: You have used none or multiple values for a field that requires exactly one value., field: RULE_ACTION, parameter: RuleAction(block=null, allow=null, count=null)",
  Parameter: "RuleAction(block=null, allow=null, count=null)",
  Reason: "You have used none or multiple values for a field that requires exactly one value."
}
on .terraform/modules/wafv2/main.tf line 18, in resource "aws_wafv2_web_acl" "main":
18: resource "aws_wafv2_web_acl" "main" {
I am getting this error while trying to deploy a WAFv2 web ACL with Terraform; any help is appreciated.
Here is a small portion of the WAFv2 code:
resource "aws_wafv2_web_acl" "main" {
name = var.name
description = "WAFv2 ACL for ${var.name}"
scope = var.scope
default_action {
allow {}
}
visibility_config {
cloudwatch_metrics_enabled = true
sampled_requests_enabled = true
metric_name = var.name
}
dynamic "rule" {
for_each = var.managed_rules
content {
name = rule.value.name
priority = rule.value.priority
override_action {
dynamic "none" {
for_each = rule.value.override_action == "none" ? [1] : []
content {}
}
I am trying to figure out why the error still occurs. Could it be a problem with my WAFv2 configuration?
There may be a number of reasons why this error happens, so without seeing the full Terraform it is a bit hard to tell what is going on.
I've seen this happen when my ACL contained two rules: a rule_group_reference_statement and a rate_based_statement.
My problem was the rule group reference needed an override_action:
override_action {
  none {}
}
I didn't realize that either an override_action or an action was required, but I found out about it here: https://github.com/hashicorp/terraform-provider-aws/issues/14094#issuecomment-655625254
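To make the distinction concrete: a rule with its own statement (for example a rate_based_statement) takes an action block, while a rule that references a rule group or managed rule set takes an override_action. A hedged sketch, with the rule name and limit chosen arbitrarily:
rule {
  name     = "rate-limit"
  priority = 1
  # a regular rule gets an action, not an override_action
  action {
    block {}
  }
  statement {
    rate_based_statement {
      limit              = 2000
      aggregate_key_type = "IP"
    }
  }
  visibility_config {
    cloudwatch_metrics_enabled = true
    metric_name                = "rate-limit"
    sampled_requests_enabled   = true
  }
}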
I was using the Java version of the CDK and the way to get the "none {}" override action is unclear from the documentation. But this works:
.overrideAction(CfnWebACL.OverrideActionProperty.builder()
    .none(new HashMap<String, Object>())
    .build())
resource "aws_wafv2_ip_set" "ip_whitelist" {
name = "ip-whitelist"
scope = "REGIONAL"
ip_address_version = "IPV4"
addresses = ["100.12.10.20/32"] # your ip
}
resource "aws_wafv2_web_acl" "web_acl" {
name = "waf-rules"
description = "waf rules"
scope = "REGIONAL"
default_action {
allow {}
}
# ipsets
rule {
name = "ip-whitelist"
priority = 0
action {
allow {}
}
statement {
ip_set_reference_statement {
arn = aws_wafv2_ip_set.ip_whitelist.arn
}
}
visibility_config {
cloudwatch_metrics_enabled = true
metric_name = "Whitelist-ip"
sampled_requests_enabled = true
}
}
visibility_config {
cloudwatch_metrics_enabled = true
metric_name = "white-list-ip"
sampled_requests_enabled = true
}
dynamic "rule" {
for_each = var.rules
content {
name = rule.value.name
priority = rule.value.priority
override_action {
none {}
}
statement {
managed_rule_group_statement {
name = rule.value.managed_rule_group_statement_name
vendor_name = rule.value.managed_rule_group_statement_vendor_name
}
}
visibility_config {
cloudwatch_metrics_enabled = true
metric_name = rule.value.metric_name
sampled_requests_enabled = true
}
}
}
}
# variables.tf
variable "rules" {
type = list(any)
default = [
{
name = "AWS-AWSManagedRulesAdminProtectionRuleSet"
priority = 1
managed_rule_group_statement_name = "AWSManagedRulesAdminProtectionRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesPHPRuleSet"
priority = 2
managed_rule_group_statement_name = "AWSManagedRulesPHPRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesLinuxRuleSet"
priority = 3
managed_rule_group_statement_name = "AWSManagedRulesLinuxRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesAmazonIpReputationList"
priority = 4
managed_rule_group_statement_name = "AWSManagedRulesAmazonIpReputationList"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesSQLiRuleSet"
priority = 5
managed_rule_group_statement_name = "AWSManagedRulesSQLiRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesUnixRuleSet"
priority = 6
managed_rule_group_statement_name = "AWSManagedRulesUnixRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesCommonRuleSet"
priority = 7
managed_rule_group_statement_name = "AWSManagedRulesCommonRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
}
]
}
I am using version 2.0.50 of the AWS cli on Linux, and trying to create a v2 AWS WAF. Running the command aws wafv2 create-web-acl --cli-input-json file://waf.json results in the following response:
An error occurred (WAFInvalidParameterException) when calling the CreateWebACL operation: Error reason: Your statement has multiple values set for a field that requires exactly one value., field: RULE, parameter: Rule
Can somebody identify what is wrong with the following JSON, or confirm that they are seeing the same issue?
{
  "DefaultAction": {
    "Allow": {}
  },
  "Name": "test-web-acl",
  "Rules": [
    {
      "Name": "rule-one",
      "Priority": 1,
      "Statement": {
        "ManagedRuleGroupStatement": {
          "Name": "AWSManagedRulesUnixRuleSet",
          "VendorName": "AWS"
        }
      },
      "VisibilityConfig": {
        "CloudWatchMetricsEnabled": false,
        "MetricName": "rule-one-metric",
        "SampledRequestsEnabled": false
      }
    }
  ],
  "Scope": "REGIONAL",
  "VisibilityConfig": {
    "CloudWatchMetricsEnabled": false,
    "MetricName": "test-web-acl-metric",
    "SampledRequestsEnabled": false
  }
}
I can't see what is incorrect about the JSON according to the syntax described in the CreateWebACL documentation.
The answer is that the OverrideAction attribute is missing from the Rule object. After adding "OverrideAction": {"None": {}} to the Rule object, the ACL was created. The error message is misleading.
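For reference, applying that fix to the rule from the JSON above would look like this; only the OverrideAction field is new:
{
  "Name": "rule-one",
  "Priority": 1,
  "OverrideAction": { "None": {} },
  "Statement": {
    "ManagedRuleGroupStatement": {
      "Name": "AWSManagedRulesUnixRuleSet",
      "VendorName": "AWS"
    }
  },
  "VisibilityConfig": {
    "CloudWatchMetricsEnabled": false,
    "MetricName": "rule-one-metric",
    "SampledRequestsEnabled": false
  }
}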
# resources.tf
resource "aws_wafv2_ip_set" "ip_whitelist" {
name = var.waf_name
scope = var.waf_scope
ip_address_version = var.waf_ip_address_version
addresses = [var.waf_addresses]
}
resource "aws_wafv2_web_acl" "web_acl" {
name = var.waf_web_acl_name
description = var.waf_description
scope = var.waf_scope
default_action {
allow {}
}
# ipsets
rule {
name = var.waf_name
priority = 0
action {
allow {}
}
statement {
ip_set_reference_statement {
arn = aws_wafv2_ip_set.ip_whitelist.arn
}
}
visibility_config {
cloudwatch_metrics_enabled = true
metric_name = var.waf_name
sampled_requests_enabled = true
}
}
visibility_config {
cloudwatch_metrics_enabled = true
metric_name = var.waf_name
sampled_requests_enabled = true
}
dynamic "rule" {
for_each = var.rules
content {
name = rule.value.name
priority = rule.value.priority
override_action {
none {}
}
statement {
managed_rule_group_statement {
name = rule.value.managed_rule_group_statement_name
vendor_name = rule.value.managed_rule_group_statement_vendor_name
}
}
visibility_config {
cloudwatch_metrics_enabled = true
metric_name = rule.value.metric_name
sampled_requests_enabled = true
}
}
}
}
resource "aws_wafv2_web_acl_association" "waf_alb" {
resource_arn = aws_alb.alb.arn
web_acl_arn = aws_wafv2_web_acl.web_acl.arn
}
# variables.tf
# waf ipset name
variable "waf_name" {
  type    = string
  default = "ip-whitelist"
}
variable "waf_scope" {
  type    = string
  default = "REGIONAL"
}
variable "waf_ip_address_version" {
  type    = string
  default = "IPV4"
}
variable "waf_addresses" {
  type    = string
  default = "your-ip/32"
}
# waf details
variable "waf_web_acl_name" {
  type    = string
  default = "waf-rules"
}
variable "waf_description" {
  type    = string
  default = "waf rules"
}
# waf multiple rules
variable "rules" {
type = list(any)
default = [
{
name = "AWS-AWSManagedRulesAdminProtectionRuleSet"
priority = 1
managed_rule_group_statement_name = "AWSManagedRulesAdminProtectionRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesPHPRuleSet"
priority = 2
managed_rule_group_statement_name = "AWSManagedRulesPHPRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesLinuxRuleSet"
priority = 3
managed_rule_group_statement_name = "AWSManagedRulesLinuxRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesAmazonIpReputationList"
priority = 4
managed_rule_group_statement_name = "AWSManagedRulesAmazonIpReputationList"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesSQLiRuleSet"
priority = 5
managed_rule_group_statement_name = "AWSManagedRulesSQLiRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesUnixRuleSet"
priority = 6
managed_rule_group_statement_name = "AWSManagedRulesUnixRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
},
{
name = "AWS-AWSManagedRulesCommonRuleSet"
priority = 7
managed_rule_group_statement_name = "AWSManagedRulesCommonRuleSet"
managed_rule_group_statement_vendor_name = "AWS"
metric_name = "foo_name"
}
]
}
I have created an EKS cluster using Terraform. After creation I am trying to update one parameter, endpoint_public_access = false, but I am getting the following error:
Error: error updating EKS Cluster (ec1-default-ics-common-alz-eks-cluster) config: InvalidParameterException: Cluster is already at the desired configuration with endpointPrivateAccess: false, endpointPublicAccess: true, and Public Endpoint Restrictions: [0.0.0.0/0]
{
  ClusterName: "ec1-default-ics-common-alz-eks-cluster",
  Message_: "Cluster is already at the desired configuration with endpointPrivateAccess: false, endpointPublicAccess: true, and Public Endpoint Restrictions: [0.0.0.0/0]"
}
on ../../terraform-hli-aws-eks/eks_cluster/main.tf line 1, in resource "aws_eks_cluster" "eks_cluster":
   1: resource "aws_eks_cluster" "eks_cluster" {
Here is the Terraform plan:
~ resource "aws_eks_cluster" "eks_cluster" {
arn = "<arn>"
certificate_authority = [
{
data = "<datat>"
},
]
created_at = "2020-03-09 08:59:28 +0000 UTC"
enabled_cluster_log_types = [
"api",
"audit",
]
endpoint = "<url>.eks.amazonaws.com"
id = "ec1-default-ics-common-alz-eks-cluster"
identity = [
{
oidc = [
{
issuer = "<url>"
},
]
},
]
name = "ec1-default-ics-common-alz-eks-cluster"
platform_version = "eks.9"
role_arn = "<url>"
status = "ACTIVE"
tags = {
"Environment" = "common"
"Project" = "ics-dlt"
"Terraform" = "true"
}
version = "1.14"
~ vpc_config {
cluster_security_group_id = "sg-05ab244e50689862a"
endpoint_private_access = false
endpoint_public_access = true
~ public_access_cidrs = [
- "0.0.0.0/0",
]
security_group_ids = [
"sg-081527f14bf1a6646",
]
subnet_ids = [
"subnet-08011850bb5b7d7ca",
"subnet-0fab8917fdc533eb3",
]
vpc_id = "vpc-07ba84e4a6f54d91f"
}
}
Terraform code
resource "aws_eks_cluster" "eks_cluster" {
name = var.name
role_arn = aws_iam_role.eks_cluster_role.arn
vpc_config {
subnet_ids = var.cluster_subnet_ids
endpoint_private_access = var.endpoint_private_access
endpoint_public_access = var.endpoint_public_access
public_access_cidrs = var.public_access_cidrs
security_group_ids = var.security_group_ids
}
enabled_cluster_log_types = var.enabled_cluster_log_types
tags = var.tags
depends_on = [
aws_iam_role_policy_attachment.eks_cluster_role-AmazonEKSClusterPolicy,
aws_iam_role_policy_attachment.eks_cluster_role-AmazonEKSServicePolicy,
]
}
data "template_file" "eks_cluster_role" {
template = "${file("${path.module}/roles/cluster_role.json")}"
}
resource "aws_iam_role" "eks_cluster_role" {
name = var.cluster_role_name
assume_role_policy = data.template_file.eks_cluster_role.rendered
}
resource "aws_iam_role_policy_attachment" "eks_cluster_role-AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.eks_cluster_role.name
}
resource "aws_iam_role_policy_attachment" "eks_cluster_role-AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = aws_iam_role.eks_cluster_role.name
}
public_access_cidrs only applies while endpoint_public_access = true, so when you turn the public endpoint off you need to "disable" that setting as well:
public_access_cidrs = null
You could do something like this:
public_access_cidrs = var.endpoint_public_access == true ? var.public_access_cidrs : null
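Applied to the vpc_config from the module above, that would look like this sketch:
vpc_config {
  subnet_ids              = var.cluster_subnet_ids
  endpoint_private_access = var.endpoint_private_access
  endpoint_public_access  = var.endpoint_public_access
  # only pass CIDR restrictions while the public endpoint is enabled
  public_access_cidrs     = var.endpoint_public_access ? var.public_access_cidrs : null
  security_group_ids      = var.security_group_ids
}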