I'm trying to create certain BigQuery tables with a dynamic time_partitioning block, using values from tfvars at runtime, as follows:
./tables/tables.tf:
resource "google_bigquery_table" "tables" {
for_each = var.tables == [] ? [] : toset(var.tables)
dataset_id = var.db_id
deletion_protection = false
table_id = each.key
dynamic "time_partitioning" {
for_each = var.partitioned_tables
content {
type = "DAY"
field = time_partitioning.value.field
}
}
labels = {
environment = var.environment
application = var.application
}
schema = fileexists("${path.module}/${var.db_id}/${each.key}.json") ? file("${path.module}/${var.db_id}/${each.key}.json") : null
}
main.tf:
resource "google_bigquery_dataset" "database" {
count = length(var.dbs)
dataset_id = var.dbs[count.index].db_id
friendly_name = var.dbs[count.index].db_name
description = "TF"
location = "US"
delete_contents_on_destroy = var.delete_contents_on_destroy
labels = {
environment = var.environment
application = var.dbs[count.index].app_name
}
}
module "tables" {
source = "./tables"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
environment = var.environment
application = var.dbs[count.index].app_name
tables = var.dbs[count.index].tables
partitioned_tables = var.dbs[count.index].partitioned_tables
}
module "iam" {
source = "./iam"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
iam_members = var.dbs[count.index].iam_members
}
dev.tfvars:
region = "us-central1"
project_id = "some-project"
dbs = [
  {
    db_id    = "dataset1"
    db_name  = "dataset1"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
      }
    ]
    tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
    partitioned_tables = [
      {
        table = "daily_sales"
        field = "sales_timestamp"
      },
      {
        table = "daily_inventory"
        field = "inventory_timestamp"
      }
    ]
  },
  {
    db_id    = "dataset2"
    db_name  = "dataset2"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
      }
    ]
    tables = []
  }
]
environment = "development"
delete_contents_on_destroy = true
var.dbs is declared as type = list(any).
Getting:
The given value is not suitable for var.dbs declared at
variables.tf:9,1-15: all list elements must have the same type.
Thanks in advance!
list(any) does not mean that you can have elements of "any" type in your list. All elements must have the same type, and you can't mix types as you do now (i.e. the second element is missing partitioned_tables). any only means that Terraform will infer a single type for the elements, but every element must match that single type. So you have three choices:
Remove type = list(any).
Fully define your type, with optional attributes instead of any (see the sketch after the example below).
Add partitioned_tables to the second element:
[
  {
    db_id    = "dataset1"
    db_name  = "dataset1"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
      }
    ]
    tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
    partitioned_tables = [
      {
        table = "daily_sales"
        field = "sales_timestamp"
      },
      {
        table = "daily_inventory"
        field = "inventory_timestamp"
      }
    ]
  },
  {
    db_id    = "dataset2"
    db_name  = "dataset2"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
      }
    ]
    partitioned_tables = []
    tables = []
  }
]
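For the second option, here is a minimal sketch of a fully typed dbs variable. This is an assumption on my part: it requires Terraform 1.3+ (where optional() with a default value is available), and the attribute names are taken from the tfvars above. With this declaration the second element can omit partitioned_tables entirely and it defaults to [].
variable "dbs" {
  type = list(object({
    db_id    = string
    db_name  = string
    app_name = string
    iam_members = list(object({
      role   = string
      member = string
    }))
    tables = list(string)
    # Optional with a default, so elements may omit it without breaking type inference
    partitioned_tables = optional(list(object({
      table = string
      field = string
    })), [])
  }))
}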
Related
I'm trying to create a dynamic method to create multiple DynamoDB tables, each with its own attributes. I tried for loops with dynamic blocks, lists of objects, etc., but was not able to iterate over the attributes for each table. The goal is to create multiple tables with different attributes and global secondary indexes for each table in one go. I have terraform.tfvars and main.tf with the following structure:
Variable declaration:
variable "dynamodb_table" {
type = list(object({
table_name = string
billing_mode = string
read_capacity = optional(number)
write_capacity = optional(string)
hash_key = string
ttl_attribute_name = string
ttl_enabled = string
range_key = optional(string)
attribute = object({
name = string
type = string
})
}))
}
variable "global_secondary_indexes" {
description = "Describe a GSI for the table; subject to the normal limits on the number of GSIs, projected attributes, etc."
type = list(object({
index_name = string
index_projection_type = string
index_range_key = string
index_hash_key = string
index_write_capacity = optional(string)
index_read_capacity = optional(string)
index_non_key_attributes = list(string)
}))
default = []
}
dynamodb_table = [
  {
    table_name         = "devops-test-01",
    billing_mode       = "PAY_PER_REQUEST",
    hash_key           = "UserId",
    range_key          = "GameTitle",
    ttl_attribute_name = "ttl_attribute_name",
    ttl_enabled        = "false"
    attribute = [
      {
        name = "UserId"
        type = "S"
      },
      {
        name = "GameTitle"
        type = "S"
      }
    ]
  },
  {
    table_name         = "devops-test-02",
    billing_mode       = "PAY_PER_REQUEST",
    hash_key           = "GameTitle",
    ttl_attribute_name = "ttl_attribute_name",
    ttl_enabled        = "false"
  }
]
global_secondary_indexes = [
  {
    index_name               = "TitleIndex"
    index_hash_key           = "UserId"
    index_range_key          = "GameTitle"
    index_projection_type    = "INCLUDE"
    index_non_key_attributes = ["Id"]
  }
]
default_tags = {
  "Environment" = "Dev",
  "Owner"       = "xxx"
}
resource "aws_dynamodb_table" "basic-dynamodb-table" {
for_each = { for key, value in var.dynamodb_table : key => value }
name = each.value.table_name
billing_mode = each.value.billing_mode
read_capacity = each.value.read_capacity
write_capacity = each.value.write_capacity
hash_key = each.value.hash_key
range_key = each.value.range_key
ttl {
attribute_name = each.value.ttl_attribute_name
enabled = each.value.ttl_enabled
}
dynamic "attribute" {
for_each = { for key, value in var.attributes : key => value }
content {
name = attribute.value.name
type = attribute.value.type
}
}
dynamic "global_secondary_index" {
for_each = var.global_secondary_indexes
content {
name = global_secondary_index.value.index_name
hash_key = global_secondary_index.value.index_hash_key
projection_type = global_secondary_index.value.index_projection_type
range_key = lookup(global_secondary_index.value, "index_range_key", null)
read_capacity = lookup(global_secondary_index.value, "index_read_capacity", null)
write_capacity = lookup(global_secondary_index.value, "index_write_capacity", null)
non_key_attributes = lookup(global_secondary_index.value, "index_non_key_attributes", null)
}
}
tags = merge(
var.default_tags,
{
Name = each.value.table_name
})
}
This code produces the following error:
The given value is not suitable for var.dynamodb_table declared at variable.tf:6,1-26: element 0: attribute "attribute": object required
You did not share your attributes variable, so I have used the attribute property inside the dynamodb_table variable instead.
Your main problem is that the attribute property in the dynamodb_table variable is required, but you did not provide any value for it in the devops-test-02 table values.
variables.tf
variable "dynamodb_table" {
type = list(object({
table_name = string
billing_mode = string
// read_capacity = optional(number)
//write_capacity = optional(string)
hash_key = string
ttl_attribute_name = string
ttl_enabled = string
//range_key = optional(string)
attribute = list(object({
name = string
type = string
}))
}))
default = [
{
table_name = "devops-test-01",
billing_mode = "PAY_PER_REQUEST",
hash_key = "UserId",
range_key = "GameTitle",
ttl_attribute_name = "ttl_attribute_name",
ttl_enabled = "false"
attribute = [
{
name = "UserId"
type = "S"
},
{
name = "GameTitle"
type = "S"
}
]
},
{
table_name = "devops-test-02",
billing_mode = "PAY_PER_REQUEST",
hash_key = "GameTitle",
ttl_attribute_name = "ttl_attribute_name",
ttl_enabled = "false"
attribute = [
{
name = "UserId"
type = "S"
},
{
name = "GameTitle"
type = "S"
}
]
}
]
}
variable "global_secondary_indexes" {
description = "Describe a GSI for the table; subject to the normal limits on the number of GSIs, projected attributes, etc."
type = list(object({
index_name = string
index_projection_type = string
index_range_key = string
index_hash_key = string
//index_write_capacity = optional(string)
//index_read_capacity = optional(string)
index_non_key_attributes = list(string)
}))
default = [
{
index_name = "TitleIndex"
index_hash_key = "UserId"
index_range_key = "GameTitle"
index_projection_type = "INCLUDE"
index_non_key_attributes = ["Id"]
}
]
}
variable "default_tags" {
default = {
"Environment" = "Dev",
"Owner" = "xxx"
}
}
dynamodb.tf
resource "aws_dynamodb_table" "basic-dynamodb-table" {
for_each = { for key, value in var.dynamodb_table : value.table_name => value }
name = each.value.table_name
billing_mode = each.value.billing_mode
read_capacity = lookup(each.value, "read_capacity", null)
write_capacity = lookup(each.value, "write_capacity", null)
hash_key = each.value.hash_key
range_key = lookup(each.value, "range_key", null)
ttl {
attribute_name = each.value.ttl_attribute_name
enabled = each.value.ttl_enabled
}
dynamic "attribute" {
for_each = { for key, value in each.value.attribute : key => value }
content {
name = attribute.value.name
type = attribute.value.type
}
}
dynamic "global_secondary_index" {
for_each = var.global_secondary_indexes
content {
name = global_secondary_index.value.index_name
hash_key = global_secondary_index.value.index_hash_key
projection_type = global_secondary_index.value.index_projection_type
range_key = lookup(global_secondary_index.value, "index_range_key", null)
read_capacity = lookup(global_secondary_index.value, "index_read_capacity", null)
write_capacity = lookup(global_secondary_index.value, "index_write_capacity", null)
non_key_attributes = lookup(global_secondary_index.value, "index_non_key_attributes", null)
}
}
tags = merge(
var.default_tags,
{
Name = each.value.table_name
})
}
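One detail worth noting in this version: the for_each keys the instances by value.table_name instead of the numeric list index, so each table gets a stable address such as aws_dynamodb_table.basic-dynamodb-table["devops-test-01"], and reordering the list in tfvars will not destroy and recreate tables.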
UPDATE 2023-01-17
Added a Kinesis streaming destination resource for the DynamoDB tables.
resource "aws_kinesis_stream" "example" {
for_each = aws_dynamodb_table.basic-dynamodb-table
name = "${each.key}_table_stream"
shard_count = 1
}
resource "aws_dynamodb_kinesis_streaming_destination" "example" {
for_each = aws_dynamodb_table.basic-dynamodb-table
stream_arn = aws_kinesis_stream.example[each.key].arn
table_name = each.key
}
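This works because for_each here iterates over the aws_dynamodb_table resource map itself, so each.key is the table name: one stream is created per table, and the streaming destination looks up its stream by that same key.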
I'm trying to account for an annoying case when creating my own private hosted zones, where the kinesis-streams endpoint has several non-standard DNS records. I am aiming to create a custom map of objects for each endpoint with the information that I need (below):
locals {
  endpoint_service_names = {
    ec2 = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ec2"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["ec2.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    "ecr.dkr" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.dkr"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["dkr.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = true
    }
    "ecr.api" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.api"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["api.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    kinesis-streams = {
      name        = "com.amazonaws.${data.aws_region.current.name}.kinesis-streams"
      type        = "Interface"
      private_dns = false
      phz_names = [
        "kinesis.${data.aws_region.current.name}.amazonaws.com",
        "data-kinesis.${data.aws_region.current.name}.amazonaws.com",
        "control-kinesis.${data.aws_region.current.name}.amazonaws.com"
      ]
      phz_wildcard = true
    }
  }
}
In order to use this in my implementation, however, I would need to perform a nested for_each, which Terraform does not directly allow, so I need to merge my map info into a new map with the service and DNS names in one object. My problem is similar to: Looping in for_each nested resources with terraform, except I do not have a list in each map element, only on the phz_names. I can't figure out how to get the syntax right to produce something like this:
endpoint_service_dns_list = {
  kinesis-streams = [
    "kinesis.${data.aws_region.current.name}.amazonaws.com",
    "data-kinesis.${data.aws_region.current.name}.amazonaws.com",
    "control-kinesis.${data.aws_region.current.name}.amazonaws.com"
  ]
  [...]
}
My attempt:
endpoint_service_dns_list = merge([
  for servicename, service in local.endpoint_service_names : {
    for phz_name in service[0].phz_names :
    "${servicename}-${phz_name}" => {
      service_name = servicename
      phz_name     = phz.phz_names
    }
  }
])
but the indexing obviously fails because I do not have a list for each service, and I am not sure what to replace it with.
Maybe I have missed the point, but based on your comments and your expected result in the question, you could have something like this. I have added a format that maps the service name to all its URLs as a list. Or, in case you wanted a mapping of each URL to its service name, I added an additional local var.
locals {
  endpoint_service_names = {
    ec2 = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ec2"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["ec2.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    "ecr.dkr" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.dkr"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["dkr.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = true
    }
    "ecr.api" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.api"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["api.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    kinesis-streams = {
      name        = "com.amazonaws.${data.aws_region.current.name}.kinesis-streams"
      type        = "Interface"
      private_dns = false
      phz_names = [
        "kinesis.${data.aws_region.current.name}.amazonaws.com",
        "data-kinesis.${data.aws_region.current.name}.amazonaws.com",
        "control-kinesis.${data.aws_region.current.name}.amazonaws.com"
      ]
      phz_wildcard = true
    }
  }

  endpoint_service_dns_list = { for service, attrs in local.endpoint_service_names : service => attrs.phz_names }

  endpoint_dns_service_list = merge([for service, attrs in local.endpoint_service_names : { for url in attrs["phz_names"] : url => service }]...)
}
data "aws_region" "current" {}
output "endpoint_service_dns_list" {
value = local.endpoint_service_dns_list
}
output "endpoint_dns_service_list" {
value = local.endpoint_dns_service_list
}
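The trailing ... in the endpoint_dns_service_list expression is Terraform's expansion operator: merge() takes separate map arguments, so merge([...]...) spreads the list of per-service maps into individual arguments. That expansion is one piece that was missing from the attempt above, which passed the whole list to merge() as a single argument.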
This maps the phz_names to each service, or each URL to its service, as seen in the outputs:
endpoint_dns_service_list = {
  "api.ecr.eu-west-2.amazonaws.com"         = "ecr.api"
  "control-kinesis.eu-west-2.amazonaws.com" = "kinesis-streams"
  "data-kinesis.eu-west-2.amazonaws.com"    = "kinesis-streams"
  "dkr.ecr.eu-west-2.amazonaws.com"         = "ecr.dkr"
  "ec2.eu-west-2.amazonaws.com"             = "ec2"
  "kinesis.eu-west-2.amazonaws.com"         = "kinesis-streams"
}
endpoint_service_dns_list = {
  "ec2" = [
    "ec2.eu-west-2.amazonaws.com",
  ]
  "ecr.api" = [
    "api.ecr.eu-west-2.amazonaws.com",
  ]
  "ecr.dkr" = [
    "dkr.ecr.eu-west-2.amazonaws.com",
  ]
  "kinesis-streams" = [
    "kinesis.eu-west-2.amazonaws.com",
    "data-kinesis.eu-west-2.amazonaws.com",
    "control-kinesis.eu-west-2.amazonaws.com",
  ]
}
Hopefully one of these is the kind of thing you were looking to do.
Say that I have two modules that use the same source (append_only_history_bq_tables). Their only difference is the from_dwh_core variable, whose default is set to false.
module "dwh_core_append_only_history_tables" {
source = "../append_only_history_bq_tables"
gcp_project = var.gcp_project
bq_location = var.bq_location
dataset_id = google_bigquery_dataset.jago_pagerduty.dataset_id
module_base_path = path.module
tables = {
"list_incident" = { "business_key_columns" = "id", "partition_type" = "DAY" },
"raw_incident_information" = { "business_key_columns" = "id", "partition_type" = "DAY" }
}
}
module "daily_closing_balance" {
source = "../append_only_history_bq_tables"
gcp_project = var.gcp_project
bq_location = var.bq_location
dataset_id = google_bigquery_dataset.dwh_core_dataset.dataset_id
module_base_path = path.module
use_source_ord = false
from_dwh_core = true
tables = {
"daily_closing_balance" = { "business_key_columns" = "full_date,account_number,customer_id", "partition_type" = "DAY" }
}
depends_on = [google_bigquery_dataset.dwh_core_dataset]
}
The append_only_history_bq_tables module contains this resource:
resource "google_bigquery_table" "dwh_core_snapshot_daily" {
for_each = var.tables
dataset_id = var.dataset_id
project = var.gcp_project
table_id = "${each.key}_snapshot_current_daily"
schema = file("${local.schema_def_folder}/${each.key}.json")
deletion_protection=false
}
How can I create the dwh_core_snapshot_daily resource only when the from_dwh_core variable is set to true?
Thank you for your help!
You can use a conditional expression:
resource "google_bigquery_table" "dwh_core_snapshot_daily" {
for_each = var.from_dwh_core == true ? var.tables : {}
dataset_id = var.dataset_id
project = var.gcp_project
table_id = "${each.key}_snapshot_current_daily"
schema = file("${local.schema_def_folder}/${each.key}.json")
deletion_protection=false
}
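Since the variable is a boolean, the comparison can also be shortened to for_each = var.from_dwh_core ? var.tables : {}. For completeness, the flag has to be declared inside the append_only_history_bq_tables module; a minimal sketch matching the default described in the question:
variable "from_dwh_core" {
  type    = bool
  default = false
}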
I am trying to pass the S3 bucket names and the create_user flag into the locals block in main.tf so that both end up as lists. I then pass list_of_buckets to module s3 to create the buckets, and loop over users_to_create in module s3_user to create a user whenever the boolean is set to true. All of these values are passed to variable.tf and then to main.tf.
dev.tfvars
wea-nonprod = {
  services = {
    s3 = {
      sthree = {
        create_user = true
      }
      sfour = {
        create_user = true
      }
      sfive = {
        create_user = true
      }
    }
  }
}
variable.tf
variable "s3_buckets" {
type = list(map)
}
main.tf
locals {
  users_to_create = ""
  list_of_buckets = ""
}
module "s3" {
source = "../../s3"
name = join("-", [var.name_prefix, "s3"])
tags = merge(var.tags, {Name = join("-", [var.name_prefix, "s3"])})
buckets = list_of_buckets
sse_algorithm = "AES256"
access_log_bucket_name = var.access_log_bucket_name
}
module "s3_user" {
for_each = local.users_to_create
source = "./service-s3-bucket-user"
name = join("-", [var.name_prefix, each.key])
tags = var.tags
bucket_arn = module.s3.bucket_arns[each.key]
depends_on = [module.s3]
}
Just iterate over your wea-nonprod map:
locals {
  users_to_create = [for name, config in var.wea-nonprod.services.s3 : name if config.create_user == true]
  list_of_buckets = [for name, config in var.wea-nonprod.services.s3 : name]
}
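Assuming services.s3 is a map keyed by bucket name (as in the tfvars above), both expressions iterate over its keys: list_of_buckets evaluates to ["sfive", "sfour", "sthree"] (Terraform iterates map keys in lexical order), and users_to_create keeps only the names whose create_user flag is true.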
And a few changes to your module blocks:
module "s3" {
source = "../../s3"
name = "${var.name_prefix}-s3"
tags = merge(var.tags, { Name = "${var.name_prefix}-s3" })
buckets = local.list_of_buckets
sse_algorithm = "AES256"
access_log_bucket_name = var.access_log_bucket_name
}
module "s3_user" {
count = length(local.users_to_create)
source = "./service-s3-bucket-user"
name = "${var.name_prefix}${local.users_to_create[count.index]}"
tags = var.tags
bucket_arn = module.s3.bucket_arns[local.users_to_create[count.index]]
depends_on = [module.s3]
}
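As a variation on the count version, for_each over a set keys the module instances by user name instead of list position, so removing one user later does not shift and recreate the others. A sketch using the same locals (assuming, as above, that the s3 module's bucket_arns output is keyed by bucket name):
module "s3_user" {
  for_each   = toset(local.users_to_create)
  source     = "./service-s3-bucket-user"
  name       = "${var.name_prefix}${each.value}"
  tags       = var.tags
  bucket_arn = module.s3.bucket_arns[each.value]
  depends_on = [module.s3]
}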
I am trying to create a simple DynamoDB table using the following Terraform resource module.
I get the following error while running Terraform:
All attributes must be indexed. Unused attributes: ["pactitle" "ipadress" "Timestamp"].
Why do we need to index all attributes?
How can I solve this?
resource "aws_dynamodb_table" "this" {
count = var.create_table ? 1 : 0
name = var.name
billing_mode = var.billing_mode
hash_key = var.hash_key
range_key = var.range_key
read_capacity = var.read_capacity
write_capacity = var.write_capacity
//stream_enabled = var.stream_enabled
//stream_view_type = var.stream_view_type
dynamic "attribute" {
for_each = var.attributes
content {
name = attribute.value.name
type = attribute.value.type
}
}
server_side_encryption {
enabled = var.server_side_encryption_enabled
kms_key_arn = var.server_side_encryption_kms_key_arn
}
tags = merge(
var.tags,
{
"Name" = format("%s", var.name)
},
)
timeouts {
create = lookup(var.timeouts, "create", null)
delete = lookup(var.timeouts, "delete", null)
update = lookup(var.timeouts, "update", null)
}
}
The calling module:
module "dynamodb_table" {
source = "./../../../modules/dynamodb"
name = "pack-audit-cert"
hash_key = "id"
create_table= true
read_capacity=5
write_capacity=5
billing_mode = "PROVISIONED"
range_key = "pacid"
attributes = [
{
name = "id"
type = "N"
},
{
name = "pacid"
type = "S"
},
{
name = "pactitle"
type = "S"
},
{
name = "ipadress"
type = "S"
},
{
name = "Timestamp"
type = "S"
}
]
}
Thank you
That error message is a bit misleading. You should only define the attributes that are actually indexed (the table's hash and range keys, plus any index keys) when you are creating the table. Since DynamoDB is a schemaless database, it doesn't care about the other attributes at table creation time; you can still write them on individual items.
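Concretely, that means trimming the attributes list in the module call above down to the key attributes (hash_key "id" and range_key "pacid"); pactitle, ipadress and Timestamp can still be stored on items without being declared:
attributes = [
  {
    name = "id"
    type = "N"
  },
  {
    name = "pacid"
    type = "S"
  }
]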