Terraform dynamic loop to create multiple DynamoDB tables - amazon-web-services

I'm trying to create a dynamic method to create multiple DynamoDB tables, each with its own attributes. I tried for loops with dynamic blocks, lists of objects, etc., but was not able to iterate over the attributes for each table. The goal is to have multiple tables with different attributes and global secondary indexes for each table in one go. I have terraform.tfvars and main.tf with the following structure:
Variable declaration:
variable "dynamodb_table" {
type = list(object({
table_name = string
billing_mode = string
read_capacity = optional(number)
write_capacity = optional(string)
hash_key = string
ttl_attribute_name = string
ttl_enabled = string
range_key = optional(string)
attribute = object({
name = string
type = string
})
}))
}
variable "global_secondary_indexes" {
description = "Describe a GSI for the table; subject to the normal limits on the number of GSIs, projected attributes, etc."
type = list(object({
index_name = string
index_projection_type = string
index_range_key = string
index_hash_key = string
index_write_capacity = optional(string)
index_read_capacity = optional(string)
index_non_key_attributes = list(string)
}))
default = []
}
terraform.tfvars:
dynamodb_table = [
{
table_name = "devops-test-01",
billing_mode = "PAY_PER_REQUEST",
hash_key = "UserId",
range_key = "GameTitle",
ttl_attribute_name = "ttl_attribute_name",
ttl_enabled = "false"
attribute = [
{
name = "UserId"
type = "S"
},
{
name = "GameTitle"
type = "S"
}
]
},
{
table_name = "devops-test-02",
billing_mode = "PAY_PER_REQUEST",
hash_key = "GameTitle",
ttl_attribute_name = "ttl_attribute_name",
ttl_enabled = "false"
}
]
global_secondary_indexes = [
{
index_name = "TitleIndex"
index_hash_key = "UserId"
index_range_key = "GameTitle"
index_projection_type = "INCLUDE"
index_non_key_attributes = ["Id"]
}
]
default_tags = {
"Environment" = "Dev",
"Owner" = "xxx"
}
resource "aws_dynamodb_table" "basic-dynamodb-table" {
for_each = { for key, value in var.dynamodb_table : key => value }
name = each.value.table_name
billing_mode = each.value.billing_mode
read_capacity = each.value.read_capacity
write_capacity = each.value.write_capacity
hash_key = each.value.hash_key
range_key = each.value.range_key
ttl {
attribute_name = each.value.ttl_attribute_name
enabled = each.value.ttl_enabled
}
dynamic "attribute" {
for_each = { for key, value in var.attributes : key => value }
content {
name = attribute.value.name
type = attribute.value.type
}
}
dynamic "global_secondary_index" {
for_each = var.global_secondary_indexes
content {
name = global_secondary_index.value.index_name
hash_key = global_secondary_index.value.index_hash_key
projection_type = global_secondary_index.value.index_projection_type
range_key = lookup(global_secondary_index.value, "index_range_key", null)
read_capacity = lookup(global_secondary_index.value, "index_read_capacity", null)
write_capacity = lookup(global_secondary_index.value, "index_write_capacity", null)
non_key_attributes = lookup(global_secondary_index.value, "index_non_key_attributes", null)
}
}
tags = merge(
var.default_tags,
{
Name = each.value.table_name
})
}
This code produces the following error:
The given value is not suitable for var.dynamodb_table declared at variable.tf:6,1-26: element 0: attribute "attribute": object required

You did not share your attributes variable, so I have used the attribute list defined inside the dynamodb_table variable instead.
Your main problem is that the attribute property in the dynamodb_table variable is required, but you did not provide any value for it in the devops-test-02 table values.
variables.tf
variable "dynamodb_table" {
type = list(object({
table_name = string
billing_mode = string
// read_capacity = optional(number)
//write_capacity = optional(string)
hash_key = string
ttl_attribute_name = string
ttl_enabled = string
//range_key = optional(string)
attribute = list(object({
name = string
type = string
}))
}))
default = [
{
table_name = "devops-test-01",
billing_mode = "PAY_PER_REQUEST",
hash_key = "UserId",
range_key = "GameTitle",
ttl_attribute_name = "ttl_attribute_name",
ttl_enabled = "false"
attribute = [
{
name = "UserId"
type = "S"
},
{
name = "GameTitle"
type = "S"
}
]
},
{
table_name = "devops-test-02",
billing_mode = "PAY_PER_REQUEST",
hash_key = "GameTitle",
ttl_attribute_name = "ttl_attribute_name",
ttl_enabled = "false"
attribute = [
{
name = "UserId"
type = "S"
},
{
name = "GameTitle"
type = "S"
}
]
}
]
}
variable "global_secondary_indexes" {
description = "Describe a GSI for the table; subject to the normal limits on the number of GSIs, projected attributes, etc."
type = list(object({
index_name = string
index_projection_type = string
index_range_key = string
index_hash_key = string
//index_write_capacity = optional(string)
//index_read_capacity = optional(string)
index_non_key_attributes = list(string)
}))
default = [
{
index_name = "TitleIndex"
index_hash_key = "UserId"
index_range_key = "GameTitle"
index_projection_type = "INCLUDE"
index_non_key_attributes = ["Id"]
}
]
}
variable "default_tags" {
default = {
"Environment" = "Dev",
"Owner" = "xxx"
}
}
dynamodb.tf
resource "aws_dynamodb_table" "basic-dynamodb-table" {
for_each = { for key, value in var.dynamodb_table : value.table_name => value }
name = each.value.table_name
billing_mode = each.value.billing_mode
read_capacity = lookup(each.value, "read_capacity", null)
write_capacity = lookup(each.value, "write_capacity", null)
hash_key = each.value.hash_key
range_key = lookup(each.value, "range_key", null)
ttl {
attribute_name = each.value.ttl_attribute_name
enabled = each.value.ttl_enabled
}
dynamic "attribute" {
for_each = { for key, value in each.value.attribute : key => value }
content {
name = attribute.value.name
type = attribute.value.type
}
}
dynamic "global_secondary_index" {
for_each = var.global_secondary_indexes
content {
name = global_secondary_index.value.index_name
hash_key = global_secondary_index.value.index_hash_key
projection_type = global_secondary_index.value.index_projection_type
range_key = lookup(global_secondary_index.value, "index_range_key", null)
read_capacity = lookup(global_secondary_index.value, "index_read_capacity", null)
write_capacity = lookup(global_secondary_index.value, "index_write_capacity", null)
non_key_attributes = lookup(global_secondary_index.value, "index_non_key_attributes", null)
}
}
tags = merge(
var.default_tags,
{
Name = each.value.table_name
})
}
UPDATE 2023-01-17
Add a Kinesis streaming destination resource to the DynamoDB tables.
resource "aws_kinesis_stream" "example" {
for_each = aws_dynamodb_table.basic-dynamodb-table
name = "${each.key}_table_stream"
shard_count = 1
}
resource "aws_dynamodb_kinesis_streaming_destination" "example" {
for_each = aws_dynamodb_table.basic-dynamodb-table
stream_arn = aws_kinesis_stream.example[each.key].arn
table_name = each.key
}

Related

for_each loop with dynamic block and values from tfvars

I'm trying to create certain BigQuery tables with time_partitioning using a dynamic block, and I want to use the values from tfvars at runtime as follows:
./tables/tables.tf:
resource "google_bigquery_table" "tables" {
for_each = var.tables == [] ? [] : toset(var.tables)
dataset_id = var.db_id
deletion_protection = false
table_id = each.key
dynamic "time_partitioning" {
for_each = var.partitioned_tables
content {
type = "DAY"
field = time_partitioning.value.field
}
}
labels = {
environment = var.environment
application = var.application
}
schema = fileexists("${path.module}/${var.db_id}/${each.key}.json") ? file("${path.module}/${var.db_id}/${each.key}.json") : null
}
main.tf:
resource "google_bigquery_dataset" "database" {
count = length(var.dbs)
dataset_id = var.dbs[count.index].db_id
friendly_name = var.dbs[count.index].db_name
description = "TF"
location = "US"
delete_contents_on_destroy = var.delete_contents_on_destroy
labels = {
environment = var.environment
application = var.dbs[count.index].app_name
}
}
module "tables" {
source = "./tables"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
environment = var.environment
application = var.dbs[count.index].app_name
tables = var.dbs[count.index].tables
partitioned_tables = var.dbs[count.index].partitioned_tables
}
module "iam" {
source = "./iam"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
iam_members = var.dbs[count.index].iam_members
}
dev.tfvars:
region = "us-central1"
project_id = "some-project"
dbs = [
{
db_id = "dataset1"
db_name = "dataset1"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com",
}
]
tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
partitioned_tables = [
{
table = "daily_sales"
field = "sales_timestamp"
},
{
table = "daily_inventory"
field = "inventory_timestamp"
}
]
},
{
db_id = "dataset2"
db_name = "dataset2"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
}
]
tables = []
}
]
environment = "development"
delete_contents_on_destroy = true
var.dbs is type = list(any)
Getting:
The given value is not suitable for var.dbs declared at
variables.tf:9,1-15: all list elements must have the same type.
Thanks in advance!
list(any) does not mean that you can have elements of any type in your list. All elements must have the same type, and you can't mix types as you do now (i.e. the second element is missing partitioned_tables). any only means that TF will infer the single type for the elements, but all elements must be of that single type. So you have three choices:
remove type = list(any)
Fully define your type with optional arguments, instead of using any (a sketch of this follows the example below)
Add partitioned_tables to the second element:
[
{
db_id = "dataset1"
db_name = "dataset1"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com",
}
]
tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
partitioned_tables = [
{
table = "daily_sales"
field = "sales_timestamp"
},
{
table = "daily_inventory"
field = "inventory_timestamp"
}
]
},
{
db_id = "dataset2"
db_name = "dataset2"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
}
]
partitioned_tables = []
tables = []
}
]
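For the second choice, a sketch of a fully defined type where partitioned_tables is optional (optional() object attributes require Terraform 1.3+; the attribute shapes follow the tfvars above):
variable "dbs" {
  type = list(object({
    db_id    = string
    db_name  = string
    app_name = string
    iam_members = list(object({
      role   = string
      member = string
    }))
    tables = list(string)
    # defaults to an empty list when an element omits it
    partitioned_tables = optional(list(object({
      table = string
      field = string
    })), [])
  }))
}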

Codepipeline Error using Environment Variables with Terraform

So I am running into an error with AWS Codepipeline:
Error: Error creating CodePipeline: ValidationException:
ActionConfiguration Map value must satisfy constraint: [Member must
have length less than or equal to 1000, Member must have a length
greater than or equal to 1]
Googling it tells me that I have too many pipeline environment variables, and that I have a limit of 1000 characters. I am not sure what that means: does it mean the values of my environment variables cannot exceed 1000 characters, or does it mean that the JSON that makes up the environment variables can't exceed 1000 characters?
Appreciate the help here.
Terraform code as requested:
resource "aws_codepipeline" "cp_plan_pipeline" {
name = "${local.cp_name}-cp"
role_arn = aws_iam_role.cp_service_role.arn
artifact_store {
type = var.cp_artifact_type
location = module.S3.bucket_name
}
stage {
name = "Initialize"
action {
run_order = 1
name = "Source"
category = "Source"
owner = "AWS"
provider = "CodeCommit"
version = "1"
input_artifacts = []
output_artifacts = ["CodeWorkspace"]
configuration = {
RepositoryName = var.cp_repo_name
BranchName = var.cp_branch_name
PollForSourceChanges = var.cp_poll_sources
OutputArtifactFormat = var.cp_ouput_format
}
}
}
stage {
name = "Build"
action {
run_order = 1
name = "Combine_Binaries"
category = "Build"
owner = "AWS"
provider = "CodeBuild"
version = "1"
namespace = "BINARYVARIABLE"
input_artifacts = ["CodeWorkspace"]
output_artifacts = ["CodeSource"]
configuration = {
ProjectName = var.cp_binary_project_name
EnvironmentVariables = jsonencode([
{
name = "PIPELINE_EXECUTION_ID"
type = "PLAINTEXT"
value = "#{codepipeline.PipelineExecutionId}"
},
{
name = "PL_BUCKET_KEY"
type = "PLAINTEXT"
value = "global/state/${var.bucketlocation}/"
},
{
name = "PL_DYNAMODB_TABLE_NAME"
type = "PLAINTEXT"
value = "${var.project}-${var.env}-${var.tenant}-db-${var.bucketlocation}"
},
{
name = "PL_JQ_VERSION"
type = "PLAINTEXT"
value = var.JQ_VER
},
{
name = "PL_PY_VERSION"
type = "PLAINTEXT"
value = var.PY_VER
},
{
name = "PL_GO_VERSION"
type = "PLAINTEXT"
value = var.TF_VER
},
{
name = "PL_TF_VERSION"
type = "PLAINTEXT"
value = var.TF_VER
},
{
name = "PL_GROUP_NAME"
type = "PLAINTEXT"
value = var.group_name
},
{
name = "PL_GROUP_EMAIL"
type = "PLAINTEXT"
value = var.group_email
},
{
name = "PL_PROJECT"
type = "PLAINTEXT"
value = var.project
},
{
name = "PL_TENANT"
type = "PLAINTEXT"
value = var.tenant
},
{
name = "PL_APPENV"
type = "PLAINTEXT"
value = ""
},
{
name = "PL_AWSACCOUNTNAME"
type = "PLAINTEXT"
value = ""
},
{
name = "PL_AWSACCOUNTNUMB"
type = "PLAINTEXT"
value = ""
},
{
name = "PL_PERMISSION_SETS_DIR"
type = "PLAINTEXT"
value = ""
},
])
}
}
}
stage {
name = "Code_Validation"
action {
run_order = 1
name = "Build_Lint_Py"
category = "Build"
owner = "AWS"
provider = "CodeBuild"
version = "1"
input_artifacts = ["CodeSource"]
output_artifacts = ["pyReport"]
configuration = {
ProjectName = var.cp_lintpy_project_name
EnvironmentVariables = jsonencode([
{
name = "PIPELINE_EXECUTION_ID"
type = "PLAINTEXT"
value = "#{codepipeline.PipelineExecutionId}"
},
{
name = "PL_PY_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PY_VERSION}"
},
{
name = "PL_PERMISSION_SETS_DIR"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PERMISSION_SETS_DIR}"
},
])
}
}
action {
run_order = 1
name = "Build_TF_Plan"
category = "Build"
owner = "AWS"
provider = "CodeBuild"
version = "1"
input_artifacts = ["CodeSource"]
output_artifacts = ["buildPlan"]
configuration = {
ProjectName = var.cp_build_tf_validate
#PrimarySource = "CodeSource"
EnvironmentVariables = jsonencode([
{
name = "PIPELINE_EXECUTION_ID"
type = "PLAINTEXT"
value = "#{codepipeline.PipelineExecutionId}"
},
{
name = "PL_APP_NAME"
type = "PLAINTEXT"
value = var.bucketlocation
},
{
name = "PL_BUCKET_KEY"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_BUCKET_KEY}"
},
{
name = "PL_DYNAMODB_TABLE_NAME"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_DYNAMODB_TABLE_NAME}"
},
{
name = "PL_JQ_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_JQ_VERSION}"
},
{
name = "PL_PY_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PY_VERSION}"
},
{
name = "PL_TF_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_TF_VERSION}"
},
{
name = "PL_GROUP_NAME"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_GROUP_NAME}"
},
{
name = "PL_GROUP_EMAIL"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_GROUP_EMAIL}"
},
{
name = "PL_PROJECT"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PROJECT}"
},
{
name = "PL_TENANT"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_TENANT}"
},
{
name = "PL_APPENV"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_APPENV}"
},
{
name = "PL_AWSACCOUNTNUMB"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_AWSACCOUNTNUMB}"
},
{
name = "PL_PERMISSION_SETS_DIR"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PERMISSION_SETS_DIR}"
},
])
}
}
action {
run_order = 1
name = "Build_Lint_TF"
category = "Build"
owner = "AWS"
provider = "CodeBuild"
version = "1"
input_artifacts = ["CodeSource"]
output_artifacts = ["tfReport"]
configuration = {
ProjectName = var.cp_linttf_project_name
#PrimarySource = "CodeSource"
EnvironmentVariables = jsonencode([
{
name = "PIPELINE_EXECUTION_ID"
type = "PLAINTEXT"
value = "#{codepipeline.PipelineExecutionId}"
},
{
name = "PL_BUCKET_KEY"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_BUCKET_KEY}"
},
{
name = "PL_DYNAMODB_TABLE_NAME"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_DYNAMODB_TABLE_NAME}"
},
{
name = "PL_TF_VERSION"
type = "PLAINTEXT"
value = var.TF_VER
},
{
name = "PL_TF_LINT_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_TF_LINT_VERSION}"
},
{
name = "PL_PERMISSION_SETS_DIR"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PERMISSION_SETS_DIR}"
},
])
}
}
}
stage {
name = "Test"
action {
run_order = 1
name = "Static_Analysis_Py"
category = "Test"
owner = "AWS"
provider = "CodeBuild"
version = "1"
input_artifacts = ["CodeSource"]
output_artifacts = ["pySecReport"]
configuration = {
ProjectName = var.cp_test_static_py
PrimarySource = "CodeSource"
EnvironmentVariables = jsonencode([
{
name = "PIPELINE_EXECUTION_ID"
type = "PLAINTEXT"
value = "#{codepipeline.PipelineExecutionId}"
},
{
name = "PL_JQ_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_JQ_VERSION}"
},
{
name = "PL_PY_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PY_VERSION}"
},
{
name = "PL_PERMISSION_SETS_DIR"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PERMISSION_SETS_DIR}"
},
])
}
}
action {
run_order = 1
name = "Static_Analysis_TFSec"
category = "Test"
owner = "AWS"
provider = "CodeBuild"
version = "1"
namespace = "TESTVARIABLE"
input_artifacts = ["CodeSource"]
output_artifacts = ["tfSecReport"]
configuration = {
ProjectName = var.cp_test_static_tf
#PrimarySource = "CodeSource"
EnvironmentVariables = jsonencode([
{
name = "PIPELINE_EXECUTION_ID"
type = "PLAINTEXT"
value = "#{codepipeline.PipelineExecutionId}"
},
{
name = "PL_JQ_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_JQ_VERSION}"
},
{
name = "PL_TFSEC_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_TFSEC_VERSION}"
},
{
name = "PL_PERMISSION_SETS_DIR"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PERMISSION_SETS_DIR}"
},
#{
# name = "PL_ARTIFACTBUCKET"
# type = "PLAINTEXT"
# value = "${var.project}-${var.env}-${var.tenant}-${var.cp_name}-cp-artifacts"
#},
#{
# name = "PL_TFSECAPPROVALLINK"
# type = "PLAINTEXT"
# value = ""
#},
])
}
}
}
stage {
name = "Manual_Approval_Action"
action {
run_order = 1
name = "Manual_Review_Action-${var.project}-${var.env}-${var.tenant}-${var.cp_name}"
category = "Approval"
owner = "AWS"
provider = "Manual"
version = "1"
input_artifacts = []
output_artifacts = []
configuration = {
NotificationArn = module.sns_cp.op_sns_topic_arn
CustomData = "Please review the static code analysis and the repoistory before code is deployed."
}
}
}
stage {
name = "Deploy"
action {
run_order = 1
name = "Terraform-Apply"
category = "Build"
owner = "AWS"
provider = "CodeBuild"
input_artifacts = ["CodeSource","buildPlan"]
output_artifacts = []
version = "1"
configuration = {
ProjectName = var.cp_apply_project_name
PrimarySource = "CodeSource"
EnvironmentVariables = jsonencode([
{
name = "PIPELINE_EXECUTION_ID"
value = "#{codepipeline.PipelineExecutionId}"
type = "PLAINTEXT"
},
{
name = "PL_PERMISSION_SETS_DIR"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PERMISSION_SETS_DIR}"
},
{
name = "PL_BUCKET_KEY"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_BUCKET_KEY}"
},
{
name = "PL_DYNAMODB_TABLE_NAME"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_DYNAMODB_TABLE_NAME}"
},
{
name = "PL_TF_VERSION"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_TF_VERSION}"
},
{
name = "PL_GROUP_NAME"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_GROUP_NAME}"
},
{
name = "PL_GROUP_EMAIL"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_GROUP_EMAIL}"
},
{
name = "PL_PROJECT"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_PROJECT}"
},
{
name = "PL_TENANT"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_TENANT}"
},
{
name = "PL_APPENV"
type = "PLAINTEXT"
value = "#{BINARYVARIABLE.PL_APPENV}"
},
])
}
}
}
}
Okay, after days of looking into this, my colleague (who gets all the credit) figured out what the 1000 character limit is. Keep in mind this is 1000 characters per stage. So, without confirmation from HashiCorp, here is what we came up with:
If you want to open the state file in a text editor, make sure you are viewing the file and not modifying it. Inside the state file, search for "EnvironmentVariables". You will find JSON syntax; an example of the output is shown below.
"EnvironmentVariables": "[{\"name\":\"PIPELINE_EXECUTION_ID\",\"type\":\"PLAINTEXT\",\"value\":\"#{codepipeline.PipelineExecutionId}\"},{\"name\":\"PL_APP_NAME\",\"type\":\"PLAINTEXT\",\"value\":\"deploy_pl\"},{\"name\":\"PL_BUCKET_KEY\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_BUCKET_KEY}\"},{\"name\":\"PL_DYNAMODB_TABLE_NAME\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_DYNAMODB_TABLE_NAME}\"},{\"name\":\"PL_GROUP_NAME\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_GROUP_NAME}\"},{\"name\":\"PL_GROUP_EMAIL\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_GROUP_EMAIL}\"},{\"name\":\"PL_PROJECT\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_PROJECT}\"},{\"name\":\"PL_TENANT\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_TENANT}\"},{\"name\":\"PL_APPENV\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_APPENV}\"},{\"name\":\"PL_ACCT_NUMB\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_ACCT_NUMB}\"},{\"name\":\"PL_PERMISSION_SETS_DIR\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_PERMISSION_SETS_DIR}\"},{\"name\":\"PL_IS_MGMT_ACCT\",\"type\":\"PLAINTEXT\",\"value\":\"#{BIN.PL_IS_MGMT_ACCT}\"}]",
If you remove "EnvironmentVariables": and the escaping " \ " characters, that gives you a count of the characters within the environment variables section. It has allowed me to rename and refactor my variables accurately.
So my advice going forward:
keep namespaces to four or fewer characters
keep variables short to save space
only use variables in the stage where appropriate
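Since EnvironmentVariables is just jsonencode() output, you can also measure the size from Terraform instead of digging through the state file; a minimal sketch, assuming you lift the list into a local first (only one illustrative variable shown):
locals {
  # the same list that gets passed to jsonencode() in the action configuration
  build_env_vars = [
    {
      name  = "PIPELINE_EXECUTION_ID"
      type  = "PLAINTEXT"
      value = "#{codepipeline.PipelineExecutionId}"
    },
  ]
}
output "build_env_vars_chars" {
  # compare against the 1000-character ActionConfiguration limit
  value = length(jsonencode(local.build_env_vars))
}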

Using a single DynamoDB resource in Terraform to create 3 tables with different names

I have a use case where I need to create 3 DynamoDB tables in which only the naming convention changes:
resource "aws_dynamodb_table" "GB_SKU_COLOR" {
name = "GB_SKU_COLOR_${var.stage}"
billing_mode = "PAY_PER_REQUEST"
hash_key = "PRODUCT_ID"
stream_enabled = true
stream_view_type = "NEW_AND_OLD_IMAGES"
attribute {
name = "PRODUCT_ID"
type = "S"
}
}
I need to create the same table with 2 different names, like MN_SKU_COLOR and CH_SKU_COLOR.
Currently, I am replicating the resource and giving it a new name:
resource "aws_dynamodb_table" "MN_SKU_COLOR" {
name = "MN_SKU_COLOR_${var.stage}"
billing_mode = "PAY_PER_REQUEST"
hash_key = "PRODUCT_ID"
stream_enabled = true
stream_view_type = "NEW_AND_OLD_IMAGES"
attribute {
name = "PRODUCT_ID"
type = "S"
}
}
resource "aws_dynamodb_table" "CH_SKU_COLOR" {
name = "CH_SKU_COLOR_${var.stage}"
billing_mode = "PAY_PER_REQUEST"
hash_key = "PRODUCT_ID"
stream_enabled = true
stream_view_type = "NEW_AND_OLD_IMAGES"
attribute {
name = "PRODUCT_ID"
type = "S"
}
}
What is the best way to create the resource 3 times without replicating the code?
If the only difference is the name, then you can do:
variable "names" {
default = ["MN_SKU_COLOR", "GB_SKU_COLOR", "CH_SKU_COLOR"]
}
resource "aws_dynamodb_table" "table" {
for_each = toset(var.names)
name = "${each.key}_${var.stage}"
billing_mode = "PAY_PER_REQUEST"
hash_key = "PRODUCT_ID"
stream_enabled = true
stream_view_type = "NEW_AND_OLD_IMAGES"
attribute {
name = "PRODUCT_ID"
type = "S"
}
}
Then you refer to the individual tables using their name from the variable, e.g.
aws_dynamodb_table.table["MN_SKU_COLOR"].id
Use for_each along with a set or list of names: https://www.terraform.io/language/meta-arguments/for_each
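If you need all of the tables at once, for example in outputs or IAM policy documents, a for expression over the resource works; a sketch:
output "table_arns" {
  # map of logical name => table ARN for every table created above
  value = { for name, table in aws_dynamodb_table.table : name => table.arn }
}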

flatten object made of nested list in terraform

I have the following variable that I am trying to parse:
variable.tf
variable "rbac_roles" {
type = object(
{
view = list(object({
group_name = string,
group_id = string,
namespaces = list(string)
})),
edit = list(object({
group_name = string,
group_id = string,
namespaces = list(string)
})),
admin = list(object({
group_name = string,
group_id = string,
namespaces = list(string)
}))
}
)
}
variable.tfvars
rbac_roles = {
view = [
{
group_name = "group1",
group_id = "123",
namespaces = ["default", "namespace1"]
},
{
group_name = "group2",
group_id = "456",
namespaces = ["namespace2"]
}
],
edit = [
{
group_name = "group1",
group_id = "123",
namespaces = ["namespace2"]
}
],
admin = [
{
group_name = "group3",
group_id = "789",
namespaces = ["default, namespace1, namespace2"]
},
]
}
I am trying to create the following resources:
resource "kubernetes_role_binding" "view_cluster_role_binding" {
metadata {
name = ${group}-${namespace}-viewer-binding
namespace = ${namespace}
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "Role"
name = "view"
}
subject {
kind = "Group"
name = ${group}
api_group = "rbac.authorization.k8s.io"
}
}
resource "kubernetes_role_binding" "edit_cluster_role_binding" {
metadata {
name = ${group}-${namespace}-edit-binding
namespace = ${namespace}
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "Role"
name = "edit"
}
subject {
kind = "Group"
name = ${group}
api_group = "rbac.authorization.k8s.io"
}
}
resource "kubernetes_role_binding" "admin_cluster_role_binding" {
metadata {
name = ${group}-${namespace}-admin-binding
namespace = ${namespace}
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "Role"
name = "admin"
}
subject {
kind = "Group"
name = ${group}
api_group = "rbac.authorization.k8s.io"
}
}
So far I have tried to flatten() my list and to loop over it with for and for_each, but I haven't been successful yet. From what I understand I need to use a locals {} block to reach my goal, but I can't get the right syntax. Any help will be appreciated!
First, this is wrong: ["default, namespace1, namespace2"]. It should be ["default", "namespace1", "namespace2"]. Once you fix that, you can flatten your data structure as follows:
locals {
flat_rbac_roles = merge([
for role, groups in var.rbac_roles:
merge([
for group_idx, group in groups:
{
for namespace_idx, namespace in group["namespaces"]:
"${role}-${group_idx}-${namespace_idx}" => {
role_name = role
group_name = group["group_name"]
group_id = group["group_id"]
namespace = namespace
}
}
]...)
]...)
}
The ... after each inner for expression expands the resulting list of maps into separate arguments for merge(), which gives:
{
"admin-0-0" = {
"group_id" = "789"
"group_name" = "group3"
"namespace" = "default"
"role_name" = "admin"
}
"admin-0-1" = {
"group_id" = "789"
"group_name" = "group3"
"namespace" = "namespace1"
"role_name" = "admin"
}
"admin-0-2" = {
"group_id" = "789"
"group_name" = "group3"
"namespace" = "namespace2"
"role_name" = "admin"
}
"edit-0-0" = {
"group_id" = "123"
"group_name" = "group1"
"namespace" = "namespace2"
"role_name" = "edit"
}
"view-0-0" = {
"group_id" = "123"
"group_name" = "group1"
"namespace" = "default"
"role_name" = "view"
}
"view-0-1" = {
"group_id" = "123"
"group_name" = "group1"
"namespace" = "namespace1"
"role_name" = "view"
}
"view-1-0" = {
"group_id" = "456"
"group_name" = "group2"
"namespace" = "namespace2"
"role_name" = "view"
}
}
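With the roles flattened into local.flat_rbac_roles, the three near-identical resources collapse into a single one; a sketch (the metadata name format is illustrative):
resource "kubernetes_role_binding" "cluster_role_binding" {
  for_each = local.flat_rbac_roles
  metadata {
    name      = "${each.value.group_name}-${each.value.namespace}-${each.value.role_name}-binding"
    namespace = each.value.namespace
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "Role"
    name      = each.value.role_name
  }
  subject {
    kind      = "Group"
    name      = each.value.group_name
    api_group = "rbac.authorization.k8s.io"
  }
}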
Using the flatten() approach:
resource "kubernetes_role_binding" "default_roles_binding" {
for_each = {
for binding in flatten([
for role_name, groups in var.rbac_roles : [
for group in groups : [
for ns in group.namespaces : [
{
binding_name = lower("${ns}-${group.group_name}-${role_name}")
role = role_name
group_id = group.group_id
group_name = group.group_name
ns = ns
}
]
]
]]) : binding.binding_name => binding }
metadata {
namespace = each.value.ns
name = each.value.binding_name
annotations = { "group_name" : each.value.group_name }
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "Role"
name = each.value.role
}
subject {
kind = "Group"
name = each.value.group_id
}
depends_on = [
azurerm_kubernetes_cluster.aks
]
}
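One difference between the two approaches is worth noting: the first keys instances by list position (e.g. view-0-0), so reordering groups or namespaces in tfvars re-creates bindings, while the second keys by a name derived from the values themselves, which is stable under reordering.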

Terraform dynamodb error - all attributes must be indexed

I am trying to create a simple DynamoDB table using the following Terraform resource module.
I get the following error while running terraform:
All attributes must be indexed. Unused attributes: ["pactitle" "ipadress" "Timestamp"].
Why do we need to index all attributes?
How do I solve this?
resource "aws_dynamodb_table" "this" {
count = var.create_table ? 1 : 0
name = var.name
billing_mode = var.billing_mode
hash_key = var.hash_key
range_key = var.range_key
read_capacity = var.read_capacity
write_capacity = var.write_capacity
//stream_enabled = var.stream_enabled
//stream_view_type = var.stream_view_type
dynamic "attribute" {
for_each = var.attributes
content {
name = attribute.value.name
type = attribute.value.type
}
}
server_side_encryption {
enabled = var.server_side_encryption_enabled
kms_key_arn = var.server_side_encryption_kms_key_arn
}
tags = merge(
var.tags,
{
"Name" = format("%s", var.name)
},
)
timeouts {
create = lookup(var.timeouts, "create", null)
delete = lookup(var.timeouts, "delete", null)
update = lookup(var.timeouts, "update", null)
}
}
calling module
module "dynamodb_table" {
source = "./../../../modules/dynamodb"
name = "pack-audit-cert"
hash_key = "id"
create_table= true
read_capacity=5
write_capacity=5
billing_mode = "PROVISIONED"
range_key = "pacid"
attributes = [
{
name = "id"
type = "N"
},
{
name = "pacid"
type = "S"
},
{
name = "pactitle"
type = "S"
},
{
name = "ipadress"
type = "S"
},
{
name = "Timestamp"
type = "S"
}
]
}
Thank you
That error message is a bit misleading. You should only define the indexed attributes when you are creating the table. Since DynamoDB is a schemaless database, it doesn't care about the other attributes at table creation time.
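In practice that means declaring only the attributes used as keys (or in an index); pactitle, ipadress, and Timestamp can still be written on items, they just don't belong in the table definition. A sketch of the corrected module call:
module "dynamodb_table" {
  source = "./../../../modules/dynamodb"
  name         = "pack-audit-cert"
  create_table = true
  billing_mode   = "PROVISIONED"
  read_capacity  = 5
  write_capacity = 5
  hash_key  = "id"
  range_key = "pacid"
  # only key attributes are declared; other item attributes need no schema
  attributes = [
    {
      name = "id"
      type = "N"
    },
    {
      name = "pacid"
      type = "S"
    }
  ]
}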