I need to create Terraform CloudWatch metric filters only if the environment is QAT or PROD. Currently I am using Terraform modules that create them in every environment.
module "aws_cloudwatch_log_metric_filter" {
source = "https://github.com/modules.git//aws-cloudwatch-log-metric-filter"
log_group_name = "/aws/lambda/${var.lambda_name}"
pattern = "{$.message = \"---------------- Message ----------------\"}"
}
locals {
base_tags = {
environment = var.environment
}
}
The main resource inside the module I am calling:
resource "aws_cloudwatch_log_metric_filter" "log_metric" {
count = var.count
name = "Metric"
pattern = var.pattern
log_group_name = var.log_group_name
metric_transformation {
name = "name"
namespace = "namespace"
value = "1"
default_value = "0"
}
}
You could do this in the module:
resource "aws_cloudwatch_log_metric_filter" "log_metric" {
count = var.environment == "QAT" || var.environment == "PROD" ? 1 : 0
name = "Metric"
pattern = var.pattern
log_group_name = "${var.environment}-${var.log_group_name}"
metric_transformation {
name = "name"
namespace = "namespace"
value = "1"
default_value = "0"
}
}
Prefixing with the environment (log_group_name = "${var.environment}-${var.log_group_name}") ensures you don't run into collisions from identical log group names across environments.
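One thing to keep in mind with a conditional count: any reference to the resource elsewhere must be indexed, e.g. aws_cloudwatch_log_metric_filter.log_metric[0], or expressed as a splat like aws_cloudwatch_log_metric_filter.log_metric[*].id, since the resource will not exist in the other environments.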
A simple but effective solution:
locals {
  cw_env = ["QAT", "PROD"]
}

resource "aws_cloudwatch_log_metric_filter" "log_metric" {
  count          = contains(local.cw_env, var.env) ? 1 : 0
  name           = "Metric"
  pattern        = var.pattern
  log_group_name = var.log_group_name

  metric_transformation {
    name          = "name"
    namespace     = "namespace"
    value         = "1"
    default_value = "0"
  }
}
Note that you can also apply this at the calling level:
module "aws_cloudwatch_log_metric_filter" {
source = "https://github.com/modules.git//aws-cloudwatch-log-metric-filter"
count = contains(local.cw_env, local.base_tags.environment) ? 1 : 0
log_group_name = "/aws/lambda/${var.lambda_name}"
pattern = "{$.message = \"---------------- Message ----------------\"}"
}
locals {
base_tags = {
environment = var.environment
}
cw_env = ["QUA", "PROD"]
}
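Note that count (and for_each) on module blocks is only supported in Terraform 0.13 and later. As with resources, once count is set, references to the module's outputs must be indexed. A minimal sketch, assuming the module exposes an output named metric_filter_id (a hypothetical name):

output "filter_id" {
  # "metric_filter_id" is an assumed output name of the module.
  # try() returns null in environments where the module was not created.
  value = try(module.aws_cloudwatch_log_metric_filter[0].metric_filter_id, null)
}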
I'm trying to add a retention policy, but I want to enable it conditionally, as you can see from the code below.
buckets.tf
locals {
  team_buckets = {
    arc = { app_id = "20390", num_buckets = 2, retention_period = null }
    ana = { app_id = "25402", num_buckets = 2, retention_period = 631139040 }
    cha = { app_id = "20391", num_buckets = 2, retention_period = 631139040 } # 20 years
  }
}

module "team_bucket" {
  source = "../../../../modules/gcs_bucket"
  for_each = {
    for bucket in flatten([
      for product_name, bucket_info in local.team_buckets : [
        for i in range(bucket_info.num_buckets) : {
          name             = format("%s-%02d", product_name, i + 1)
          team             = "ei_${product_name}"
          app_id           = bucket_info.app_id
          retention_period = bucket_info.retention_period
        }
      ]
    ]) : bucket.name => bucket
  }

  project_id       = var.project
  name             = "teambucket-${each.value.name}"
  app_id           = each.value.app_id
  team             = each.value.team
  retention_period = each.value.retention_period
}
The gcs_bucket module itself is defined as follows:
main.tf
resource "google_storage_bucket" "bucket" {
project = var.project_id
name = "${var.project_id}-${var.name}"
location = var.location
labels = {
app_id = var.app_id
ei_team = var.team
cost_center = var.cost_center
}
uniform_bucket_level_access = var.uniform_bucket_level_access
dynamic "retention_policy" {
for_each = var.retention_policy == null ? [] : [var.retention_period]
content {
retention_period = var.retention_period
}
}
}
But I can't seem to make the code pick up the value; for example, as you can see below, the retention policy is removed rather than applied:
  ~ resource "google_storage_bucket" "bucket" {
        id   = "teambucket-cha-02"
        name = "teambucket-cha-02"
        # (11 unchanged attributes hidden)

      - retention_policy {
          - is_locked        = false      -> null
          - retention_period = 3155760000 -> null
        }
    }
variables.tf for retention policy is as follows
variable "retention_policy" {
description = "Configuation of the bucket's data retention policy for how long objects in the bucket should be retained"
type = any
default = null
}
variable "retention_period" {
default = null
}
Your var.retention_policy is always null, its default value; you never set it anywhere. You probably wanted the following:
for_each = var.retention_period == null ? [] : [var.retention_period]
instead of
for_each = var.retention_policy == null ? [] : [var.retention_period]
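Putting it together, the dynamic block in the bucket module would look like the sketch below, keyed on var.retention_period alone (which also lets you drop the now-unused retention_policy variable):

dynamic "retention_policy" {
  for_each = var.retention_period == null ? [] : [var.retention_period]
  content {
    # retention_policy.value is the single element of the for_each list.
    retention_period = retention_policy.value
  }
}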
My problem is that I can't dynamically attach the created disks to the VPS; the google_compute_attached_disk resource cannot be used here.
Here is my code. What is the correct way to do this in this situation?
resource "google_compute_instance" "vps" {
name = var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = { for vol in var.volumes : vol.volume_name => vol }
content {
source = element(var.volumes[*].volume_name, 0)
}
}
network_interface {
subnetwork = var.server_network
access_config {
nat_ip = google_compute_address.static_ip.address
}
}
resource "google_compute_disk" "volume" {
for_each = { for vol in var.volumes : vol.volume_name => vol }
name = each.value.volume_name
type = each.value.volume_type
size = each.value.volume_size
zone = var.server_datacenter
labels = each.value.volume_labels
}
The volumes variable value:
volumes = [{
  volume_name = "v3-postgres-saga-import-test-storage"
  volume_size = "40"
  volume_type = "pd-ssd"
  volume_labels = {
    environment = "production"
    project     = "v3"
    type        = "storage"
  }
}, {
  volume_name = "volume-vpstest2"
  volume_size = "20"
  volume_type = "pd-ssd"
  volume_labels = {
    environment = "production"
    project     = "v2"
    type        = "storage"
  }
}]
If I do something like this instead, I get an error:
source = google_compute_disk.volume[*].self_link
This object does not have an attribute named "self_link".
Since you've used for_each in google_compute_disk.volume, it will be a map, not a list. Thus you can list all of the self_link values as follows:
source = values(google_compute_disk.volume)[*].self_link
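If you want each generated attached_disk block to reference its own disk rather than always the first one, you can iterate over the resource map itself, which also gives Terraform an implicit dependency on the disks. A sketch:

dynamic "attached_disk" {
  for_each = google_compute_disk.volume
  content {
    # attached_disk.value is one google_compute_disk object from the map.
    source = attached_disk.value.self_link
  }
}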
You can also define the volumes variable directly as a map instead of a list.
variables.tf:
variable "volumes" {
default = {
postgres_saga = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_vpstest2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
Instead of a variable, you can also use a local value loaded from a JSON configuration file. Example structure of the Terraform module:
project
  module
    main.tf
    locals.tf
    resource
      volumes.json
volumes.json file
{
  "volumes": {
    "postgres_saga": {
      "volume_name": "v3-postgres-saga-import-test-storage",
      "volume_size": "40",
      "volume_type": "pd-ssd",
      "volume_labels": {
        "environment": "production",
        "project": "v3",
        "type": "storage"
      }
    },
    "volume_vpstest2": {
      "volume_name": "volume-vpstest2",
      "volume_size": "20",
      "volume_type": "pd-ssd",
      "volume_labels": {
        "environment": "production",
        "project": "v2",
        "type": "storage"
      }
    }
  }
}
locals.tf file:
locals {
  volumes = jsondecode(file("${path.module}/resource/volumes.json"))["volumes"]
}
main.tf file:
resource "google_compute_instance" "vps" {
name = var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = [
var.volumes
# local.volumes
]
content {
source = attached_disk.value["volume_name"]
}
}
network_interface {
subnetwork = var.server_network
access_config {
nat_ip = google_compute_address.static_ip.address
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
# local.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
With a map, you can use for_each directly on the google_compute_disk.volume resource without any transformation, and you can use the same map in the dynamic block.
Can you please help with creating an iam-user module in Terraform that covers three types of IAM user scenarios?
PS: I don't want to create nested directories under modules/iam/iam-user/ to handle each IAM user case separately.
The scenarios are as follows:
// Type 1
resource "aws_iam_user" "aws_iam_user_000" {
  name                 = "user-000"
  permissions_boundary = data.aws_iam_policy.permission_boundary.arn
}

resource "aws_iam_user_policy_attachment" "aws_iam_user_000" {
  policy_arn = aws_iam_policy.s3_iam_policy.arn
  user       = aws_iam_user.aws_iam_user_000.name
}

// Type 2
resource "aws_iam_user" "aws_iam_user_001" {
  path                 = "/"
  for_each             = toset(var.user_lists)
  name                 = each.value
  force_destroy        = true
  permissions_boundary = data.aws_iam_policy.permission_boundary.arn
}

resource "aws_iam_group" "aws_iam_group_001" {
  name = "group-0001"
}

resource "aws_iam_user_group_membership" "group-membership" {
  for_each = toset(var.user_lists)
  user     = aws_iam_user.aws_iam_user_001[each.value].name
  groups   = [aws_iam_group.aws_iam_group_001.name]
}

// Type 3
resource "aws_iam_user" "aws_iam_user_0002" {
  name                 = "user-002"
  tags                 = { "user_type" = "admin_account" }
  permissions_boundary = data.aws_iam_policy.permission_boundary.arn
}
If I understand you correctly, you should be able to accomplish this using count and for_each with variables as below.
variables.tf
variable "is_admin" {
type = bool
default = false
}
variable "user_lists" {
type = list(any)
default = null
}
main.tf
// Type 1 and Type 3
resource "aws_iam_user" "this" {
  count                = var.user_lists == null ? 1 : 0
  name                 = var.is_admin ? "user-002" : "user-000"
  permissions_boundary = data.aws_iam_policy.permission_boundary.arn
  tags                 = var.is_admin ? { "user_type" = "admin_account" } : null
}

resource "aws_iam_user_policy_attachment" "this" {
  # Only Type 1 (non-admin) gets the S3 policy attachment.
  count      = var.user_lists == null && !var.is_admin ? 1 : 0
  policy_arn = aws_iam_policy.s3_iam_policy.arn
  user       = aws_iam_user.this[0].name
}

// Type 2
resource "aws_iam_user" "from_list" {
  for_each             = var.user_lists != null ? toset(var.user_lists) : toset([])
  path                 = "/"
  name                 = each.value
  force_destroy        = true
  permissions_boundary = data.aws_iam_policy.permission_boundary.arn
}

resource "aws_iam_group" "from_list" {
  # The group only exists when a user list is supplied.
  count = var.user_lists != null ? 1 : 0
  name  = "group-0001"
}

resource "aws_iam_user_group_membership" "this" {
  for_each = var.user_lists != null ? toset(var.user_lists) : toset([])
  user     = aws_iam_user.from_list[each.value].name
  groups   = [aws_iam_group.from_list[0].name]
}
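For example, the three scenarios could then be selected purely through inputs (the module path and instance names here are hypothetical):

// Type 1: single user with the S3 policy attached (all defaults)
module "user_type1" {
  source = "./modules/iam/iam-user"
}

// Type 2: users created from a list and added to the group
module "user_type2" {
  source     = "./modules/iam/iam-user"
  user_lists = ["user-a", "user-b"]
}

// Type 3: single admin user, no policy attachment
module "user_type3" {
  source   = "./modules/iam/iam-user"
  is_admin = true
}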
TF project:
main.tf
inputs.tf
The contents are:
main.tf
locals {
  common_tags = {
    SECRET_MGR_HOST            = var.SECRET_MGR_HOST
    SECRET_MGR_SAFE            = var.SECRET_MGR_SAFE
    SECRET_MGR_SECRET_KEY_NAME = var.SECRET_MGR_SECRET_KEY_NAME
    SECRET_MGR_USER_NAME       = var.SECRET_MGR_USER_NAME
    LOGON_URL                  = var.LOGON_URL
    PLATFORM_SECRET_NAME       = var.PLATFORM_SECRET_NAME
  }
  vpc_config_vars = {
    subnet_ids         = var.SUBNET_IDS
    security_group_ids = var.SECURITY_GROUP_IDS
  }
}
module "lambda" {
source = "git::https://corpsource.io/corp-cloud-platform-team/corpcloudv2/terraform/lambda-modules.git?ref=dev"
lambda_name = var.name
lambda_role = "arn:aws:iam::${var.ACCOUNT}:role/${var.lambda_role}"
lambda_handler = var.handler
lambda_runtime = var.runtime
default_lambda_timeout = var.timeout
ACCOUNT = var.ACCOUNT
vpc_config_vars = merge(
local.vpc_config_vars
)
env = merge(
local.common_tags,
{ DEFAULT_ROLE = "corp-platform" }
)
}
module "lambda_iam" {
source = "git::https://corpsource.io/corp-cloud-platform-team/corpcloudv2/terraform/iam-modules/lambda-iam.git?ref=dev"
lambda_policy = var.lambda_policy
ACCOUNT = var.ACCOUNT
lambda_role = var.lambda_role
}
and inputs.tf
variable "handler" {
type = string
default = "handler.lambda_handler"
}
variable "runtime" {
type = string
default = "python3.8"
}
variable "name" {
type = string
default = "create-SECRET_MGR-entry"
}
variable "timeout"{
type = string
default = "120"
}
variable "lambda_role" {
type = string
default = "create-SECRET_MGR-entry-role"
}
variable "ACCOUNT" {
type = string
default = ""
}
variable "SECRET_MGR_HOST" {
type = string
default = ""
}
variable "SECRET_MGR_SAFE" {
type = string
default = ""
}
variable "SUBNET_IDS" {
type = string
default = ""
}
variable "subnet_ids" {
type = string
default = ""
}
variable "security_group_ids" {
type = string
default = ""
}
variable "SECURITY_GROUP_IDS" {
type = string
default = ""
}
variable "SECRET_MGR_SECRET_KEY_NAME" {
type = string
default = ""
}
variable "SECRET_MGR_USER_NAME" {
type = string
default = ""
}
variable "LOGON_URL" {
type = string
default = ""
}
variable "PLATFORM_SECRET_NAME" {
type = string
default = ""
}
variable "lambda_policy" {
default = "{\"Version\": \"2012-10-17\",\"Statement\": [{\"Sid\":\"VisualEditor0\",\"Effect\":\"Allow\",\"Action\":[\"logs:CreateLogStream\",\"logs:CreateLogGroup\"],\"Resource\":\"*\"},{\"Sid\":\"UseKMSKey\",\"Effect\":\"Allow\",\"Action\":\"kms:Decrypt\",\"Resource\":\"*\"},{\"Sid\":\"GetSecret\",\"Effect\":\"Allow\",\"Action\":\"secretsmanager:GetSecretValue\",\"Resource\":\"*\"},{\"Sid\":\"ConnectToVPC\",\"Effect\":\"Allow\",\"Action\":[\"ec2:CreateNetworkInterface\",\"ec2:DescribeNetworkInterfaces\",\"ec2:DeleteNetworkInterface\"],\"Resource\":\"*\"},{\"Sid\":\"VisualEditor1\",\"Effect\":\"Allow\",\"Action\":\"logs:PutLogEvents\",\"Resource\":\"*\"},{\"Effect\": \"Allow\",\"Action\": [\"logs:*\"],\"Resource\": \"arn:aws:logs:*:*:*\"},{\"Effect\": \"Allow\",\"Action\": [\"s3:GetObject\",\"s3:PutObject\"],\"Resource\": \"arn:aws:s3:::*\"}]}"
}
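(As an aside, escaped JSON strings like lambda_policy above are hard to read and maintain. Since variable defaults must be literal values, a common alternative is a local built with jsonencode(); a sketch of the first statement, with the rest following the same pattern:

locals {
  lambda_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid      = "VisualEditor0"
        Effect   = "Allow"
        Action   = ["logs:CreateLogStream", "logs:CreateLogGroup"]
        Resource = "*"
      },
      # ... remaining statements as in the original string
    ]
  })
})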
As you can see, main.tf references a module in another project via the source argument. The structure of the module project is likewise:
main.tf
inputs.tf
main.tf
data "archive_file" "lambda_handler" {
type = "zip"
output_path = "lambda_package.zip"
source_dir = "lambda_code/"
}
resource "aws_lambda_function" "lambda_function" {
filename = "lambda_package.zip"
function_name = var.lambda_name
role = var.lambda_role
handler = var.lambda_handler
runtime = var.lambda_runtime
memory_size = 256
timeout = var.default_lambda_timeout
source_code_hash = filebase64sha256("lambda_code/lambda_package.zip")
dynamic "vpc_config" {
for_each = length(keys(var.vpc_config_vars)) == 0 ? [] : [true]
content {
variables = var.vpc_config_vars
}
}
dynamic "environment" {
for_each = length(keys(var.env)) == 0 ? [] : [true]
content {
variables = var.env
}
}
}
inputs.tf
variable "lambda_name" {
type = string
}
variable "lambda_runtime" {
type = string
}
variable "lambda_role" {
type = string
}
variable "default_lambda_timeout" {
type = string
}
variable "lambda_handler" {
type = string
}
variable "vpc_config_vars" {
type = map(string)
default = {}
}
variable "env" {
type = map(string)
default = {}
}
variable "tags" {
default = {
blc = "1539"
costcenter = "54111"
itemid = "obfuscated"
owner = "cloudengineer#company.com"
}
}
variable "ACCOUNT" {
type = string
}
Error when my pipeline runs the project:
Error: Missing required argument
(and 7 more similar warnings elsewhere)

  on .terraform/modules/lambda/main.tf line 18, in resource "aws_lambda_function" "lambda_function":
  18: content {

The argument "subnet_ids" is required, but no definition was found.

Error: Missing required argument

  on .terraform/modules/lambda/main.tf line 18, in resource "aws_lambda_function" "lambda_function":
  18: content {

The argument "security_group_ids" is required, but no definition was found.

Error: Unsupported argument

  on .terraform/modules/lambda/main.tf line 19, in resource "aws_lambda_function" "lambda_function":
  19: variables = var.vpc_config_vars

An argument named "variables" is not expected here.
I'm passing in the values for subnet_ids and security_group_ids as environment variables in my GitLab CI file, and log statements confirm those values are defined.
What is wrong? Thank you.
You need to pass the required arguments of the vpc_config block, which are subnet_ids and security_group_ids; you cannot assign the entire map variable as-is inside the nested content block. Each argument must be introduced individually with an equals sign ("=").
Try the code snippet below:
###################
# Root Module
###################
locals {
  vpc_config_vars = {
    vpc_config = {
      subnet_ids         = ["subnet-072297c000a32e200"],
      security_group_ids = ["sg-05d06431bd25870b4"]
    }
  }
}

module "lambda" {
  source = "./modules"
  ...
  vpc_config_vars = local.vpc_config_vars
}

###################
# Child Module
###################
variable "vpc_config_vars" {
  default = {}
}

resource "aws_lambda_function" "lambda_function" {
  filename         = "lambda_package.zip"
  function_name    = var.lambda_name
  role             = var.lambda_role
  handler          = var.lambda_handler
  runtime          = var.lambda_runtime
  memory_size      = 256
  timeout          = var.default_lambda_timeout
  source_code_hash = filebase64sha256("lambda_code/lambda_package.zip")

  dynamic "vpc_config" {
    for_each = var.vpc_config_vars != {} ? var.vpc_config_vars : {}
    content {
      subnet_ids         = vpc_config.value["subnet_ids"]
      security_group_ids = vpc_config.value["security_group_ids"]
    }
  }
}
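A small simplification: for_each over an empty map already produces zero blocks, so the conditional guard is not strictly needed and the dynamic block can be written as:

dynamic "vpc_config" {
  # An empty map simply yields zero vpc_config blocks.
  for_each = var.vpc_config_vars
  content {
    subnet_ids         = vpc_config.value["subnet_ids"]
    security_group_ids = vpc_config.value["security_group_ids"]
  }
}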
I am trying to pass the S3 bucket names and create_user flags into a locals block in main.tf so that both are available as lists. I then pass list_of_buckets to the s3 module to create the buckets, and loop over users_to_create in the s3_user module to create a user whenever the boolean is set to true. All of these values are passed through variable.tf and then into main.tf.
dev.tfvars
wea-nonprod = {
  services = {
    s3 = {
      sthree = {
        create_user = true
      }
      sfour = {
        create_user = true
      }
      sfive = {
        create_user = true
      }
    }
  }
}
variable.tf
variable "s3_buckets" {
type = list(map)
}
main.tf
locals {
  users_to_create = ""
  list_of_buckets = ""
}

module "s3" {
  source                 = "../../s3"
  name                   = join("-", [var.name_prefix, "s3"])
  tags                   = merge(var.tags, { Name = join("-", [var.name_prefix, "s3"]) })
  buckets                = list_of_buckets
  sse_algorithm          = "AES256"
  access_log_bucket_name = var.access_log_bucket_name
}

module "s3_user" {
  for_each   = local.users_to_create
  source     = "./service-s3-bucket-user"
  name       = join("-", [var.name_prefix, each.key])
  tags       = var.tags
  bucket_arn = module.s3.bucket_arns[each.key]
  depends_on = [module.s3]
}
Just iterate over your wea-nonprod map:
locals {
  # Names of the buckets whose create_user flag is true
  users_to_create = [for name, config in var.wea-nonprod.services.s3 : name if config.create_user]
  # Names of all buckets
  list_of_buckets = keys(var.wea-nonprod.services.s3)
}
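Given the dev.tfvars above, both locals evaluate to ["sthree", "sfour", "sfive"], since every bucket sets create_user = true.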
And a few changes to your module blocks:
module "s3" {
source = "../../s3"
name = "${var.name_prefix}-s3"
tags = merge(var.tags, { Name = "${var.name_prefix}-s3" })
buckets = local.list_of_buckets
sse_algorithm = "AES256"
access_log_bucket_name = var.access_log_bucket_name
}
module "s3_user" {
count = length(local.users_to_create)
source = "./service-s3-bucket-user"
name = "${var.name_prefix}${local.users_to_create[count.index]}"
tags = var.tags
bucket_arn = module.s3.bucket_arns[local.users_to_create[count.index]]
depends_on = [module.s3]
}
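Alternatively, on Terraform 0.13 or later you can keep for_each on the module instead of switching to count (a sketch under the same assumptions about the module's inputs):

module "s3_user" {
  for_each   = toset(local.users_to_create)
  source     = "./service-s3-bucket-user"
  name       = "${var.name_prefix}-${each.key}"
  tags       = var.tags
  bucket_arn = module.s3.bucket_arns[each.key]
  depends_on = [module.s3]
}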