aws_iam_account_alias must be replaced when adding new accounts

I’m trying to add a new AWS account using Terraform, but when I run terraform plan it says the aliases of all the existing accounts must be replaced, and every one of them currently references our master account.
I’m trying to figure out whether this would cause issues once deployed; I don’t want any account numbers to change.
Here’s the plan output:
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create
-/+ destroy and then create replacement

Terraform will perform the following actions:

  # aws_iam_account_alias.project-dev must be replaced
-/+ resource "aws_iam_account_alias" "project-dev" {
      ~ account_alias = "project-master" -> "project-dev" # forces replacement
      ~ id            = "project-master" -> (known after apply)
    }

  # aws_iam_account_alias.project-platform must be replaced
-/+ resource "aws_iam_account_alias" "project-platform" {
      ~ account_alias = "project-master" -> "project-platform" # forces replacement
      ~ id            = "project-master" -> (known after apply)
    }

  # aws_iam_account_alias.project-prod must be replaced
-/+ resource "aws_iam_account_alias" "project-prod" {
      ~ account_alias = "project-master" -> "project-prod" # forces replacement
      ~ id            = "project-master" -> (known after apply)
    }

  # aws_iam_account_alias.project-new will be created
  + resource "aws_iam_account_alias" "project-new" {
      + account_alias = "project-new"
      + id            = (known after apply)
    }

  # aws_iam_account_alias.project-stage must be replaced
-/+ resource "aws_iam_account_alias" "project-stage" {
      ~ account_alias = "project-master" -> "project-stage" # forces replacement
      ~ id            = "project-master" -> (known after apply)
    }

  # aws_organizations_account.project-new will be created
  + resource "aws_organizations_account" "project-new" {
      + arn              = (known after apply)
      + email            = "aws-admins+project-new#project.io"
      + id               = (known after apply)
      + joined_method    = (known after apply)
      + joined_timestamp = (known after apply)
      + name             = "PROJECT-NEW"
      + parent_id        = (known after apply)
      + status           = (known after apply)
      + tags             = {
          + "env" = "new"
        }
    }

Plan: 6 to add, 0 to change, 4 to destroy.
Here’s the Terraform code:
# ./providers.tf
terraform {
  required_version = "0.12.12"

  backend "s3" {
    bucket  = "{redacted-acc-no}-tfstate"
    key     = "core/accounts"
    region  = "eu-west-1"
    profile = "PROJECT-MASTER"
  }
}

provider "aws" {
  region  = "eu-west-1"
  profile = "PROJECT-MASTER"
}
# ./accounts.tf
# dev
resource "aws_organizations_account" "project-dev" {
  name  = "PROJECT-DEV"
  email = "aws-admins+project-dev#project.io"
  tags = {
    env = "dev"
  }
}

resource "aws_iam_account_alias" "project-dev" {
  account_alias = "project-dev"
}

# stage
resource "aws_organizations_account" "project-stage" {
  name  = "PROJECT-STAGE"
  email = "aws-admins+project-stage#project.io"
  tags = {
    env = "stage"
  }
}

resource "aws_iam_account_alias" "project-stage" {
  account_alias = "project-stage"
}

# project-prod
resource "aws_organizations_account" "project-prod" {
  name  = "PROJECT-PROD"
  email = "aws-admins+project-prod#project.io"
  tags = {
    env = "prod"
  }
}

resource "aws_iam_account_alias" "project-prod" {
  account_alias = "project-prod"
}
# project-new
resource "aws_organizations_account" "project-new" {
  name  = "PROJECT-NEW"
  email = "aws-admins+project-new#project.io"
  tags = {
    env = "new"
  }
}

resource "aws_iam_account_alias" "project-new" {
  account_alias = "project-new"
}
# project-platform
resource "aws_organizations_account" "project-platform" {
  name  = "PROJECT-PLATFORM"
  email = "aws-admins+project-platform#project.io"
  tags = {
    env = "shared"
  }
}

resource "aws_iam_account_alias" "project-platform" {
  account_alias = "project-platform"
}
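
For context: aws_iam_account_alias manages the alias of whichever account the provider is authenticated against. Because the single aws provider above uses the PROJECT-MASTER profile, all of these alias resources have been managing the master account's alias, which is why the plan shows every one of them replacing "project-master". Applying this would just keep renaming the master account's alias; no account numbers would change (aliases are only display names), but the member accounts would never get their own aliases. To alias each member account, each aws_iam_account_alias needs a provider that assumes a role in that account. A minimal sketch for one account, assuming the member accounts were created through Organizations and therefore have the default OrganizationAccountAccessRole (the role name is an assumption):

provider "aws" {
  alias   = "dev"
  region  = "eu-west-1"
  profile = "PROJECT-MASTER"

  # Assume the cross-account admin role that Organizations
  # creates in each member account (role name is an assumption).
  assume_role {
    role_arn = "arn:aws:iam::${aws_organizations_account.project-dev.id}:role/OrganizationAccountAccessRole"
  }
}

resource "aws_iam_account_alias" "project-dev" {
  provider      = aws.dev
  account_alias = "project-dev"
}

The same pattern would repeat for stage, prod, platform, and the new account, each with its own provider alias.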

Related

Terraform kubernetes service account and role binding modules not working

I am trying to create a Kubernetes service account in a namespace I created, which will have a secret and a cluster role binding. However, even though the terraform plan and apply stages show that it is being created, it isn't. Please see the module code and output below:
resource "kubernetes_service_account" "serviceaccount" {
metadata {
name = var.name
namespace = "kube-system"
}
}
resource "kubernetes_cluster_role_binding" "serviceaccount" {
metadata {
name = var.name
}
subject {
kind = "User"
name = "system:serviceaccount:kube-system:${var.name}"
}
role_ref {
kind = "ClusterRole"
name = "cluster-admin"
api_group = "rbac.authorization.k8s.io"
}
}
data "kubernetes_service_account" "serviceaccount" {
metadata {
name = var.name
namespace = "kube-system"
}
depends_on = [
resource.kubernetes_service_account.serviceaccount
]
}
data "kubernetes_secret" "serviceaccount" {
metadata {
name = data.kubernetes_service_account.serviceaccount.default_secret_name
namespace = "kube-system"
}
binary_data = {
"token": ""
}
depends_on = [
resource.kubernetes_service_account.serviceaccount
]
}
And the output from the terraform run in DevOps:
  # module.dd_service_account.data.kubernetes_secret.serviceaccount will be read during apply
  # (config refers to values not yet known)
 <= data "kubernetes_secret" "serviceaccount" {
      + binary_data = (sensitive value)
      + data        = (sensitive value)
      + id          = (known after apply)
      + immutable   = (known after apply)
      + type        = (known after apply)

      + metadata {
          + generation       = (known after apply)
          + name             = (known after apply)
          + namespace        = "kube-system"
          + resource_version = (known after apply)
          + uid              = (known after apply)
        }
    }

  # module.dd_service_account.data.kubernetes_service_account.serviceaccount will be read during apply
  # (depends on a resource or a module with changes pending)
 <= data "kubernetes_service_account" "serviceaccount" {
      + automount_service_account_token = (known after apply)
      + default_secret_name             = (known after apply)
      + id                              = (known after apply)
      + image_pull_secret               = (known after apply)
      + secret                          = (known after apply)

      + metadata {
          + generation       = (known after apply)
          + name             = "deployer-new"
          + namespace        = "kube-system"
          + resource_version = (known after apply)
          + uid              = (known after apply)
        }
    }

  # module.dd_service_account.kubernetes_cluster_role_binding.serviceaccount will be created
  + resource "kubernetes_cluster_role_binding" "serviceaccount" {
      + id = (known after apply)

      + metadata {
          + generation       = (known after apply)
          + name             = "deployer-new"
          + resource_version = (known after apply)
          + uid              = (known after apply)
        }

      + role_ref {
          + api_group = "rbac.authorization.k8s.io"
          + kind      = "ClusterRole"
          + name      = "cluster-admin"
        }

      + subject {
          + api_group = (known after apply)
          + kind      = "User"
          + name      = "system:serviceaccount:kube-system:deployer-new"
          + namespace = "default"
        }
    }

  # module.dd_service_account.kubernetes_service_account.serviceaccount will be created
  + resource "kubernetes_service_account" "serviceaccount" {
      + automount_service_account_token = true
      + default_secret_name             = (known after apply)
      + id                              = (known after apply)

      + metadata {
          + generation       = (known after apply)
          + name             = "deployer-new"
          + namespace        = "kube-system"
          + resource_version = (known after apply)
          + uid              = (known after apply)
        }
    }
When I run kubectl against the cluster, the namespace I created is there, but no service accounts are.
Any ideas?
Thanks.
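
One common cause of apply reporting success while kubectl shows nothing is the kubernetes provider talking to a different cluster than kubectl. Pinning the provider to the same kubeconfig and context is a quick way to rule that out; a minimal sketch, where the path and context name are assumptions:

provider "kubernetes" {
  # Use the same kubeconfig and context that kubectl uses,
  # so both tools talk to the same cluster.
  config_path    = "~/.kube/config"
  config_context = "my-cluster-context"
}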

Terraform tries to replace the previous DynamoDB table instead of creating a new one

I am new to Terraform. With this AWS DynamoDB module, Terraform tries to replace the existing table instead of creating a new table each time. But if we use a new Terraform state file, it creates another DynamoDB table without replacing anything.
Terraform Version: 0.15
locals {
  table_name_from_env = var.dynamodb_table
  table_name          = join("-", [local.table_name_from_env, lower(var.Component)])
  kinesis_name        = join("-", [local.table_name, "kinesis"])
}

resource "aws_dynamodb_table" "non_autoscaled" {
  count          = !var.autoscaling_enabled ? 1 : 0
  name           = local.table_name
  read_capacity  = var.read_capacity
  write_capacity = var.write_capacity
  billing_mode   = var.billing_mode
  hash_key       = var.hash_key
  range_key      = var.range_key

  dynamic "attribute" {
    for_each = var.attributes
    content {
      name = attribute.value.name
      type = attribute.value.type
    }
  }

  ttl {
    enabled        = var.ttl_enabled
    attribute_name = var.ttl_attribute_name
  }

  # tags = tomap({ "organization" = var.organization, "businessunit" = var.businessunit })
  tags = tomap({ "Component" = var.Component })
  # tags = local.common_tags
}

resource "aws_dynamodb_table" "autoscaled" {
  count          = var.autoscaling_enabled ? 1 : 0
  name           = local.table_name
  read_capacity  = var.read_capacity
  write_capacity = var.write_capacity
  billing_mode   = var.billing_mode
  hash_key       = var.hash_key
  range_key      = var.range_key

  dynamic "attribute" {
    for_each = var.attributes
    content {
      name = attribute.value.name
      type = attribute.value.type
    }
  }

  ttl {
    enabled        = var.ttl_enabled
    attribute_name = var.ttl_attribute_name
  }
}

resource "aws_kinesis_stream" "dynamodb_table_kinesis" {
  count       = var.kinesis_enabled ? 1 : 0
  name        = local.kinesis_name
  shard_count = var.shard_count

  stream_mode_details {
    stream_mode = var.kinesis_stream_mode
  }
}

resource "aws_dynamodb_kinesis_streaming_destination" "dynamodb_table_kinesis_dest_non_autoscaled" {
  count      = var.kinesis_enabled && !var.autoscaling_enabled ? 1 : 0
  stream_arn = aws_kinesis_stream.dynamodb_table_kinesis[0].arn
  table_name = aws_dynamodb_table.non_autoscaled[0].name
}

resource "aws_dynamodb_kinesis_streaming_destination" "dynamodb_table_kinesis_dest_autoscaled" {
  count      = var.kinesis_enabled && var.autoscaling_enabled ? 1 : 0
  stream_arn = aws_kinesis_stream.dynamodb_table_kinesis[0].arn
  table_name = aws_dynamodb_table.autoscaled[0].name
}
Can anybody suggest what is missing in my approach?
Terraform Plan output:
+ terraform plan
module.aws_managed_dynamodb_table.aws_kinesis_stream.dynamodb_table_kinesis[0]: Refreshing state... [id=arn:aws:kinesis:stream/dynamodb-testing12345-coms-kinesis]
module.aws_managed_dynamodb_table.aws_dynamodb_table.non_autoscaled[0]: Refreshing state... [id=dynamodb-testing12345-coms]
module.aws_managed_dynamodb_table.aws_dynamodb_kinesis_streaming_destination.dynamodb_table_kinesis_dest_non_autoscaled[0]: Refreshing state... [id=dynamodb-testing12345-coms,arn:aws:kinesis:ap-south-1:stream/dynamodb-testing12345-coms-kinesis]

Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
-/+ destroy and then create replacement

Terraform will perform the following actions:

  # module.aws_managed_dynamodb_table.aws_dynamodb_kinesis_streaming_destination.dynamodb_table_kinesis_dest_non_autoscaled[0] must be replaced
-/+ resource "aws_dynamodb_kinesis_streaming_destination" "dynamodb_table_kinesis_dest_non_autoscaled" {
      ~ id         = "dynamodb-testing12345-coms,arn:aws:kinesis:ap-south-1:stream/dynamodb-testing12345-coms-kinesis" -> (known after apply)
      ~ stream_arn = "arn:aws:kinesis:ap-south-1:stream/dynamodb-testing12345-coms-kinesis" -> (known after apply) # forces replacement
      ~ table_name = "dynamodb-testing12345-coms" -> "dynamodb-testing123456-coms" # forces replacement
    }

  # module.aws_managed_dynamodb_table.aws_dynamodb_table.non_autoscaled[0] must be replaced
-/+ resource "aws_dynamodb_table" "non_autoscaled" {
      ~ arn              = "arn:aws:dynamodb:ap-south-1:table/dynamodb-testing12345-coms" -> (known after apply)
      ~ id               = "dynamodb-testing12345-coms" -> (known after apply)
      ~ name             = "dynamodb-testing12345-coms" -> "dynamodb-testing123456-coms" # forces replacement
      + stream_arn       = (known after apply)
      - stream_enabled   = false -> null
      + stream_label     = (known after apply)
      + stream_view_type = (known after apply)
        tags             = {
            "Component" = "XXX"
        }
        # (6 unchanged attributes hidden)

      ~ point_in_time_recovery {
          ~ enabled = false -> (known after apply)
        }

      + server_side_encryption {
          + enabled     = (known after apply)
          + kms_key_arn = (known after apply)
        }

        # (3 unchanged blocks hidden)
    }

  # module.aws_managed_dynamodb_table.aws_kinesis_stream.dynamodb_table_kinesis[0] must be replaced
-/+ resource "aws_kinesis_stream" "dynamodb_table_kinesis" {
      ~ arn                 = (known after apply)
      ~ id                  = (known after apply)
      ~ name                = "dynamodb-testing12345-coms-kinesis" -> "dynamodb-testing123456-coms-kinesis" # forces replacement
      - shard_level_metrics = [] -> null
      - tags                = {} -> null
      ~ tags_all            = {} -> (known after apply)
        # (4 unchanged attributes hidden)
        # (1 unchanged block hidden)
    }

Plan: 3 to add, 0 to change, 3 to destroy.
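
The plan itself shows the cause: name changes from dynamodb-testing12345-coms to dynamodb-testing123456-coms, and a DynamoDB table's name cannot be changed in place, so Terraform must destroy and recreate it (the # forces replacement marker). With a fresh state file Terraform has no record of the old table, so it simply creates the new one and leaves the old one unmanaged. If the worry is accidentally destroying a live table, a lifecycle guard turns such a plan into a hard error instead; a sketch on the non-autoscaled resource:

resource "aws_dynamodb_table" "non_autoscaled" {
  count = !var.autoscaling_enabled ? 1 : 0
  name  = local.table_name
  # ... other arguments as above ...

  lifecycle {
    # Fail the plan rather than destroy the table when a change
    # (such as a renamed var.dynamodb_table) forces replacement.
    prevent_destroy = true
  }
}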

How can we keep existing EBS volumes from being deleted?

I'm using Terraform 1.1.3 with AWS provider 3.75.2 to write TF code for the existing 2-node infra. The code snippet is like below:
module:
resource "aws_ebs_volume" "backend-logs" {
count = var.create_ebs_log_volumes ? var.backend_nodes_qty : 0
availability_zone = element(data.aws_subnet.backend.*.availability_zone, count.index)
size = var.volume_log_size
type = var.ebs_volume_type
encrypted = var.ebs_enable_encryption
kms_key_id = var.ebs_encryption_key_id
}
resource "aws_volume_attachment" "backend-logs" {
count = var.backend_nodes_qty
device_name = "/dev/sdf"
volume_id = element(module.xyz.backend_ebs_volume_log_ids, count.index)
instance_id = element(module.xyz.backend_instance_ids, count.index)
}
and I've imported the instance/volume/attachment resources successfully.
terraform import module.xyz.aws_ebs_volume.backend-logs[0] vol-0123456789abcedf0
terraform import module.xyz.aws_ebs_volume.backend-logs[1] vol-0123456789abcedf1
terraform import aws_volume_attachment.backend-logs[0] /dev/sdf:vol-0123456789abcedf0:i-0123456789abcedf0
terraform import aws_volume_attachment.backend-logs[1] /dev/sdf:vol-0123456789abcedf1:i-0123456789abcedf1
When I run terraform plan, it tells me that the volumes are going to be destroyed. How can we avoid that? Thanks
  # aws_volume_attachment.backend-logs[0] must be replaced
-/+ resource "aws_volume_attachment" "backend-logs" {
      ~ id        = "vai-1993905001" -> (known after apply)
      ~ volume_id = "vol-0123456789abcedf0" -> (known after apply) # forces replacement
        # (2 unchanged attributes hidden)
    }

  # aws_volume_attachment.backend-logs[1] must be replaced
-/+ resource "aws_volume_attachment" "backend-logs" {
      ~ id        = "vai-1955292002" -> (known after apply)
      ~ volume_id = "vol-0123456789abcedf1" -> (known after apply) # forces replacement
        # (2 unchanged attributes hidden)
    }

  # module.xyz.aws_ebs_volume.backend-logs[0] must be replaced
-/+ resource "aws_ebs_volume" "backend-logs" {
      ~ arn                  = "arn:aws:ec2:us-west-2:1234567890:volume/vol-0123456789abcedf0" -> (known after apply)
      ~ availability_zone    = "us-west-2a" -> (known after apply) # forces replacement
      ~ id                   = "vol-0123456789abcedf0" -> (known after apply)
      ~ iops                 = 150 -> (known after apply)
      + kms_key_id           = (known after apply)
      - multi_attach_enabled = false -> null
      + snapshot_id          = (known after apply)
      ~ throughput           = 0 -> (known after apply)
        # (3 unchanged attributes hidden)
    }

  # module.xyz.aws_ebs_volume.backend-logs[1] must be replaced
-/+ resource "aws_ebs_volume" "backend-logs" {
      ~ arn                  = "arn:aws:ec2:us-west-2:1234567890:volume/vol-0123456789abcedf1" -> (known after apply)
      ~ availability_zone    = "us-west-2b" -> (known after apply) # forces replacement
      ~ id                   = "vol-0123456789abcedf1" -> (known after apply)
      ~ iops                 = 150 -> (known after apply)
      + kms_key_id           = (known after apply)
      - multi_attach_enabled = false -> null
      + snapshot_id          = (known after apply)
      ~ throughput           = 0 -> (known after apply)
        # (3 unchanged attributes hidden)
    }
It seems that the issue relates to the availability zone: the plan shows availability_zone going to (known after apply) and forcing replacement of the imported volumes. You can try the workaround of adding these lines to the aws_ebs_volume block:
lifecycle {
  ignore_changes = [availability_zone]
}
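In context, that would sit inside the volume resource from the question (same arguments as above):

resource "aws_ebs_volume" "backend-logs" {
  count             = var.create_ebs_log_volumes ? var.backend_nodes_qty : 0
  availability_zone = element(data.aws_subnet.backend.*.availability_zone, count.index)
  size              = var.volume_log_size
  type              = var.ebs_volume_type
  encrypted         = var.ebs_enable_encryption
  kms_key_id        = var.ebs_encryption_key_id

  lifecycle {
    # Keep the imported volumes even when the AZ computed from the
    # data source differs from the value recorded in state.
    ignore_changes = [availability_zone]
  }
}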

Resources planned for creation although count evaluates to false

I have the following variables:
variable "policies" {
  type        = list(string)
  description = "List of policy documents to attach to the IAM Role."
  default     = []
}

variable "policy_name" {
  type        = string
  description = "Name of the policy attached to the IAM Role."
  default     = null
}

variable "policy_description" {
  type        = string
  description = "Description of the policy attached to the IAM Role."
  default     = ""
}
Which are used by the following Terraform resources:
resource "aws_iam_role" "this" {
name = var.role_name
assume_role_policy = var.assume_role_policy
}
data "aws_iam_policy_document" "this" {
count = var.policies != [] ? 1 : 0
source_policy_documents = var.policies
}
resource "aws_iam_policy" "this" {
count = var.policies != [] ? 1 : 0
name = var.policy_name
description = var.policy_description
policy = data.aws_iam_policy_document.this[count.index].json
}
resource "aws_iam_role_policy_attachment" "this" {
count = var.policies != [] ? 1 : 0
policy_arn = aws_iam_policy.this[count.index].arn
role = aws_iam_role.this.name
}
Now, my understanding is that aws_iam_policy_document, aws_iam_policy, and aws_iam_role_policy_attachment should be created only when var.policies is not empty.
However, these resources are still planned for creation when the module is called like this:
module "iam_role_batch" {
source = "./resources/iam/role"
role_name = local.iam_role_batch_service_name
assume_role_policy = data.aws_iam_policy_document.batch_service.json
}
  # module.iam_role_batch.aws_iam_policy.this[0] will be created
  + resource "aws_iam_policy" "this" {
      + arn       = (known after apply)
      + id        = (known after apply)
      + name      = (known after apply)
      + path      = "/"
      + policy    = jsonencode(
            {
              + Statement = null
              + Version   = "2012-10-17"
            }
        )
      + policy_id = (known after apply)
      + tags_all  = (known after apply)
    }

  # module.iam_role_batch.aws_iam_role_policy_attachment.this[0] will be created
  + resource "aws_iam_role_policy_attachment" "this" {
      + id         = (known after apply)
      + policy_arn = (known after apply)
      + role       = "xxxxxxx"
    }

Plan: 2 to add, 0 to change, 0 to destroy.
Why? AFAIK, policies defaults to [], so the resources should not be planned for creation.
What am I missing?
"is by default set to []"
Actually, it is declared with the type list(string). Your condition var.policies != [] compares a list(string) against an empty tuple, and since those types never match, the condition is always true; that is why the resources are always created. [] is not the same as list(string).
Usually you would do the following instead:
count = length(var.policies) > 0 ? 1 : 0
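Applied to the module above, only the count lines need to change:

data "aws_iam_policy_document" "this" {
  count                   = length(var.policies) > 0 ? 1 : 0
  source_policy_documents = var.policies
}

resource "aws_iam_policy" "this" {
  count       = length(var.policies) > 0 ? 1 : 0
  name        = var.policy_name
  description = var.policy_description
  policy      = data.aws_iam_policy_document.this[count.index].json
}

resource "aws_iam_role_policy_attachment" "this" {
  count      = length(var.policies) > 0 ? 1 : 0
  policy_arn = aws_iam_policy.this[count.index].arn
  role       = aws_iam_role.this.name
}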

Terraform ignoring VPC peering connection

This is a surprise for me, as Terraform is ignoring a resource creation. I'm using modules in Terraform, and every other resource gets created except vpc_peering_connection; that block is effectively ignored. Hence, for debugging, I created an output for the ID of the peering connection, and then I get the error below:
$ terraform plan

Error: Reference to undeclared resource

  on network/output.tf line 6, in output "peering-id":
   6:   value = "${aws_vpc_peering_connection.default.id}"

A managed resource "aws_vpc_peering_connection" "default" has not been
declared in network.
Here is a tree snapshot of my code structure:
.
├── module-network.tf
├── network
│   ├── data.tf
│   ├── igw.tf
│   ├── output.tf
│   ├── peering-conn
│   ├── rt-public.tf
│   ├── security_group.tf
│   ├── subnet.tf
│   ├── var.tf
│   └── vpc.tf
├── output.tf
├── provider-aws.tf
├── terraform.tfstate
├── terraform.tfstate.backup
├── var-static.tf
└── versions.tf
1 directory, 16 files
Terraform Code:
$ cat module-network.tf
module "network" {
  source       = "./network"
  AWS_REGION   = var.REGION
  ENVIRONMENT  = var.ENVIRONMENT
  PRODUCT      = var.PRODUCT
  VPC_CIDR     = var.VPC_CIDR
  SUBNET_COUNT = var.SUBNET_COUNT
  VPC_PEER_ID  = var.VPC_PEER_ID
}

$ cat output.tf
output "vpc-id" {
  value = module.network.vpc-id
}
output "peering-id" {
  value = module.network.peering-id
}
variable "PRODUCT" {
default = "jjmdb"
}
variable "ENVIRONMENT" {
default = "prod"
}
variable "REGION" {
default = "us-east-1"
}
variable "VPC_CIDR" {
default = "10.100.0.0/16"
}
variable "SUBNET_COUNT" {
default = "2"
}
variable "VPC_PEER_ID" {
default = "vpc-0724db2d24120ca8c"
}
$ cat data.tf
data "aws_vpc" "peer_vpc" {
  id = "${var.VPC_PEER_ID}"
}
data "aws_subnet_ids" "private_nodes" {
  vpc_id = "${data.aws_vpc.peer_vpc.id}"
  tags = {
    Tier = "node-private"
  }
}
$ cat igw.tf
# Create internet gateway
resource "aws_internet_gateway" "igw" {
  vpc_id = "${aws_vpc.vpc.id}"
  tags = {
    Name        = "${format("%s-igw", var.PRODUCT)}"
    Environment = "${var.ENVIRONMENT}"
  }
}
$ cat output.tf
output "vpc-id" {
  value = "${aws_vpc.vpc.id}"
}
output "peering-id" {
  value = "${aws_vpc_peering_connection.default.id}"
}
$ cat peering-conn
# Create VPC peering connection
resource "aws_vpc_peering_connection" "default" {
  peer_vpc_id = "${data.aws_vpc.peer_vpc.id}"
  vpc_id      = "${aws_vpc.vpc.id}"
  auto_accept = true
  tags = {
    Name        = "${format("%s-peering", var.PRODUCT)}"
    Environment = "${var.ENVIRONMENT}"
  }
}
$ cat rt-public.tf
# Create a public route table
resource "aws_route_table" "rt-public" {
  vpc_id = "${aws_vpc.vpc.id}"
  tags = {
    Name        = "${format("%s-rt-public", var.PRODUCT)}"
    Environment = "${var.ENVIRONMENT}"
  }
}
resource "aws_route" "rt-public-route" {
  route_table_id         = "${aws_route_table.rt-public.id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "${aws_internet_gateway.igw.id}"
}
# Associate public route table to public subnets
resource "aws_route_table_association" "rt-sub-public" {
  count          = "${var.SUBNET_COUNT}"
  subnet_id      = "${aws_subnet.sub_public.*.id[count.index]}"
  route_table_id = "${aws_route_table.rt-public.id}"
}
$ cat security_group.tf
$ cat subnet.tf
resource "aws_subnet" "sub_public" {
  count      = "${var.SUBNET_COUNT}"
  vpc_id     = "${aws_vpc.vpc.id}"
  cidr_block = "${cidrsubnet(var.VPC_CIDR, 2, count.index + 2)}"
  tags = {
    Name        = "${format("%s-sub-public-%d", var.PRODUCT, count.index)}"
    Environment = "${var.ENVIRONMENT}"
  }
}
$ cat var.tf
variable "AWS_REGION" {}
variable "ENVIRONMENT" {}
variable "PRODUCT" {}
variable "VPC_CIDR" {}
variable "SUBNET_COUNT" {}
variable "VPC_PEER_ID" {}
$ cat vpc.tf
# Create VPC
resource "aws_vpc" "vpc" {
  cidr_block           = "${var.VPC_CIDR}"
  instance_tenancy     = "default"
  enable_dns_support   = "true"
  enable_dns_hostnames = "true"
  enable_classiclink   = "false"
  tags = {
    Name        = "${format("%s-vpc", var.PRODUCT)}"
    Environment = "${var.ENVIRONMENT}"
  }
}
If I remove the output blocks, the peering connection is silently ignored. Terraform plan is as below:
$ terraform plan
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.

module.network.data.aws_vpc.peer_vpc: Refreshing state...
module.network.data.aws_subnet_ids.private_nodes: Refreshing state...

------------------------------------------------------------------------

An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  # module.network.aws_internet_gateway.igw will be created
  + resource "aws_internet_gateway" "igw" {
      + id       = (known after apply)
      + owner_id = (known after apply)
      + tags     = {
          + "Environment" = "prod"
          + "Name"        = "jjmdb-igw"
        }
      + vpc_id   = (known after apply)
    }

  # module.network.aws_route.rt-public-route will be created
  + resource "aws_route" "rt-public-route" {
      + destination_cidr_block     = "0.0.0.0/0"
      + destination_prefix_list_id = (known after apply)
      + egress_only_gateway_id     = (known after apply)
      + gateway_id                 = (known after apply)
      + id                         = (known after apply)
      + instance_id                = (known after apply)
      + instance_owner_id          = (known after apply)
      + nat_gateway_id             = (known after apply)
      + network_interface_id       = (known after apply)
      + origin                     = (known after apply)
      + route_table_id             = (known after apply)
      + state                      = (known after apply)
    }

  # module.network.aws_route_table.rt-public will be created
  + resource "aws_route_table" "rt-public" {
      + id               = (known after apply)
      + owner_id         = (known after apply)
      + propagating_vgws = (known after apply)
      + route            = (known after apply)
      + tags             = {
          + "Environment" = "prod"
          + "Name"        = "jjmdb-rt-public"
        }
      + vpc_id           = (known after apply)
    }

  # module.network.aws_route_table_association.rt-sub-public[0] will be created
  + resource "aws_route_table_association" "rt-sub-public" {
      + id             = (known after apply)
      + route_table_id = (known after apply)
      + subnet_id      = (known after apply)
    }

  # module.network.aws_route_table_association.rt-sub-public[1] will be created
  + resource "aws_route_table_association" "rt-sub-public" {
      + id             = (known after apply)
      + route_table_id = (known after apply)
      + subnet_id      = (known after apply)
    }

  # module.network.aws_subnet.sub_public[0] will be created
  + resource "aws_subnet" "sub_public" {
      + arn                             = (known after apply)
      + assign_ipv6_address_on_creation = false
      + availability_zone               = (known after apply)
      + availability_zone_id            = (known after apply)
      + cidr_block                      = "10.100.128.0/18"
      + id                              = (known after apply)
      + ipv6_cidr_block                 = (known after apply)
      + ipv6_cidr_block_association_id  = (known after apply)
      + map_public_ip_on_launch         = false
      + owner_id                        = (known after apply)
      + tags                            = {
          + "Environment" = "prod"
          + "Name"        = "jjmdb-sub-public-0"
        }
      + vpc_id                          = (known after apply)
    }

  # module.network.aws_subnet.sub_public[1] will be created
  + resource "aws_subnet" "sub_public" {
      + arn                             = (known after apply)
      + assign_ipv6_address_on_creation = false
      + availability_zone               = (known after apply)
      + availability_zone_id            = (known after apply)
      + cidr_block                      = "10.100.192.0/18"
      + id                              = (known after apply)
      + ipv6_cidr_block                 = (known after apply)
      + ipv6_cidr_block_association_id  = (known after apply)
      + map_public_ip_on_launch         = false
      + owner_id                        = (known after apply)
      + tags                            = {
          + "Environment" = "prod"
          + "Name"        = "jjmdb-sub-public-1"
        }
      + vpc_id                          = (known after apply)
    }

  # module.network.aws_vpc.vpc will be created
  + resource "aws_vpc" "vpc" {
      + arn                              = (known after apply)
      + assign_generated_ipv6_cidr_block = false
      + cidr_block                       = "10.100.0.0/16"
      + default_network_acl_id           = (known after apply)
      + default_route_table_id           = (known after apply)
      + default_security_group_id        = (known after apply)
      + dhcp_options_id                  = (known after apply)
      + enable_classiclink               = false
      + enable_classiclink_dns_support   = (known after apply)
      + enable_dns_hostnames             = true
      + enable_dns_support               = true
      + id                               = (known after apply)
      + instance_tenancy                 = "default"
      + ipv6_association_id              = (known after apply)
      + ipv6_cidr_block                  = (known after apply)
      + main_route_table_id              = (known after apply)
      + owner_id                         = (known after apply)
      + tags                             = {
          + "Environment" = "prod"
          + "Name"        = "jjmdb-vpc"
        }
    }

Plan: 8 to add, 0 to change, 0 to destroy.

------------------------------------------------------------------------

Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.
Note: I was getting the same issue with Terraform version 0.11.1, so I upgraded to 0.12.3, but no luck.
Please advise.
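
One detail stands out in the tree output: the file is named peering-conn, with no .tf extension, and Terraform only loads files ending in .tf (or .tf.json). The aws_vpc_peering_connection resource is therefore never parsed, which would explain both the "undeclared resource" error from the output block and the plan omitting the peering connection entirely. Renaming the file should fix it:

$ mv network/peering-conn network/peering-conn.tf
$ terraform plan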