Why is terraform forcing replacement of aurora global database?

Terraform CLI and Terraform AWS Provider Version
Installed from https://releases.hashicorp.com/terraform/0.13.5/terraform_0.13.5_linux_amd64.zip
hashicorp/aws v3.15.0
Affected Resource(s)
aws_rds_cluster
aws_rds_cluster_instance
Terraform Configuration Files
# inside ./modules/rds/main.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
}
}
required_version = "~> 0.13"
}
provider "aws" {
alias = "primary"
}
provider "aws" {
alias = "dr"
}
locals {
region_tags = ["primary", "dr"]
db_name = "${var.project_name}-${var.stage}-db"
db_cluster_0 = "${local.db_name}-cluster-${local.region_tags[0]}"
db_cluster_1 = "${local.db_name}-cluster-${local.region_tags[1]}"
db_instance_name = "${local.db_name}-instance"
}
resource "aws_rds_global_cluster" "global_db" {
global_cluster_identifier = "${var.project_name}-${var.stage}"
database_name = "${var.project_name}${var.stage}db"
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
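# e.g. this resolves to "5.7.mysql_aurora.2.09.2" when var.mysql_version is "5.7"
# and var.aurora_version is "2.09.2" (illustrative values, not taken from this configuration)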
// force_destroy = true
}
resource "aws_rds_cluster" "primary_cluster" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[0]}"
# the database name does not allow dashes:
database_name = "${var.project_name}${var.stage}db"
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
master_username = var.username
master_password = var.password
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.primary.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_0
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "primary" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = aws_rds_cluster.primary_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.primary.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster" "dr_cluster" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[1]}"
# the database name is not allowed to be specified on secondary regions
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
# cannot specify username/password in cross-region replication cluster:
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.dr.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_1
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "dr_instance" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = aws_rds_cluster.dr_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.dr.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "primary" {
name = "${local.db_name}-subnetgroup"
subnet_ids = var.subnet_ids
provider = aws.primary
tags = {
Name = "primary_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "dr" {
provider = aws.dr
name = "${local.db_name}-subnetgroup"
subnet_ids = var.dr_subnet_ids
tags = {
Name = "dr_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_parameter_group" "default" {
name = "rds-cluster-pg"
family = "aurora-mysql${var.mysql_version}"
description = "RDS default cluster parameter group"
parameter {
name = "character_set_server"
value = "utf8"
}
parameter {
name = "character_set_client"
value = "utf8"
}
parameter {
name = "aurora_parallel_query"
value = "ON"
apply_method = "pending-reboot"
}
}
Inside ./modules/sns/main.tf, this is the resource I'm adding when calling terraform apply from within the ./modules directory:
resource "aws_sns_topic" "foo_topic" {
name = "foo-${var.stage}-${var.topic_name}"
tags = {
Name = "foo-${var.stage}-${var.topic_name}"
Stage = var.stage
CreatedBy = var.created_by
CreatedOn = timestamp()
}
}
./modules/main.tf:
terraform {
backend "s3" {
bucket = "terraform-remote-state-s3-bucket-unique-name"
key = "terraform.tfstate"
region = "us-east-2"
dynamodb_table = "TerraformLockTable"
}
}
provider "aws" {
alias = "primary"
region = var.region
}
provider "aws" {
alias = "dr"
region = var.dr_region
}
module "vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.primary
}
}
module "dr_vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.dr
}
}
module "vpc_security_group" {
source = "./vpc_security_group"
vpc_id = module.vpc.vpc_id
providers = {
aws = aws.primary
}
}
module "rds" {
source = "./rds"
stage = var.stage
created_by = var.created_by
vpc_id = module.vpc.vpc_id
subnet_ids = [module.vpc.subnet_a_id, module.vpc.subnet_b_id, module.vpc.subnet_c_id]
dr_subnet_ids = [module.dr_vpc.subnet_a_id, module.dr_vpc.subnet_b_id, module.dr_vpc.subnet_c_id]
region = var.region
username = var.rds_username
password = var.rds_password
providers = {
aws.primary = aws.primary
aws.dr = aws.dr
}
}
module "sns_start" {
stage = var.stage
source = "./sns"
topic_name = "start"
created_by = var.created_by
}
./modules/variables.tf:
variable "region" {
default = "us-east-2"
}
variable "dr_region" {
default = "us-west-2"
}
variable "service" {
type = string
default = "foo-back"
description = "service to match what serverless framework deploys"
}
variable "stage" {
type = string
default = "sandbox"
description = "The stage to deploy: sandbox, dev, qa, uat, or prod"
validation {
condition = can(regex("sandbox|dev|qa|uat|prod", var.stage))
error_message = "The stage value must be a valid stage: sandbox, dev, qa, uat, or prod."
}
}
variable "created_by" {
description = "Company or vendor name followed by the username part of the email address"
}
variable "rds_username" {
description = "Username for rds"
}
variable "rds_password" {
description = "Password for rds"
}
./modules/sns/output.tf:
output "sns_topic_arn" {
value = aws_sns_topic.foo_topic.arn
}
Debug Output
In both outputs, keys, names, account IDs, etc. have been modified:
The plan output from running terraform apply:
https://gist.github.com/ystoneman/95df711ee0a11d44e035b9f8f39b75f3
The state before applying: https://gist.github.com/ystoneman/5c842769c28e1ae5969f9aaff1556b37
Expected Behavior
Everything in ./modules/main.tf had already been created; the only thing that was added was the SNS module, so only the SNS module's resources should be created.
Actual Behavior
But instead, the RDS resources are affected too, and terraform "claims" that engine_mode has changed from provisioned to global, even though it already was global according to the console.
The plan output also says that cluster_identifier is only known after apply and therefore forces replacement. However, I think the cluster_identifier is necessary to let the aws_rds_cluster know it belongs to the aws_rds_global_cluster, and the aws_rds_cluster_instance know it belongs to the aws_rds_cluster, respectively.
Steps to Reproduce
Comment out the module "sns_start" block.
cd ./modules
terraform apply (the state file I included reflects the state after this step).
Uncomment the module "sns_start" block.
terraform apply again (this is the run the debug output above comes from).
Important Factoids
This problem happens whether I run it from my Mac or within AWS CodeBuild.
References
The question "AWS Terraform tried to destroy and rebuild RDS cluster" seems related to this too, but it's not specific to a Global Cluster, where you do need identifiers so that instances and clusters know what they belong to.

It seems like you are using an outdated version of the aws provider and are specifying the engine_mode incorrectly. There was a bug ticket relating to this: https://github.com/hashicorp/terraform-provider-aws/issues/16088
It is fixed in version 3.15.0, which you can use via:
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.15.0"
    }
  }
  required_version = "~> 0.13"
}
Additionally, you should drop the engine_mode argument from your Terraform configuration completely.
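For reference, a minimal sketch of what the primary cluster might look like with engine_mode removed (tags and depends_on trimmed for brevity; everything else is unchanged from your configuration):
resource "aws_rds_cluster" "primary_cluster" {
  provider                  = aws.primary
  cluster_identifier        = "${local.db_name}-cluster-${local.region_tags[0]}"
  database_name             = "${var.project_name}${var.stage}db"
  engine                    = "aurora-mysql"
  engine_version            = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
  # engine_mode intentionally omitted, as recommended above
  global_cluster_identifier = aws_rds_global_cluster.global_db.id
  master_username           = var.username
  master_password           = var.password
  backup_retention_period   = 5
  preferred_backup_window   = "07:00-09:00"
  db_subnet_group_name      = aws_db_subnet_group.primary.id
  skip_final_snapshot       = true
  final_snapshot_identifier = "ci-aurora-cluster-backup"
}
The same change applies to the dr_cluster block.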

Related

InvalidDBClusterStateFault: Source cluster is in a state which is not valid for physical replication when adding a new rds cluster in global cluster

I am using Terraform to set up an RDS Global Cluster in 2 regions - us-east-1 and us-east-2. The engine is "aurora-postgresql" and engine_version is "13.4".
I already had an existing cluster in us-east-1 made without Terraform, which I imported into Terraform, and now want to create a global cluster with another cluster in us-east-2, so I am following this part of the aws-provider docs.
Here is what my current hcl looks like:
# provider.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
alias = "useast1"
region = "us-east-1"
assume_role {
role_arn = var.TF_IAM_ROLE_ARN
}
}
provider "aws" {
alias = "useast2"
region = "us-east-2"
assume_role {
role_arn = var.TF_IAM_ROLE_ARN
}
}
# rds.tf
locals {
rds-monitoring-role_arn = "iam role for rds monitoring"
kms_key_id = {
"us-east-1" : "aws managed rds key arn in us-east-1"
"us-east-2" : "aws managed rds key arn in us-east-2"
}
}
resource "aws_rds_global_cluster" "global-lego-production" {
global_cluster_identifier = "global-lego-production"
force_destroy = true
source_db_cluster_identifier = aws_rds_cluster.lego-production-us-east-1.arn
lifecycle {
ignore_changes = [
engine_version,
database_name
]
}
}
resource "aws_rds_cluster" "lego-production-us-east-1" {
provider = aws.useast1
engine = "aurora-postgresql"
engine_version = "13.4"
cluster_identifier = "lego-production"
master_username = "nektar"
master_password = var.RDS_MASTER_PASSWORD
database_name = "lego"
db_subnet_group_name = module.us-east-1.rds-lego-prod-subnet-group-id
db_cluster_parameter_group_name = module.us-east-1.rds-lego-production-parameter-group-id
backup_retention_period = 7
storage_encrypted = true
kms_key_id = local.kms_key_id.us-east-1
copy_tags_to_snapshot = true
deletion_protection = true
skip_final_snapshot = true
iam_database_authentication_enabled = true
enabled_cloudwatch_logs_exports = ["postgresql"]
vpc_security_group_ids = [
module.us-east-1.rds-db-webserver-security-group-id,
module.us-east-1.rds-db-quicksight-security-group-id
]
tags = {
vpc = "nektar"
}
lifecycle {
ignore_changes = [
engine_version,
global_cluster_identifier
]
}
}
resource "aws_rds_cluster_instance" "lego-production-us-east-1-instance-1" {
provider = aws.useast1
engine = aws_rds_cluster.lego-production-us-east-1.engine
engine_version = aws_rds_cluster.lego-production-us-east-1.engine_version
identifier = "lego-production-instance-1"
cluster_identifier = aws_rds_cluster.lego-production-us-east-1.id
instance_class = "db.r6g.4xlarge"
db_subnet_group_name = module.us-east-1.rds-lego-prod-subnet-group-id
monitoring_role_arn = local.rds-monitoring-role_arn
performance_insights_enabled = true
performance_insights_kms_key_id = local.kms_key_id.us-east-1
performance_insights_retention_period = 7
monitoring_interval = 60
tags = {
"devops-guru-default" = "lego-production"
}
lifecycle {
ignore_changes = [
instance_class
]
}
}
resource "aws_rds_cluster_instance" "lego-production-us-east-1-instance-2" {
provider = aws.useast1
engine = aws_rds_cluster.lego-production-us-east-1.engine
engine_version = aws_rds_cluster.lego-production-us-east-1.engine_version
identifier = "lego-production-instance-1-us-east-1b"
cluster_identifier = aws_rds_cluster.lego-production-us-east-1.id
instance_class = "db.r6g.4xlarge"
db_subnet_group_name = module.us-east-1.rds-lego-prod-subnet-group-id
monitoring_role_arn = local.rds-monitoring-role_arn
performance_insights_enabled = true
performance_insights_kms_key_id = local.kms_key_id.us-east-1
performance_insights_retention_period = 7
monitoring_interval = 60
tags = {
"devops-guru-default" = "lego-production"
}
lifecycle {
ignore_changes = [
instance_class
]
}
}
resource "aws_rds_cluster" "lego-production-us-east-2" {
provider = aws.useast2
engine = aws_rds_cluster.lego-production-us-east-1.engine
engine_version = aws_rds_cluster.lego-production-us-east-1.engine_version
cluster_identifier = "lego-production-us-east-2"
global_cluster_identifier = aws_rds_global_cluster.global-lego-production.id
db_subnet_group_name = module.us-east-2.rds-lego-prod-subnet-group-id
db_cluster_parameter_group_name = module.us-east-2.rds-lego-production-parameter-group-id
backup_retention_period = 7
storage_encrypted = true
kms_key_id = local.kms_key_id.us-east-2
copy_tags_to_snapshot = true
deletion_protection = true
skip_final_snapshot = true
iam_database_authentication_enabled = true
enabled_cloudwatch_logs_exports = ["postgresql"]
vpc_security_group_ids = [
module.us-east-2.rds-db-webserver-security-group-id,
module.us-east-2.rds-db-quicksight-security-group-id
]
tags = {
vpc = "nektar"
}
depends_on = [
aws_rds_cluster.lego-production-us-east-1,
aws_rds_cluster_instance.lego-production-us-east-1-instance-1,
aws_rds_cluster_instance.lego-production-us-east-1-instance-2
]
lifecycle {
ignore_changes = [
engine_version
]
}
}
resource "aws_rds_cluster_instance" "lego-production-us-east-2-instance-1" {
provider = aws.useast2
engine = aws_rds_cluster.lego-production-us-east-1.engine
engine_version = aws_rds_cluster.lego-production-us-east-1.engine_version
identifier = "lego-production-instance-1"
cluster_identifier = aws_rds_cluster.lego-production-us-east-2.id
instance_class = "db.r6g.4xlarge"
db_subnet_group_name = module.us-east-2.rds-lego-prod-subnet-group-id
monitoring_role_arn = local.rds-monitoring-role_arn
performance_insights_enabled = true
performance_insights_kms_key_id = local.kms_key_id.us-east-2
performance_insights_retention_period = 7
monitoring_interval = 60
tags = {
"devops-guru-default" = "lego-production"
}
lifecycle {
ignore_changes = [
instance_class
]
}
}
When applying it with terraform plan -out tfplan.out and then terraform apply tfplan.out (the initial plan only showed adding the 3 resources: aws_rds_global_cluster, aws_rds_cluster, and aws_rds_cluster_instance in us-east-2), the Global Cluster was created successfully (as seen in the AWS Console). But the RDS Cluster in us-east-2 is failing due to the error InvalidDBClusterStateFault: Source cluster: arn:aws:rds:us-east-1:<account-id>:cluster:lego-production is in a state which is not valid for physical replication.
I tried the same thing using just the AWS Console (without Terraform, using "Add Region" through the "Modify" option after selecting the Global Cluster), and it shows the same error.
What criterion is missing for adding another region to my global cluster? It certainly isn't just Terraform acting up, and I couldn't find any other place on the internet where somebody encountered the same error.
If there is any other information that I should provide, please comment.
You are referencing your useast2 cluster's engine to the useast1 cluster, which uses the useast1 provider, so it is trying to replicate the same thing.
You should create an additional resource such as "aws_rds_cluster" "lego-production-us-east-2" and provide the same information, but set useast2 as the provider.
For example, for your useast2 cluster you have:
resource "aws_rds_cluster" "lego-production-us-east-2" {
provider = aws.useast2
engine = 👉🏽aws_rds_cluster.lego-production-us-east-1.engine
engine_version = 👉🏽aws_rds_cluster.lego-production-us-east-1.engine_version
cluster_identifier = "lego-production-us-east-2"
Notice that your engine is pointing to your useast1 cluster. Reference your engine and engine_version from a cluster that uses your useast2 alias instead.
Let me know if this works.
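One way to read that suggestion (a sketch, not a verified fix) is to stop deriving engine and engine_version from the us-east-1 resource and set them directly on the us-east-2 cluster:
resource "aws_rds_cluster" "lego-production-us-east-2" {
  provider                  = aws.useast2
  engine                    = "aurora-postgresql"
  engine_version            = "13.4"
  cluster_identifier        = "lego-production-us-east-2"
  global_cluster_identifier = aws_rds_global_cluster.global-lego-production.id
  # ... remaining arguments unchanged from the original resource ...
}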
It took the AWS Developer Support Plan for me to resolve this.
The reason for the error InvalidDBClusterStateFault is apparently pretty straightforward: there are some pending changes to the cluster, to be applied at the next maintenance window.
That's it! To view the pending changes you can run the following command:
aws rds describe-db-clusters --db-cluster-identifier lego-production --query 'DBClusters[].{DBClusterIdentifier:DBClusterIdentifier,PendingModifiedValues:PendingModifiedValues}'
In my case, some changes made through Terraform were going to be applied at the next maintenance window. I had to add the following line to my aws_rds_cluster resource block to apply those changes immediately:
resource "aws_rds_cluster" "lego-production-us-east-1" {
...
+ apply_immediately = true
...
}
The same had to be done for the lego-production-us-east-2 resource block as well, just to be sure.
Once I applied these changes, the cluster addition to the global cluster took place as expected.
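Putting it together, a sketch of the relevant additions (the rest of each resource block stays exactly as shown in the question):
resource "aws_rds_cluster" "lego-production-us-east-1" {
  # ... existing arguments ...
  apply_immediately = true
}

resource "aws_rds_cluster" "lego-production-us-east-2" {
  # ... existing arguments ...
  apply_immediately = true
}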

Unable to create an EKS Cluster with an existing security group using Terraform

I'm having issues when trying to create an EKS cluster with a few security groups that I have already created. I don't want a new SG every time I create a new EKS cluster.
The problem is with a part of the code under the vpc_id section: "cluster_create_security_group = false" produces an error, and cluster_security_group_id = "sg-123" is completely ignored.
My code is like this:
provider "aws" {
region = "us-east-2"
}
terraform {
backend "s3" {
bucket = "mys3bucket"
key = "eks/terraform.tfstate"
region = "us-east-2"
}
}
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
}
variable "cluster_security_group_id" {
description = "Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false`"
type = string
default = "sg-1234"
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 18.0"
cluster_name = "cluster-example"
cluster_version = "1.21" #This may vary depending on the purpose of the cluster
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
}
}
vpc_id = "vpc-12345"
subnet_ids = ["subnet-123", "subnet-456", "subnet-789"]
create_cluster_security_group = false # ----------> ERROR: An argument named "cluster_create_security_group" is not expected here
cluster_security_group_id = "my-security-group-id"
# EKS Managed Node Group(s)
eks_managed_node_group_defaults = {
disk_size = 50
instance_types = ["t3.medium"]
}
eks_managed_node_groups = {
Test-Nodegroup = {
min_size = 2
max_size = 5
desired_size = 2
instance_types = ["t3.large"]
capacity_type = "SPOT"
}
}
tags = {
Environment = "dev"
Terraform = "true"
}
}
Where am I wrong? This is my whole Terraform file.
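For comparison, this is how the security-group inputs are usually spelled for version 18 of terraform-aws-modules/eks/aws; this is only a sketch, so the argument names should be double-checked against the module version actually in use:
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.0"

  # ... other arguments as in the question ...

  create_cluster_security_group = false
  cluster_security_group_id     = var.cluster_security_group_id
}
Note that the error message quotes "cluster_create_security_group" (words transposed), which would indeed not be an accepted argument name.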

Is it possible to get the terraform state file from the remote backend and set it for modifications

I need to get the Terraform state file from the GCS backend and use it while I update the resource. Is it possible to overwrite the existing Terraform state file and fetch it when needed?
This is my main.tf:
provider "google" {
project = var.project
region = var.region
zone = var.zone
}
###################################################
########################################################
data "google_compute_default_service_account" "default" {
}
#create instance1
resource "random_id" "bucket_prefix" {
byte_length = 8
}
#create instance
resource "google_compute_instance" "vm_instance" {
name = var.instance_name
machine_type = var.machine_type
zone = var.zone
metadata_startup_script = var.script
allow_stopping_for_update = true
#metadata = {
# enable-oslogin = "TRUE"
#}
service_account {
email = data.google_compute_default_service_account.default.email
scopes = ["cloud-platform"]
}
boot_disk {
initialize_params {
image = var.image
#image = "ubuntu-2004-lts" # TensorFlow Enterprise
size = 30
}
}
# Install Flask
tags = ["http-server","allow-ssh-ingress-from-iap", "https-server"]
network_interface {
network = "default"
access_config {
}
}
guest_accelerator{
#type = "nvidia-tesla-t4" // Type of GPU attahced
type = var.type
count = var.gpu_count
#count = 2 // Num of GPU attached
}
scheduling{
on_host_maintenance = "TERMINATE"
automatic_restart = true // Need to terminate GPU on maintenance
}
}
This is my variables.tfvars:
instance_name = "test-vm-v5"
machine_type = "n1-standard-16"
region = "europe-west4"
zone = "europe-west4-a"
image = "tf28-np-pandas-nltk-scikit-py39"
#image = "debian-cloud/debian-10"
project = "my_project"
network = "default"
type = ""
gpu_count = "0"
I want to create multiple instances by changing variables.tfvars, and I need to modify an instance based on the name of the VM.
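If the goal is several VMs managed from one configuration, each addressable by name, one common pattern (a sketch under that assumption, not taken from the question) is a for_each keyed by instance name:
variable "instances" {
  type = map(object({
    machine_type = string
    image        = string
  }))
}

resource "google_compute_instance" "vm_instance" {
  for_each     = var.instances
  name         = each.key
  machine_type = each.value.machine_type
  zone         = var.zone

  boot_disk {
    initialize_params {
      image = each.value.image
      size  = 30
    }
  }

  network_interface {
    network = "default"
    access_config {}
  }
}
Each instance then has its own address in state, e.g. google_compute_instance.vm_instance["test-vm-v5"], which can be planned, targeted, or modified without touching the others.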

How can I override the default region in aws provider?

I have a Terraform configuration file like the one below. When I run terraform apply I get the error: A default (non-aliased) provider configuration for "aws" was already given at versions.tf:37,1-15. If multiple configurations are required, set the "alias" argument for alternative configurations.
I think it is caused by some module already defining the aws provider configuration. I know I can set an alias for the one I added at the bottom of the configuration below, but how can I override the default one used by the module?
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.20.0"
}
random = {
source = "hashicorp/random"
version = "3.0.0"
}
local = {
source = "hashicorp/local"
version = "2.0.0"
}
null = {
source = "hashicorp/null"
version = "3.0.0"
}
template = {
source = "hashicorp/template"
version = "2.2.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.1"
}
}
required_version = "~> 0.14"
}
variable "region" {
default = "us-east-2"
description = "AWS region"
}
provider "aws" {
region = "us-east-2"
}
data "aws_availability_zones" "available" {}
locals {
cluster_name = "education-eks-${random_string.suffix.result}"
}
resource "random_string" "suffix" {
length = 8
special = false
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.66.0"
name = "elk-vpc"
cidr = "10.0.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
}
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
}
provider "aws" {
region = "ap-southeast-2"
}
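If the second region is genuinely needed, one option (sketched here; the alias name "sydney" is arbitrary) is to give that configuration an alias and pass it explicitly to whichever module should use it, leaving the un-aliased us-east-2 provider as the default for everything else:
provider "aws" {
  alias  = "sydney"
  region = "ap-southeast-2"
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "2.66.0"

  providers = {
    aws = aws.sydney
  }

  # ... same arguments as in the original module block ...
}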

terraform import error with module with rds instances

So I created an RDS instance and I am trying to import it into Terraform. However, I am using modules in my code, so when running terraform import I am getting an error.
At first it says:
module.rds_dr.aws_db_instance.db_instance: Import prepared!
Prepared aws_db_instance for import
Then it gives the error:
Error: Cannot import non-existent remote object
While attempting to import an existing object to aws_db_instance.db_instance,
the provider detected that no object exists with the given id. Only
pre-existing objects can be imported; check that the id is correct and that it
is associated with the provider's configured region or endpoint, or use
"terraform apply" to create a new remote object for this resource.
The command I ran was:
terraform import module.rds_dr.aws_db_instance.db_instance db-ID
I created the instance using a module sourced from GitHub. The code for the RDS instance is below:
# PostgreSQL RDS DR Instance
module "rds_dr" {
source = "git#github.com:****"
name = var.rds_name_dr
engine = var.rds_engine_dr
engine_version = var.rds_engine_version_dr
family = var.rds_family_dr
instance_class = var.rds_instance_class_dr
# WARNING: 'terraform taint random_string.rds_password' must be run prior to recreating the DB if it is destroyed
password = random_string.rds_password.result
port = var.rds_port_dr
security_groups = [aws_security_group.rds_app.id]
subnets = [module.vpc.public_subnets]
auto_minor_version_upgrade = var.rds_auto_minor_version_upgrade_dr
backup_retention_period = var.rds_backup_retention_period_dr
backup_window = var.rds_backup_window_dr
maintenance_window = var.rds_maintenance_window_dr
environment = var.environment
kms_key_id = aws_kms_key.rds.arn
multi_az = var.rds_multi_az_dr
notification_topic = var.rds_notification_topic_dr
publicly_accessible = var.rds_publicly_accessible_dr
storage_encrypted = var.rds_storage_encrypted_dr
storage_size = var.rds_storage_size_dr
storage_type = var.rds_storage_type_dr
apply_immediately = true
}
Also, this is part of the module code:
resource "aws_db_instance" "db_instance" {
allocated_storage = local.storage_size
allow_major_version_upgrade = false
apply_immediately = var.apply_immediately
auto_minor_version_upgrade = var.auto_minor_version_upgrade
backup_retention_period = var.read_replica ? 0 : var.backup_retention_period
backup_window = var.backup_window
character_set_name = local.is_oracle ? var.character_set_name : null
copy_tags_to_snapshot = var.copy_tags_to_snapshot
db_subnet_group_name = local.same_region_replica ? null : local.subnet_group
deletion_protection = var.enable_deletion_protection
engine = var.engine
engine_version = local.engine_version
final_snapshot_identifier = lower("${var.name}-final-snapshot${var.final_snapshot_suffix == "" ? "" : "-"}${var.final_snapshot_suffix}")
iam_database_authentication_enabled = var.iam_authentication_enabled
identifier_prefix = "${lower(var.name)}-"
instance_class = var.instance_class
iops = var.storage_iops
kms_key_id = var.kms_key_id
license_model = var.license_model == "" ? local.license_model : var.license_model
maintenance_window = var.maintenance_window
max_allocated_storage = var.max_storage_size
monitoring_interval = var.monitoring_interval
monitoring_role_arn = var.monitoring_interval > 0 ? local.monitoring_role_arn : null
multi_az = var.read_replica ? false : var.multi_az
name = var.dbname
option_group_name = local.same_region_replica ? null : local.option_group
parameter_group_name = local.same_region_replica ? null : local.parameter_group
password = var.password
port = local.port
publicly_accessible = var.publicly_accessible
replicate_source_db = var.source_db
skip_final_snapshot = var.read_replica || var.skip_final_snapshot
snapshot_identifier = var.db_snapshot_id
storage_encrypted = var.storage_encrypted
storage_type = var.storage_type
tags = merge(var.tags, local.tags)
timezone = local.is_mssql ? var.timezone : null
username = var.username
vpc_security_group_ids = var.security_groups
}
This is my code for the providers:
# pinned provider versions
provider "random" {
version = "~> 2.3.0"
}
provider "template" {
version = "~> 2.1.2"
}
provider "archive" {
version = "~> 1.1"
}
# default provider
provider "aws" {
version = "~> 2.44"
allowed_account_ids = [var.aws_account_id]
region = "us-east-1"
}
# remote state
terraform {
required_version = "0.12.24"
backend "s3" {
key = "terraform.dev.tfstate"
encrypt = "true"
bucket = "dev-tfstate"
region = "us-east-1"
}
}
I have inserted the correct DB ID, so I do not know why Terraform says "Cannot import non-existent remote object".
How do I fix this?
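One thing worth checking (an observation about the module code above, not something stated in the original post): the module uses identifier_prefix = "${lower(var.name)}-", so the instance's real identifier is the lower-cased name followed by a generated suffix rather than var.rds_name_dr itself. The identifiers that actually exist in the provider's configured region can be listed with, for example:
aws rds describe-db-instances --region us-east-1 --query 'DBInstances[].DBInstanceIdentifier'
and the value reported there is what terraform import expects in place of db-ID.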