I'm using Terraform 1.1.3 with AWS provider 3.75.2 to write code for an existing 2-node infrastructure. The code snippet is below (based on the import commands, the aws_ebs_volume resource lives inside module "xyz", while the aws_volume_attachment is in the root module):
resource "aws_ebs_volume" "backend-logs" {
count = var.create_ebs_log_volumes ? var.backend_nodes_qty : 0
availability_zone = element(data.aws_subnet.backend.*.availability_zone, count.index)
size = var.volume_log_size
type = var.ebs_volume_type
encrypted = var.ebs_enable_encryption
kms_key_id = var.ebs_encryption_key_id
}
resource "aws_volume_attachment" "backend-logs" {
count = var.backend_nodes_qty
device_name = "/dev/sdf"
volume_id = element(module.xyz.backend_ebs_volume_log_ids, count.index)
instance_id = element(module.xyz.backend_instance_ids, count.index)
}
I've imported the instance, volume, and attachment resources successfully:
terraform import module.xyz.aws_ebs_volume.backend-logs[0] vol-0123456789abcedf0
terraform import module.xyz.aws_ebs_volume.backend-logs[1] vol-0123456789abcedf1
terraform import aws_volume_attachment.backend-logs[0] /dev/sdf:vol-0123456789abcedf0:i-0123456789abcedf0
terraform import aws_volume_attachment.backend-logs[1] /dev/sdf:vol-0123456789abcedf1:i-0123456789abcedf1
When I run terraform plan, however, it tells me that the volumes are going to be destroyed and recreated. How can I avoid that? Thanks.
# aws_volume_attachment.backend-logs[0] must be replaced
-/+ resource "aws_volume_attachment" "backend-logs" {
~ id = "vai-1993905001" -> (known after apply)
~ volume_id = "vol-0123456789abcedf0" -> (known after apply) # forces replacement
# (2 unchanged attributes hidden)
}
# aws_volume_attachment.backend-logs[1] must be replaced
-/+ resource "aws_volume_attachment" "backend-logs" {
~ id = "vai-1955292002" -> (known after apply)
~ volume_id = "vol-0123456789abcedf1" -> (known after apply) # forces replacement
# (2 unchanged attributes hidden)
}
# module.xyz.aws_ebs_volume.backend-logs[0] must be replaced
-/+ resource "aws_ebs_volume" "backend-logs" {
~ arn = "arn:aws:ec2:us-west-2:1234567890:volume/vol-0123456789abcedf0" -> (known after apply)
~ availability_zone = "us-west-2a" -> (known after apply) # forces replacement
~ id = "vol-0123456789abcedf0" -> (known after apply)
~ iops = 150 -> (known after apply)
+ kms_key_id = (known after apply)
- multi_attach_enabled = false -> null
+ snapshot_id = (known after apply)
~ throughput = 0 -> (known after apply)
# (3 unchanged attributes hidden)
}
# module.xyz.aws_ebs_volume.backend-logs[1] must be replaced
-/+ resource "aws_ebs_volume" "backend-logs" {
~ arn = "arn:aws:ec2:us-west-2:1234567890:volume/vol-0123456789abcedf1" -> (known after apply)
~ availability_zone = "us-west-2b" -> (known after apply) # forces replacement
~ id = "vol-0123456789abcedf1" -> (known after apply)
~ iops = 150 -> (known after apply)
+ kms_key_id = (known after apply)
- multi_attach_enabled = false -> null
+ snapshot_id = (known after apply)
~ throughput = 0 -> (known after apply)
# (3 unchanged attributes hidden)
}
It seems that the issue is the availability_zone value: the AZ that Terraform computes from data.aws_subnet.backend does not match the AZ recorded on the imported volumes, and availability_zone forces replacement of aws_ebs_volume (the attachment replacements are just a knock-on effect). As a workaround, you can add these lines inside the aws_ebs_volume block:
lifecycle {
  ignore_changes = [availability_zone]
}
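A minimal sketch of where that goes, reusing the module's resource from the question:
resource "aws_ebs_volume" "backend-logs" {
  count             = var.create_ebs_log_volumes ? var.backend_nodes_qty : 0
  availability_zone = element(data.aws_subnet.backend.*.availability_zone, count.index)
  size              = var.volume_log_size
  type              = var.ebs_volume_type
  encrypted         = var.ebs_enable_encryption
  kms_key_id        = var.ebs_encryption_key_id

  # Keep the imported volumes even if the subnet data source reports the AZs in a different order.
  lifecycle {
    ignore_changes = [availability_zone]
  }
}
A longer-term fix is to make sure data.aws_subnet.backend returns the subnets in the same order as the existing volumes, so the computed AZ matches the real one and the ignore_changes is no longer needed.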
I have the following Terraform configuration that is used to create a cluster with two instances:
resource "aws_rds_cluster" "aurora" {
storage_encrypted = true
cluster_identifier = var.cluster_identifier
engine = "aurora-postgresql"
engine_mode = "provisioned"
engine_version = "13.6"
database_name = var.database_name
master_username = "test"
master_password = var.database_password
availability_zones = ["ap-southeast-2a", "ap-southeast-2b"]
db_subnet_group_name = var.db_subnet_group_name
serverlessv2_scaling_configuration {
max_capacity = 1.0
min_capacity = 0.5
}
tags = {
Name = "${var.prefix}-${var.environment}-rds-cluster"
Environment = "${var.prefix}-${var.environment}"
}
vpc_security_group_ids = var.aurora_security_group_id
skip_final_snapshot = true
}
resource "aws_rds_cluster_instance" "aurora" {
count = 2
availability_zone = var.availability_zones[count.index]
cluster_identifier = aws_rds_cluster.aurora.id
instance_class = "db.serverless"
engine = aws_rds_cluster.aurora.engine
engine_version = aws_rds_cluster.aurora.engine_version
db_subnet_group_name = var.db_subnet_group_name
publicly_accessible = true
}
If I make literally no changes to my configuration, and run terraform plan, I receive the output below:
module.potentium_databases.module.potentium_rds_cluster.aws_rds_cluster_instance.aurora[1] must be replaced
-/+ resource "aws_rds_cluster_instance" "aurora" {
+ apply_immediately = (known after apply)
~ arn = "arn:aws:rds:ap-southeast-2:749732509682:db:tf-20220706042316120800000001" -> (known after apply)
~ ca_cert_identifier = "rds-ca-2019" -> (known after apply)
~ cluster_identifier = "potentium-cluster" -> (known after apply) # forces replacement
~ db_parameter_group_name = "default.aurora-postgresql13" -> (known after apply)
~ dbi_resource_id = "db-5AH6GR5KJNW4IXQ2BSGNPLL4FM" -> (known after apply)
~ endpoint = "tf-20220706042316120800000001.cv6x1exxvfdc.ap-southeast-2.rds.amazonaws.com" -> (known after apply)
~ engine_version_actual = "13.6" -> (known after apply)
~ id = "tf-20220706042316120800000001" -> (known after apply)
~ identifier = "tf-20220706042316120800000001" -> (known after apply)
+ identifier_prefix = (known after apply)
~ kms_key_id = "arn:aws:kms:ap-southeast-2:749732509682:key/a3f87bb9-f0b4-44a4-8677-bac5f0bb1546" -> (known after apply)
+ monitoring_role_arn = (known after apply)
~ performance_insights_enabled = false -> (known after apply)
+ performance_insights_kms_key_id = (known after apply)
~ performance_insights_retention_period = 0 -> (known after apply)
~ port = 5432 -> (known after apply)
~ preferred_backup_window = "13:51-14:21" -> (known after apply)
~ preferred_maintenance_window = "thu:15:39-thu:16:09" -> (known after apply)
~ storage_encrypted = true -> (known after apply)
- tags = {} -> null
~ tags_all = {} -> (known after apply)
~ writer = false -> (known after apply)
# (10 unchanged attributes hidden)
}
Can anyone explain why Terraform thinks this resource needs to be recreated even if nothing has changed? It is causing me grief due to how long it takes to actually re-create the instances.
It appears my issue is that I was only specifying 2 Availability Zones instead of 3. I'm assuming that because Terraform/AWS is left to decide the third AZ, it must perform a re-create since it does not know what to use.
As far as I know, a re-creation like this is caused by a changed attribute, which the plan marks with # forces replacement. In your case:
~ cluster_identifier = "potentium-cluster" -> (known after apply) # forces replacement
The instance is only replaced because the cluster itself is being replaced (its identifier becomes unknown), so double-check what is changing on the aws_rds_cluster resource; here it appears to be the availability_zones list, which AWS expands to three AZs while the configuration only declares two.
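A common way to avoid this (a sketch matching the asker's own conclusion, not a confirmed fix) is to declare all three AZs explicitly, or to tell Terraform to ignore the attribute on the cluster:
resource "aws_rds_cluster" "aurora" {
  # ... other arguments as in the question ...

  # Aurora spreads the cluster across three AZs even when only two are listed,
  # so either list all three or use ignore_changes = [availability_zones] instead.
  availability_zones = ["ap-southeast-2a", "ap-southeast-2b", "ap-southeast-2c"]
}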
I created a DocumentDB cluster with Terraform. When I run terraform plan, it wants to destroy the cluster and rebuild it, even though I did not change the variable file.
Below is the main content of the Terraform script:
resource "aws_docdb_subnet_group" "default" {
name = format("%s-subnet-group", var.env)
subnet_ids = [
data.terraform_remote_state.net.outputs.cicd-sub-priv1,
data.terraform_remote_state.net.outputs.cicd-sub-priv2,
data.terraform_remote_state.net.outputs.cicd-sub-pub2,
]
tags = {
Name = format("%s-subnet-group", var.env)
}
}
resource "aws_docdb_cluster_instance" "docdb" {
count = var.docdb_instance_count
identifier = "${var.env}-docdb-instance-${count.index}"
cluster_identifier = aws_docdb_cluster.docdb.id
instance_class = var.docdb_instance_class
tags = {
Name = format("%s-docdb-cluster-instance", var.env)
}
}
resource "aws_docdb_cluster" "docdb" {
cluster_identifier = format("%s-docdb-cluster", var.env)
availability_zones = var.docdb_az
db_subnet_group_name = aws_docdb_subnet_group.default.id
master_username = var.docdb_master_username
master_password = var.docdb_master_password
storage_encrypted = "true"
kms_key_id = data.aws_kms_alias.rds.arn
final_snapshot_identifier = format("%s-docdb-final-snapshot", var.env)
engine = "docdb"
engine_version = "4.0.0"
port = var.docdb_port
tags = {
Name = format("%s-docdb-cluster", var.env)
}
}
output "docdb_name" {
value = aws_docdb_cluster.docdb.id
description = "The name of docdb cluster"
}
output "docdb_arn" {
value = aws_docdb_cluster.docdb.arn
description = "The arn of docdb cluster"
}
output "docdb_endpoint" {
value = aws_docdb_cluster.docdb.endpoint
description = "The DNS address of the DocDB instance"
}
data_kms_alias_rds.tf
data "aws_kms_alias" "rds" {
name = "alias/aws/rds"
}
And this is the terraform plan output:
$ terraform plan -out tfplan -var-file test.tfvars
Acquiring state lock. This may take a few moments...
aws_docdb_subnet_group.default: Refreshing state... [id=test-subnet-group]
aws_docdb_cluster.docdb: Refreshing state... [id=test-docdb-cluster]
aws_docdb_cluster_instance.docdb[0]: Refreshing state... [id=test-docdb-instance-0]
aws_docdb_cluster_instance.docdb[1]: Refreshing state... [id=test-docdb-instance-1]
Note: Objects have changed outside of Terraform
Terraform detected the following changes made outside of Terraform since the last "terraform apply":
# aws_docdb_cluster.docdb has been changed
~ resource "aws_docdb_cluster" "docdb" {
~ cluster_members = [
+ "test-docdb-instance-0",
+ "test-docdb-instance-1",
]
+ enabled_cloudwatch_logs_exports = []
id = "test-docdb-cluster"
+ tags = {}
# (24 unchanged attributes hidden)
}
# aws_docdb_cluster_instance.docdb[0] has been changed
~ resource "aws_docdb_cluster_instance" "docdb" {
id = "test-docdb-instance-0"
+ tags = {}
# (21 unchanged attributes hidden)
}
# aws_docdb_cluster_instance.docdb[1] has been changed
~ resource "aws_docdb_cluster_instance" "docdb" {
id = "test-docdb-instance-1"
+ tags = {}
# (21 unchanged attributes hidden)
}
Unless you have made equivalent changes to your configuration, or ignored the relevant attributes using ignore_changes, the
following plan may include actions to undo or respond to these changes.
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following
symbols:
-/+ destroy and then create replacement
Terraform will perform the following actions:
# aws_docdb_cluster.docdb must be replaced
-/+ resource "aws_docdb_cluster" "docdb" {
+ apply_immediately = (known after apply)
~ arn = "arn:aws:rds:us-east-1:<hidden>:cluster:test-docdb-cluster" -> (known after apply)
+ cluster_identifier_prefix = (known after apply)
~ cluster_members = [
- "test-docdb-instance-0",
- "test-docdb-instance-1",
] -> (known after apply)
~ cluster_resource_id = "cluster-<hidden>" -> (known after apply)
~ db_cluster_parameter_group_name = "default.docdb4.0" -> (known after apply)
- deletion_protection = false -> null
- enabled_cloudwatch_logs_exports = [] -> null
~ endpoint = "<hidden>" -> (known after apply)
~ hosted_zone_id = "ZNKXH85TT8WVW" -> (known after apply)
~ id = "test-docdb-cluster" -> (known after apply)
~ kms_key_id = "arn:aws:kms:us-east-1:<hidden>:key/<hidden>" -> "arn:aws:kms:us-east-1:<hidden>:alias/aws/rds" # forces replacement
~ preferred_backup_window = "07:55-08:25" -> (known after apply)
~ preferred_maintenance_window = "wed:10:07-wed:10:37" -> (known after apply)
~ reader_endpoint = "<hidden>" -> (known after apply)
- tags = {} -> null
~ tags_all = {} -> (known after apply)
~ vpc_security_group_ids = [
- "sg-066866c3e4988de42",
] -> (known after apply)
# (12 unchanged attributes hidden)
}
# aws_docdb_cluster_instance.docdb[0] must be replaced
-/+ resource "aws_docdb_cluster_instance" "docdb" {
+ apply_immediately = (known after apply)
~ arn = "arn:aws:rds:us-east-1:<hidden>:db:test-docdb-instance-0" -> (known after apply)
~ availability_zone = "us-east-1a" -> (known after apply)
~ ca_cert_identifier = "rds-ca-2019" -> (known after apply)
~ cluster_identifier = "test-docdb-cluster" -> (known after apply) # forces replacement
~ db_subnet_group_name = "test-subnet-group" -> (known after apply)
~ dbi_resource_id = "db-<hidden>" -> (known after apply)
~ endpoint = "<hidden>" -> (known after apply)
~ engine_version = "4.0.0" -> (known after apply)
~ id = "test-docdb-instance-0" -> (known after apply)
+ identifier_prefix = (known after apply)
~ kms_key_id = "arn:aws:kms:us-east-1:<hidden>:key/<hidden>" -> (known after apply)
~ port = 37018 -> (known after apply)
~ preferred_backup_window = "07:55-08:25" -> (known after apply)
~ preferred_maintenance_window = "sat:07:23-sat:07:53" -> (known after apply)
~ publicly_accessible = false -> (known after apply)
~ storage_encrypted = true -> (known after apply)
- tags = {} -> null
~ tags_all = {} -> (known after apply)
~ writer = false -> (known after apply)
# (5 unchanged attributes hidden)
}
# aws_docdb_cluster_instance.docdb[1] must be replaced
-/+ resource "aws_docdb_cluster_instance" "docdb" {
+ apply_immediately = (known after apply)
~ arn = "arn:aws:rds:us-east-1:<hidden>:db:test-docdb-instance-1" -> (known after apply)
~ availability_zone = "us-east-1c" -> (known after apply)
~ ca_cert_identifier = "rds-ca-2019" -> (known after apply)
~ cluster_identifier = "test-docdb-cluster" -> (known after apply) # forces replacement
~ db_subnet_group_name = "test-subnet-group" -> (known after apply)
~ dbi_resource_id = "db-<hidden>" -> (known after apply)
~ endpoint = "<hidden>" -> (known after apply)
~ engine_version = "4.0.0" -> (known after apply)
~ id = "test-docdb-instance-1" -> (known after apply)
+ identifier_prefix = (known after apply)
~ kms_key_id = "arn:aws:kms:us-east-1:<hidden>:key/<hidden>" -> (known after apply)
~ port = 37018 -> (known after apply)
~ preferred_backup_window = "07:55-08:25" -> (known after apply)
~ preferred_maintenance_window = "sat:05:13-sat:05:43" -> (known after apply)
~ publicly_accessible = false -> (known after apply)
~ storage_encrypted = true -> (known after apply)
- tags = {} -> null
~ tags_all = {} -> (known after apply)
~ writer = true -> (known after apply)
# (5 unchanged attributes hidden)
}
Plan: 3 to add, 0 to change, 3 to destroy.
Changes to Outputs:
~ docdb_arn = "arn:aws:rds:us-east-1:<hidden>:cluster:test-docdb-cluster" -> (known after apply)
~ docdb_endpoint = "<hidden>" -> (known after apply)
~ docdb_name = "test-docdb-cluster" -> (known after apply)
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Saved the plan to: tfplan
To perform exactly these actions, run the following command to apply:
terraform apply "tfplan"
Releasing state lock. This may take a few moments...
How can I keep updating the DocumentDB cluster without rebuilding it every time I run my script?
Changing the KMS key for your DocumentDB cluster requires a replacement; there is not much you can do about that.
You are referring to the default KMS alias (kms_key_id = data.aws_kms_alias.rds.arn), but since the cluster was created with a different kms_key_id value, a replacement is required. If you go ahead, make sure you back up your database first.
It seems that AWS replaced the value of the kms_key_id property during creation of the cluster.
Try using the target_key_arn attribute instead of the ARN of the alias in the cluster resource:
kms_key_id = data.aws_kms_alias.rds.target_key_arn
That is the only property forcing replacement of the cluster resource; the cluster instance replacements are a consequence of the cluster replacement.
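In other words (a sketch against the configuration from the question, with the other arguments unchanged), only the reference changes:
data "aws_kms_alias" "rds" {
  name = "alias/aws/rds"
}

resource "aws_docdb_cluster" "docdb" {
  # ... other arguments as in the question ...

  # target_key_arn resolves to the underlying key ARN, which is what AWS stores,
  # so the value no longer differs from state on the next plan.
  kms_key_id = data.aws_kms_alias.rds.target_key_arn
}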
Good morning,
I am having a problem with my Terraform code. I have defined two GCP compute instances in the following way inside a specific module:
resource "google_compute_instance" "dev_machine" {
name = "dev-machine-${var.tag}"
deletion_protection = true
machine_type = "e2-standard-2"
boot_disk {
initialize_params {
image = "ubuntu-os-pro-cloud/ubuntu-pro-2004-lts"
size = 200
}
}
network_interface {
network = var.vpc.name
subnetwork = var.subnet.name
access_config {
}
}
tags = ["dev-vm-${var.tag}"]
}
resource "google_compute_instance" "dev_machine-minor" {
name = "dev-machine-minor-${var.tag}"
deletion_protection = true
machine_type = "n1-standard-1"
boot_disk {
initialize_params {
image = "ubuntu-os-pro-cloud/ubuntu-pro-2004-lts"
size = 30
}
}
network_interface {
network = var.vpc.name
subnetwork = var.subnet.name
access_config {
}
}
tags = ["dev-vm-${var.tag}"]
}
resource "google_compute_firewall" "ssh_access" {
name = "allow-ssh-access-${var.tag}"
network = var.vpc.name
allow {
protocol = "tcp"
ports = ["22"]
}
source_ranges = [ "0.0.0.0/0" ]
target_tags = ["dev-vm-${var.tag}"]
}
The variables are defined in the following way:
variable "vpc" {
description = "vpc to deploy instance"
}
variable "subnet" {
description = "subnet to deploy the subnet"
}
variable "tag" {
description = "general project tag"
}
Everything works as expected when I run 'terraform apply', but if I run it again it always states that the instances must be replaced, even though I did not make any changes to the code. When I connect to an instance via SSH, I notice that everything has been wiped out.
This is the output from 'terraform plan' with no changes to the code:
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
-/+ destroy and then create replacement
Terraform will perform the following actions:
# module.dev_environment.google_compute_instance.dev_machine must be replaced
-/+ resource "google_compute_instance" "dev_machine" {
~ cpu_platform = "Intel Broadwell" -> (known after apply)
~ current_status = "RUNNING" -> (known after apply)
~ deletion_protection = false -> true
- enable_display = false -> null
~ guest_accelerator = [] -> (known after apply)
~ id = "<id with project>" -> (known after apply)
~ instance_id = "<instance id>" -> (known after apply)
~ label_fingerprint = "<label fingerprint>" -> (known after apply)
- labels = {} -> null
- metadata = {} -> null
~ metadata_fingerprint = "<metadata fingerprint>=" -> (known after apply)
+ min_cpu_platform = (known after apply)
name = "dev-machine-pweather"
~ project = "<project id>" -> (known after apply)
- resource_policies = [] -> null
~ self_link = "<project id url>/instances/dev-machine-pweather" -> (known after apply)
tags = [
"dev-vm-pweather",
]
~ tags_fingerprint = "<tag fingerprint>" -> (known after apply)
~ zone = "us-east4-a" -> (known after apply)
# (2 unchanged attributes hidden)
~ boot_disk {
~ device_name = "persistent-disk-0" -> (known after apply)
+ disk_encryption_key_sha256 = (known after apply)
+ kms_key_self_link = (known after apply)
~ source = "<project id url>/us-east4-a/disks/dev-machine-pweather" -> (known after apply)
# (2 unchanged attributes hidden)
~ initialize_params {
~ image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-pro-cloud/global/images/ubuntu-pro-2004-focal-v20210720" -> "ubuntu-os-pro-cloud/ubuntu-pro-2004-lts" # forces replacement
~ labels = {} -> (known after apply)
~ type = "pd-standard" -> (known after apply)
# (1 unchanged attribute hidden)
}
}
+ confidential_instance_config {
+ enable_confidential_compute = (known after apply)
}
~ network_interface {
~ name = "nic0" -> (known after apply)
~ network = "<project id url>/global/networks/pweather-vpc" -> "pweather-vpc"
~ network_ip = "10.0.0.17" -> (known after apply)
~ subnetwork = "<project id url>/subnetworks/pweather-subnet" -> "pweather-subnet"
~ subnetwork_project = "<project>" -> (known after apply)
~ access_config {
~ nat_ip = "<NAT IP>" -> (known after apply)
~ network_tier = "PREMIUM" -> (known after apply)
}
}
+ reservation_affinity {
+ type = (known after apply)
+ specific_reservation {
+ key = (known after apply)
+ values = (known after apply)
}
}
~ scheduling {
~ automatic_restart = true -> (known after apply)
~ min_node_cpus = 0 -> (known after apply)
~ on_host_maintenance = "MIGRATE" -> (known after apply)
~ preemptible = false -> (known after apply)
+ node_affinities {
+ key = (known after apply)
+ operator = (known after apply)
+ values = (known after apply)
}
}
- shielded_instance_config {
- enable_integrity_monitoring = true -> null
- enable_secure_boot = false -> null
- enable_vtpm = true -> null
}
}
# module.dev_environment.google_compute_instance.dev_machine-minor must be replaced
-/+ resource "google_compute_instance" "dev_machine-minor" {
~ cpu_platform = "Intel Broadwell" -> (known after apply)
~ current_status = "RUNNING" -> (known after apply)
~ deletion_protection = false -> true
- enable_display = false -> null
~ guest_accelerator = [] -> (known after apply)
~ id = "<project id url>/instances/dev-machine-minor-pweather" -> (known after apply)
~ instance_id = "<instance id>" -> (known after apply)
~ label_fingerprint = "<label fingerprint>" -> (known after apply)
- labels = {} -> null
- metadata = {} -> null
~ metadata_fingerprint = "udK04sf2kcQ=" -> (known after apply)
+ min_cpu_platform = (known after apply)
name = "dev-machine-minor-pweather"
~ project = "<project name>" -> (known after apply)
- resource_policies = [] -> null
~ self_link = "<project id url>/us-east4-a/instances/dev-machine-minor-pweather" -> (known after apply)
tags = [
"dev-vm-pweather",
]
~ tags_fingerprint = "<tag fingerprint>" -> (known after apply)
~ zone = "us-east4-a" -> (known after apply)
# (2 unchanged attributes hidden)
~ boot_disk {
~ device_name = "persistent-disk-0" -> (known after apply)
+ disk_encryption_key_sha256 = (known after apply)
+ kms_key_self_link = (known after apply)
~ source = "<project id url>/us-east4-a/disks/dev-machine-minor-pweather" -> (known after apply)
# (2 unchanged attributes hidden)
~ initialize_params {
~ image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-pro-cloud/global/images/ubuntu-pro-2004-focal-v20210720" -> "ubuntu-os-pro-cloud/ubuntu-pro-2004-lts" # forces replacement
~ labels = {} -> (known after apply)
~ type = "pd-standard" -> (known after apply)
# (1 unchanged attribute hidden)
}
}
+ confidential_instance_config {
+ enable_confidential_compute = (known after apply)
}
~ network_interface {
~ name = "nic0" -> (known after apply)
~ network = "<project id url>>/global/networks/pweather-vpc" -> "pweather-vpc"
~ network_ip = "10.0.0.16" -> (known after apply)
~ subnetwork = "<project id url>/us-east4/subnetworks/pweather-subnet" -> "pweather-subnet"
~ subnetwork_project = "<project>" -> (known after apply)
~ access_config {
~ nat_ip = "<NAT IP>" -> (known after apply)
~ network_tier = "PREMIUM" -> (known after apply)
}
}
+ reservation_affinity {
+ type = (known after apply)
+ specific_reservation {
+ key = (known after apply)
+ values = (known after apply)
}
}
~ scheduling {
~ automatic_restart = true -> (known after apply)
~ min_node_cpus = 0 -> (known after apply)
~ on_host_maintenance = "MIGRATE" -> (known after apply)
~ preemptible = false -> (known after apply)
+ node_affinities {
+ key = (known after apply)
+ operator = (known after apply)
+ values = (known after apply)
}
}
- shielded_instance_config {
- enable_integrity_monitoring = true -> null
- enable_secure_boot = false -> null
- enable_vtpm = true -> null
}
}
Plan: 2 to add, 0 to change, 2 to destroy.
Changes to Outputs:
~ vm_ip = "<VM IP>" -> (known after apply)
------------------------------------------------------------------------
Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.
I tried to review the documentation, but (to my understanding) I did not find anything that could help me. Is there something I am doing wrong or is it a bug in the provider? Thank you!
EDIT 1: Added output of 'terraform plan' command.
According to the plan output, the argument value forcing a replacement is:
~ boot_disk {
...
~ initialize_params {
~ image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-pro-cloud/global/images/ubuntu-pro-2004-focal-v20210720" -> "ubuntu-os-pro-cloud/ubuntu-pro-2004-lts" # forces replacement
}
}
If you are comfortable updating the value from ubuntu-os-pro-cloud/ubuntu-pro-2004-lts to https://www.googleapis.com/compute/v1/projects/ubuntu-os-pro-cloud/global/images/ubuntu-pro-2004-focal-v20210720, then this will prevent the resource from having a Delete/Create operation applied to it.
However, the additional question here is why your argument value for image is changing. If you changed the config or someone manually changed the resource's attributes, then these would cause the change. However, you stated that did not occur. The provider may be attempting to auto-correct and modify the value during validation, but according to the documentation, your value is completely valid. Therefore, this is likely a bug in the provider for that resource's schema.
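For example, a sketch of the first instance's boot_disk with the image pinned to the resolved self-link taken from the plan output (the rest of the resource stays as in the question):
boot_disk {
  initialize_params {
    # The fully-resolved self-link matches what the API stores, so no diff is produced;
    # the trade-off is that you no longer track the ubuntu-pro-2004-lts family automatically.
    image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-pro-cloud/global/images/ubuntu-pro-2004-focal-v20210720"
    size  = 200
  }
}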
Complementing Matt Schuchard's answer:
Something that can be useful is to add the lifecycle meta-argument to ignore changes on the property that causes the replacement (destroy/create).
Example:
lifecycle {
  ignore_changes = [boot_disk]
}
https://www.terraform.io/docs/language/meta-arguments/lifecycle.html
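A minimal sketch of where that block goes, assuming the dev_machine resource from the question. Note that ignoring the whole boot_disk block also hides intentional changes (for example a new size), so pinning the image as in the previous answer is the more precise option:
resource "google_compute_instance" "dev_machine" {
  # ... arguments as in the question ...

  # Ignore drift on boot_disk so the resolved image URL does not force a replacement.
  lifecycle {
    ignore_changes = [boot_disk]
  }
}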
I am trying to increase the size of the root volume for my AMI ami-0d013c5896434b38a; I am using Terraform to provision this.
Just to clarify: I have only one instance, and I want to make sure that if I need to increase the disk space, I don't have to destroy the machine first. EC2 elasticity is my reason to believe it's doable.
Does anyone know whether this is doable? Yes, I could simply run terraform plan as a dry run, but I'm just double-checking.
I'm running Terraform 1.0.1 and wanted to change my volume_size from 20 GB to 30 GB.
After running terraform apply:
[...]
# aws_instance.typo3_staging_1 will be updated in-place
~ resource "aws_instance" "staging_1" {
id = "i-0eb2f8af6c8ac4125"
tags = {
"Name" = "Staging 1"
"Team" = "DevOps"
}
# (28 unchanged attributes hidden)
~ root_block_device {
tags = {}
~ volume_size = 20 -> 30
# (8 unchanged attributes hidden)
}
# (4 unchanged blocks hidden)
}
Plan: 0 to add, 1 to change, 0 to destroy.
[...]
I can see that Terraform will not destroy the system. A simple 'yes' changes the volume, and after ~33 seconds the root_block_device has been modified.
Logging in to the EC2 instance shows nothing has changed yet: df still reports the old 20 GB root partition. But a simple sudo reboot grew the disk by 10 GB without destroying the current system, and all Docker containers on that instance run as expected. Perfect.
My Terraform resource config for this aws_instance is:
resource "aws_instance" "staging_1" {
instance_type = "t3.medium"
ebs_optimized = true
ami = "ami-001183208be54f75c"
key_name = aws_key_pair.master_key.key_name
subnet_id = aws_subnet.web_development_private_a.id
vpc_security_group_ids = [aws_security_group.ec2_staging.id]
root_block_device {
volume_size = 30 # in GB <<----- I increased this!
volume_type = "gp3"
encrypted = true
kms_key_id = data.aws_kms_key.customer_master_key.arn
}
# This is for T3 only (doesn't apply to M5/R5/...)
# standard: Baseline of 20% or 30% CPU. Short bursts of 100% CPU are possible, but under a budget. Throttled, if budget is 0.
# unlimited: Always 100% CPU possible, but costs are higher, if over burst budget.
credit_specification {
cpu_credits = "unlimited"
}
metadata_options {
http_endpoint = "enabled"
http_tokens = "required"
}
lifecycle {
prevent_destroy = true
}
tags = {
Name = "Staging 1"
Team = "DevOps"
}
volume_tags = {
Name = "Staging 1"
Team = "DevOps"
}
}
It is doable through the AWS Console or AWS CLI, but not through Terraform, based on a quick test.
Changing the volume_size parameter from 10 to 20 in an aws_instance definition such as the one below (using Terraform 0.15.0) caused a destroy/re-create of the instance.
If you need to keep managing the instance with Terraform, consider (1) performing the modification outside of Terraform (AWS console or CLI) and (2) importing the modified resource back into Terraform.
In the second section of this answer I describe a simple example of re-importing into Terraform the state of an aws_instance modified through the console. Disclaimer: do this at your own risk and after suitable testing in a non-production environment, and read carefully the warnings in the documentation for the terraform import command.
Testing EBS modification done in Terraform - requires instance replacement
resource "aws_instance" "testebs" {
availability_zone = local.aznames[0]
ami = data.aws_ami.ubuntu.id
instance_type = "t2.micro"
associate_public_ip_address = true
key_name = "zzzzzzzz"
ebs_block_device {
device_name = "/dev/sda1"
volume_size = 20
}
}
I paste below the full config and the output of terraform plan.
The ami is a recent ubuntu 20.04 for eu-west-1, not the one in the original question.
File ebstest.tf
terraform {
  required_version = "~> 0.15.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.0"
    }
  }
}

provider "aws" {
  region  = "eu-west-1"
  profile = "xxxxxxx"
}

data "aws_ami" "ubuntu" {
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "root-device-type"
    values = ["ebs"]
  }

  owners = ["099720109477"] # Canonical
}

data "aws_availability_zones" "available" {
  state = "available"
}

locals {
  aznames = data.aws_availability_zones.available.names
}

resource "aws_instance" "testebs" {
  availability_zone           = local.aznames[0]
  ami                         = data.aws_ami.ubuntu.id
  instance_type               = "t2.micro"
  associate_public_ip_address = true
  key_name                    = "zzzzzzzz"

  ebs_block_device {
    device_name = "/dev/sda1"
    volume_size = 20
  }

  tags = {
    Name = "testebs-${local.aznames[0]}"
  }
}
Output of terraform plan:
$ terraform plan
aws_instance.testebs: Refreshing state... [id=i-0e1fededb2e432a98]
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
-/+ destroy and then create replacement
Terraform will perform the following actions:
# aws_instance.testebs must be replaced
-/+ resource "aws_instance" "testebs" {
~ arn = "arn:aws:ec2:eu-west-1:xxxxxxxxxxxxx:instance/i-0e1fededb2e432a98" -> (known after apply)
~ cpu_core_count = 1 -> (known after apply)
~ cpu_threads_per_core = 1 -> (known after apply)
- disable_api_termination = false -> null
- ebs_optimized = false -> null
- hibernation = false -> null
+ host_id = (known after apply)
~ id = "i-0e1fededb2e432a98" -> (known after apply)
~ instance_state = "running" -> (known after apply)
~ ipv6_address_count = 0 -> (known after apply)
~ ipv6_addresses = [] -> (known after apply)
- monitoring = false -> null
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
~ primary_network_interface_id = "eni-0a923724fec1e76ce" -> (known after apply)
~ private_dns = "ip-172-31-13-57.eu-west-1.compute.internal" -> (known after apply)
~ private_ip = "172.31.13.57" -> (known after apply)
~ public_dns = "ec2-3-250-102-86.eu-west-1.compute.amazonaws.com" -> (known after apply)
~ public_ip = "3.250.102.86" -> (known after apply)
~ secondary_private_ips = [] -> (known after apply)
~ security_groups = [
- "default",
] -> (known after apply)
~ subnet_id = "subnet-192e767f" -> (known after apply)
tags = {
"Name" = "testebs-eu-west-1a"
}
~ tenancy = "default" -> (known after apply)
~ vpc_security_group_ids = [
- "sg-d7dc5a9a",
] -> (known after apply)
# (7 unchanged attributes hidden)
- credit_specification {
- cpu_credits = "standard" -> null
}
+ ebs_block_device { # forces replacement
+ delete_on_termination = true
+ device_name = "/dev/sda1"
+ encrypted = (known after apply)
+ iops = (known after apply)
+ kms_key_id = (known after apply)
+ snapshot_id = (known after apply)
+ throughput = (known after apply)
+ volume_id = (known after apply)
+ volume_size = 20
+ volume_type = (known after apply)
}
- ebs_block_device { # forces replacement
- delete_on_termination = true -> null
- device_name = "/dev/sda1" -> null
- encrypted = false -> null
- iops = 100 -> null
- snapshot_id = "snap-0f4b18aebb4264157" -> null
- tags = {} -> null
- throughput = 0 -> null
- volume_id = "vol-01eade74ebeba666f" -> null
- volume_size = 10 -> null
- volume_type = "gp2" -> null
}
~ enclave_options {
~ enabled = false -> (known after apply)
}
+ ephemeral_block_device {
+ device_name = (known after apply)
+ no_device = (known after apply)
+ virtual_name = (known after apply)
}
~ metadata_options {
~ http_endpoint = "enabled" -> (known after apply)
~ http_put_response_hop_limit = 1 -> (known after apply)
~ http_tokens = "optional" -> (known after apply)
}
+ network_interface {
+ delete_on_termination = (known after apply)
+ device_index = (known after apply)
+ network_interface_id = (known after apply)
}
~ root_block_device {
~ delete_on_termination = true -> (known after apply)
~ device_name = "/dev/sda1" -> (known after apply)
~ encrypted = false -> (known after apply)
~ iops = 100 -> (known after apply)
+ kms_key_id = (known after apply)
~ tags = {} -> (known after apply)
~ throughput = 0 -> (known after apply)
~ volume_id = "vol-01eade74ebeba666f" -> (known after apply)
~ volume_size = 10 -> (known after apply)
~ volume_type = "gp2" -> (known after apply)
}
}
Plan: 1 to add, 0 to change, 1 to destroy.
Testing modification through the AWS Console and importing the resource back into Terraform (see disclaimer/warning above).
Create instance with 10G volume as per the previous part of the answer.
Modify EBS volume size to 20G in AWS Console.
(did not extend the volume at OS level -- exercise for the reader :D)
List the Terraform items in the state file and select the aws_instance resource whose state will be removed:
$ terraform state list
data.aws_ami.ubuntu
data.aws_availability_zones.available
aws_instance.testebs
Remove state (terraform state rm) for the aws_instance resource.
$ terraform state rm aws_instance.testebs
Removed aws_instance.testebs
Successfully removed 1 resource instance(s).
Using terraform import, import the aws_instance resource using the instance ID of the modified EC2 instance:
$ terraform import aws_instance.testebs i-xxxxxxxxxxxxxxxx
aws_instance.testebs: Importing from ID "i-xxxxxxxxxxxxxxxx"...
aws_instance.testebs: Import prepared!
Prepared aws_instance for import
aws_instance.testebs: Refreshing state... [id=i-xxxxxxxxxxxxxxxx]
Import successful!
The resources that were imported are shown above. These resources are now in your Terraform state and will henceforth be managed by Terraform.
Verify that terraform can manage the imported instance properly (modify instance through terraform and verify behavior)
From the provider documentation: ebs_block_device - (Optional) One or more configuration blocks with additional EBS block devices to attach to the instance. Block device configurations only apply on resource creation.
So, as per the documentation, ebs_block_device can only be applied during creation of the resource; in your case, that means the EC2 instance has to be re-created.
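If the volume you want to grow is the root volume, a common alternative (a sketch, reusing the Ubuntu AMI from the earlier config) is to declare it as root_block_device, whose volume_size is updated in place instead of forcing replacement, as shown in the 20 GB to 30 GB answer above:
resource "aws_instance" "testebs" {
  availability_zone           = local.aznames[0]
  ami                         = data.aws_ami.ubuntu.id
  instance_type               = "t2.micro"
  associate_public_ip_address = true
  key_name                    = "zzzzzzzz"

  # root_block_device (unlike ebs_block_device) can be resized without replacing the instance.
  root_block_device {
    volume_size = 20
  }
}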
I am using Terraform to manage my infrastructure in AWS, and I have used Terraform to launch one bastion instance.
My issue is that every time I run terraform plan, Terraform tells me it will destroy and recreate that bastion instance, and terraform apply does exactly that.
Here is my code:
resource "aws_instance" "bastion" {
ami = var.ami_id
instance_type = "t2.micro"
key_name = var.key_name
monitoring = false
vpc_security_group_ids = [aws_security_group.`bastion_sg`.id]
subnet_id = var.subnet_id_private
iam_instance_profile = aws_iam_instance_profile.instance_profile.name
user_data = data.template_file.script.rendered
tags = merge(
{
"Name" = local.name_prefix
},
var.default_tags,
)
ebs_block_device {
device_name = "/dev/sda1"
volume_size = 8
volume_type = "gp2"
delete_on_termination = true
}
}
Here is the terraform plan output:
# module.bastion.aws_instance.bastion must be replaced
-/+ resource "aws_instance" "bastion" {
~ arn = "arn:aws:ec2:xx-xxxx-x:xxxxx:instance/i-xxxxxxxxxxxxxx" -> (known after apply)
~ associate_public_ip_address = false -> (known after apply)
~ availability_zone = "xx-xxxx-xx" -> (known after apply)
~ cpu_core_count = 1 -> (known after apply)
~ cpu_threads_per_core = 1 -> (known after apply)
- disable_api_termination = false -> null
- ebs_optimized = false -> null
- hibernation = false -> null
+ host_id = (known after apply)
~ id = "i-xxxxxxxxxxxxxx" -> (known after apply)
~ instance_state = "running" -> (known after apply)
~ ipv6_address_count = 0 -> (known after apply)
~ ipv6_addresses = [] -> (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
~ primary_network_interface_id = "eni-xxxxxxxxxxxxxx" -> (known after apply)
~ private_dns = "ip-xx-xxx-xxx-xxxxx.xx-xxxx-x.compute.internal" -> (known after apply)
~ private_ip = "xx.xxx.x.xx" -> (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
~ secondary_private_ips = [] -> (known after apply)
~ security_groups = [] -> (known after apply)
~ tenancy = "default" -> (known after apply)
# (10 unchanged attributes hidden)
- credit_specification {
- cpu_credits = "standard" -> null
}
+ ebs_block_device { # forces replacement
+ delete_on_termination = true
+ device_name = "/dev/sda1"
+ encrypted = (known after apply)
+ iops = (known after apply)
+ kms_key_id = (known after apply)
+ snapshot_id = (known after apply)
+ throughput = (known after apply)
+ volume_id = (known after apply)
+ volume_size = 8
+ volume_type = "gp2"
}
- ebs_block_device { # forces replacement
- delete_on_termination = true -> null
- device_name = "/dev/sda1" -> null
- encrypted = false -> null
- iops = 100 -> null
- snapshot_id = "snap-xxxxxxxxxxxxxx" -> null
- tags = {} -> null
- throughput = 0 -> null
- volume_id = "vol-xxxxxxxxxxxxxx" -> null
- volume_size = 8 -> null
- volume_type = "gp2" -> null
}
~ enclave_options {
~ enabled = false -> (known after apply)
}
+ ephemeral_block_device {
+ device_name = (known after apply)
+ no_device = (known after apply)
+ virtual_name = (known after apply)
}
~ metadata_options {
~ http_endpoint = "enabled" -> (known after apply)
~ http_put_response_hop_limit = 1 -> (known after apply)
~ http_tokens = "optional" -> (known after apply)
}
+ network_interface {
+ delete_on_termination = (known after apply)
+ device_index = (known after apply)
+ network_interface_id = (known after apply)
}
~ root_block_device {
~ delete_on_termination = true -> (known after apply)
~ device_name = "/dev/sda1" -> (known after apply)
~ encrypted = false -> (known after apply)
~ iops = 100 -> (known after apply)
+ kms_key_id = (known after apply)
~ tags = {} -> (known after apply)
~ throughput = 0 -> (known after apply)
~ volume_id = "vol-xxxxxxxxxxxxxx" -> (known after apply)
~ volume_size = 8 -> (known after apply)
~ volume_type = "gp2" -> (known after apply)
}
}
So it is the ebs_block_device block that is causing the replacement.
The most likely cause is that the volume hasn't actually been attached at /dev/sda1, so when Terraform builds the plan it sees that its disk is not on sda1 and decides it needs to replace the instance.
You would need to confirm this while the instance is running by checking where your volume has actually been attached (either on the instance itself or in the console).
It is likely that the root volume has been attached to sda1, pushing yours elsewhere.
If this is the case, you need to either change the device name of this volume or, if you intend it to be the root volume, declare it with root_block_device instead.
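A minimal sketch of that last option, reusing the values from the question (assuming /dev/sda1 really is the root device of your AMI):
resource "aws_instance" "bastion" {
  ami                    = var.ami_id
  instance_type          = "t2.micro"
  key_name               = var.key_name
  monitoring             = false
  vpc_security_group_ids = [aws_security_group.bastion_sg.id]
  subnet_id              = var.subnet_id_private
  iam_instance_profile   = aws_iam_instance_profile.instance_profile.name
  user_data              = data.template_file.script.rendered

  # Describe the root volume with root_block_device instead of ebs_block_device,
  # so Terraform compares it against the actual root volume and stops forcing replacement.
  root_block_device {
    volume_size           = 8
    volume_type           = "gp2"
    delete_on_termination = true
  }

  tags = merge(
    { "Name" = local.name_prefix },
    var.default_tags,
  )
}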