Adding another volume when there are EBS volumes already

I am new to Terraform and have been trying to figure out how to attach a new drive to an instance that already uses ebs_block_device, without tearing down the whole instance.
resource "aws_instance" "test_directory_controller" {
for_each = aws_network_interface.test_directory_controller
ami = local.test_ami_id
key_name = var.test_instance_key_name
instance_type = var.test_instance_type
iam_instance_profile = module.test_manager.test_instance_profile_name
network_interface {
network_interface_id = each.value.id
device_index = 0
}
root_block_device {
volume_size = 120
encrypted = true
}
ebs_block_device {
device_name = "/dev/sdh"
volume_size = 40
encrypted = true
}
Here is the new code that I have added:
resource "aws_ebs_volume" "test_directory_d_drive" {
for_each = aws_network_interface.test_directory_controller
availability_zone = each.key
size = 40
encrypted = true
tags = {
Name = "Local Disk"
DriveLetter = "D"
}
}
resource "aws_volume_attachment" "test_volume_attachment" {
for_each = aws_network_interface.test_directory_controller
device_name = "xvdf"
volume_id = aws_instance.test_directory_controller[each.key].id
instance_id = aws_ebs_volume.test_directory_d_drive[each.key].id
}
The new code above works, but according to HashiCorp's documentation we should not combine aws_volume_attachment with inline ebs_block_device blocks on the same instance. My question here is: how do I just add a new ebs_block_device without tearing down the whole instance?
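For reference, a minimal sketch of the separate-volume pattern (my reconstruction, not an accepted answer): the volumes are keyed on the same for_each map as the instances, and each volume takes its availability zone from the matching instance so the two always line up.

# One extra volume per instance, keyed by the same for_each keys
resource "aws_ebs_volume" "test_directory_d_drive" {
  for_each = aws_instance.test_directory_controller

  availability_zone = each.value.availability_zone
  size              = 40
  encrypted         = true

  tags = {
    Name        = "Local Disk"
    DriveLetter = "D"
  }
}

# One attachment per volume, reusing the same keys
resource "aws_volume_attachment" "test_volume_attachment" {
  for_each = aws_ebs_volume.test_directory_d_drive

  device_name = "xvdf"
  volume_id   = each.value.id
  instance_id = aws_instance.test_directory_controller[each.key].id
}

Because the extra volumes live outside the instance resource, adding or removing one only plans changes to aws_ebs_volume and aws_volume_attachment, not to aws_instance.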

Related

Terraform - creating multiple EBS volumes for single instance

How would I go about creating and attaching more than one EBS volume to a single EC2 instance? I'm trying to pass a list of maps with the device block variables:
block_device_mappings = [
  {
    device_name   = "/dev/sdg"
    app_disk_type = "gp3"
    volume_size   = "40"
  },
  {
    device_name   = "/dev/sdh"
    app_disk_type = "gp3"
    volume_size   = "45"
  }
]
This is my resource attachment:
resource "aws_ebs_volume" "ebs_volume" {
count = length(module.ec2-module.id)
encrypted = "true"
type = var.app_disk_type
kms_key_id = module.datasource-module.data.ebs_kms.arn
availability_zone = module.ec2-module.availability_zone[count.index]
size = var.app_disk_size
}
resource "aws_volume_attachment" "volume_attachment" {
count = length(module.ec2-module.id)
device_name = "/dev/sdg"
volume_id = aws_ebs_volume.ebs_volume[count.index].id
instance_id = module.ec2-module.id[count.index]
}
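A minimal sketch of one way to wire that list up (assuming a single instance whose id and availability_zone lists are exposed by module.ec2-module, as in the snippet above; the resource names here are hypothetical), creating one volume and one attachment per map entry with for_each keyed on device_name:

variable "block_device_mappings" {
  type = list(object({
    device_name   = string
    app_disk_type = string
    volume_size   = string
  }))
}

resource "aws_ebs_volume" "app_disk" {
  # One volume per entry, keyed by its device name
  for_each = { for bdm in var.block_device_mappings : bdm.device_name => bdm }

  availability_zone = module.ec2-module.availability_zone[0]
  type              = each.value.app_disk_type
  size              = each.value.volume_size
  encrypted         = true
}

resource "aws_volume_attachment" "app_disk" {
  # One attachment per volume, reusing the same keys
  for_each = aws_ebs_volume.app_disk

  device_name = each.key
  volume_id   = each.value.id
  instance_id = module.ec2-module.id[0]
}

Keying on device_name rather than a list index means adding a third mapping later does not reshuffle the existing volumes.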

Terraform - creating multiple EBS volumes

How would I go about creating and attaching more than one EBS volume to an instance?
The code below works when attaching a single EBS volume. My main concern is creating a map between the size of the EBS volume and the device name. I've tried a variety of things, creating a list, etc., but no luck.
# Create EBS volume
resource "aws_ebs_volume" "ebs_volume" {
  count             = "${var.ec2_create_volume == true ? var.ec2_instance_count : 0}"
  availability_zone = "${aws_instance.ec2.*.availability_zone[count.index]}"
  size              = "${var.ec2_ebs_volume_size}"
  type              = "${var.ec2_ebs_volume_type}"
}

# Attach EBS volume
resource "aws_volume_attachment" "volume_attachment" {
  count       = "${var.ec2_create_volume == true ? var.ec2_instance_count : 0}"
  device_name = "${var.ec2_device_name}"
  volume_id   = "${aws_ebs_volume.ebs_volume.*.id[count.index]}"
  instance_id = "${aws_instance.ec2.*.id[count.index]}"
}
You're almost there; try using element(list, index), which loops over the list. For example, this config will successfully create 2 EC2 instances with 3 additional EBS volumes attached to each:
variable "ec2_device_names" {
default = [
"/dev/sdd",
"/dev/sde",
"/dev/sdf",
]
}
variable "ec2_instance_count" {
default = 2
}
variable "ec2_ebs_volume_count" {
default = 3
}
resource "aws_instance" "ec2" {
count = "${var.ec2_instance_count}"
ami = "${var.aws_ami_id}"
instance_type = "${var.ec2_instance_type}"
}
resource "aws_ebs_volume" "ebs_volume" {
count = "${var.ec2_instance_count * var.ec2_ebs_volume_count}"
availability_zone = "${element(aws_instance.ec2.*.availability_zone, count.index)}"
size = "${var.ec2_ebs_volume_size}"
}
resource "aws_volume_attachment" "volume_attachement" {
count = "${var.ec2_instance_count * var.ec2_ebs_volume_count}"
volume_id = "${aws_ebs_volume.ebs_volume.*.id[count.index]}"
device_name = "${element(var.ec2_device_names, count.index)}"
instance_id = "${element(aws_instance.ec2.*.id, count.index)}"
}
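To spell out why this works (my own walkthrough, not part of the original answer): with 2 instances and 3 volumes, count.index on the attachment runs from 0 to 5. element(var.ec2_device_names, count.index) cycles through /dev/sdd, /dev/sde, /dev/sdf and wraps around, while element(aws_instance.ec2.*.id, count.index) alternates between the two instances, so instance 0 picks up indices 0, 2 and 4 and instance 1 picks up indices 1, 3 and 5, each with three distinct device names. This only pairs every instance with every device name because 2 and 3 share no common factor; with 2 instances and 2 volumes each instance would get the same device name twice, which is what the floor/modulo variants below address.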
In case anyone else is looking for the answer: the solution below works for multiple instances across multiple AZs. Here device_name is a list of strings, so we need to pass as many names as there are additional volumes, and volume_count is the length of the list of numbers additional_volume_size.
resource "aws_ebs_volume" "ebs_volume" {
count = var.instance_count * var.volume_count
availability_zone = aws_instance.ec2[floor(count.index/var.volume_count)].availability_zone
size = var.additional_volume_size[count.index%var.volume_count]
}
resource "aws_volume_attachment" "volume_attachement" {
count = var.instance_count * var.volume_count
volume_id = element(aws_ebs_volume.ebs_volume.*.id, count.index)
device_name = element(var.device_name, count.index)
instance_id = element(aws_instance.ec2.*.id, floor(count.index/var.volume_count))
}
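A quick illustration of the index arithmetic with hypothetical values: for var.instance_count = 2 and var.volume_count = 3, count.index runs 0 to 5; floor(count.index / 3) yields 0, 0, 0, 1, 1, 1 and selects the instance, while count.index % 3 yields 0, 1, 2, 0, 1, 2 and selects the size (element() over device_name wraps around the same way), so every instance receives all three additional volumes regardless of how the two counts relate.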
Multiple EC2 instances with multiple EBS volumes of different sizes. This works with an odd or even number of volumes.
instance_count      = 3
ebs_volume_count    = 2
ec2_ebs_volume_size = [10, 15]
ec2_device_names    = ["/dev/sdd", "/dev/sde"]

variable "instance_count" {
  type    = number
  default = 1
}

variable "ebs_volume_count" {
  type    = number
  default = 0
}

variable "ec2_ebs_volume_size" {
  type = list(any)
  default = [
    10
  ]
}

variable "ec2_device_names" {
  type = list(any)
  default = [
    "/dev/sdd"
  ]
}

variable "availability_zones" {
  type = list(any)
}

variable "subnet_ids" {
  type = list(any)
}

resource "aws_instance" "ec2_instance" {
  count             = var.instance_count
  ami               = var.aws_ami_id
  availability_zone = var.availability_zones[count.index]
  subnet_id         = var.subnet_ids[count.index]
  instance_type     = var.ec2_instance_type
}

resource "aws_ebs_volume" "ebs_volume" {
  count             = var.instance_count * var.ebs_volume_count
  availability_zone = element(aws_instance.ec2_instance.*.availability_zone, floor(count.index / var.ebs_volume_count))
  size              = var.ec2_ebs_volume_size[count.index % var.ebs_volume_count]
}

resource "aws_volume_attachment" "volume_attachement" {
  count       = var.instance_count * var.ebs_volume_count
  volume_id   = aws_ebs_volume.ebs_volume.*.id[count.index]
  device_name = var.ec2_device_names[count.index % var.ebs_volume_count]
  instance_id = element(aws_instance.ec2_instance.*.id, floor(count.index / var.ebs_volume_count))
}

Terraform aws_spot_fleet_request iam_instance_profile

I read the Terraform spot fleet example usages from here.
What is the significance of "iam_instance_profile_arn" and what does it do in Example 1?
I'm getting the error "launch_specification.0: invalid or unknown key: tags" in some cases but not in others, so I thought maybe it was related to the IAM instance profile.
iam_instance_profile_arn = "${aws_iam_instance_profile.example.arn}"
Example 1:
# Request a Spot fleet
resource "aws_spot_fleet_request" "cheap_compute" {
  iam_fleet_role      = "arn:aws:iam::12345678:role/spot-fleet"
  spot_price          = "0.03"
  allocation_strategy = "diversified"
  target_capacity     = 6
  valid_until         = "2019-11-04T20:44:20Z"

  launch_specification {
    instance_type            = "m4.10xlarge"
    ami                      = "ami-1234"
    spot_price               = "2.793"
    placement_tenancy        = "dedicated"
    iam_instance_profile_arn = "${aws_iam_instance_profile.example.arn}"
  }

  launch_specification {
    instance_type            = "m4.4xlarge"
    ami                      = "ami-5678"
    key_name                 = "my-key"
    spot_price               = "1.117"
    iam_instance_profile_arn = "${aws_iam_instance_profile.example.arn}"
    availability_zone        = "us-west-1a"
    subnet_id                = "subnet-1234"
    weighted_capacity        = 35

    root_block_device {
      volume_size = "300"
      volume_type = "gp2"
    }

    tags {
      Name = "spot-fleet-example"
    }
  }
}
Example 2:
resource "aws_spot_fleet_request" "foo" {
iam_fleet_role = "arn:aws:iam::12345678:role/spot-fleet"
spot_price = "0.005"
target_capacity = 2
valid_until = "2019-11-04T20:44:20Z"
launch_specification {
instance_type = "m1.small"
ami = "ami-d06a90b0"
key_name = "my-key"
availability_zone = "us-west-2a"
}
launch_specification {
instance_type = "m3.large"
ami = "ami-d06a90b0"
key_name = "my-key"
availability_zone = "us-west-2a"
}
depends_on = ["aws_iam_policy_attachment.test-attach"]
}
The instance profile is unrelated to the error. The error is saying this:
tags {
  Name = "spot-fleet-example"
}
This part of the first example isn't recognized. You can read about what instance profiles are here:
An instance profile is a container for an IAM role that you can use to
pass role information to an EC2 instance when the instance starts.
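For context, a minimal sketch (hypothetical names and policy, not from the linked docs) of how the aws_iam_instance_profile.example referenced by iam_instance_profile_arn above is typically declared:

# Role that the Spot instances will assume at boot (hypothetical name/policy)
resource "aws_iam_role" "example" {
  name = "spot-fleet-instance-role"

  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com" },
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

# The instance profile wraps the role so it can be handed to EC2
resource "aws_iam_instance_profile" "example" {
  name = "spot-fleet-instance-profile"
  role = "${aws_iam_role.example.name}"
}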

Terraform applying huge index value for instance EBS block store

I am using Terraform (called via Terragrunt, if that's relevant) to create an instance from an AMI and mount an existing volume:
resource "aws_instance" "jenkins_master_with_snap" {
count = "${var.master_with_snapshot}"
ami = "${var.jenkins_ami}"
instance_type = "${var.jenkins_instance_type}"
iam_instance_profile = "${data.terraform_remote_state.global.jenkins_profile_name}"
subnet_id = "${data.aws_subnet.jenkins_subnet_with_snap.id}"
key_name = "${var.key_name}"
vpc_security_group_ids = [
"${aws_security_group.jenkins_master_target_sg.id}",
"${data.terraform_remote_state.cicd.cicd_sg_ipa}"
]
ebs_block_device {
snapshot_id = "${var.master_snapshot_id}"
device_name = "${var.jenkins_volume_device}"
volume_type = "gp2"
}
}
It's worth noting that the AMI used to create this resource already has a snapshot mapped to it from the build process, so this resource basically just replaces it with a different snapshot. I'm not sure if this is why I'm having the problem or not.
I'm using the resulting resource attributes to populate a Python template that will be zipped and uploaded as a lambda function. The Python script requires the volume-id from this instance's EBS block device.
data "template_file" "ebs_backup_lambda_with_snapshot_template" {
count = "${var.master_with_snapshot}"
template = "${file("${path.module}/jenkins_lambda_ebs_backup.py.tpl")}"
vars {
volume_id = "${aws_instance.jenkins_master_with_snap.ebs_block_device.???.volume_id}"
}
}
Onto the actual problem: I do not know how to properly reference the volume ID in the vars section of the template_file resource above. Here is the resulting state:
ebs_block_device.# = 1
ebs_block_device.1440725774.delete_on_termination = true
ebs_block_device.1440725774.device_name = /dev/xvdf
ebs_block_device.1440725774.encrypted = true
ebs_block_device.1440725774.iops = 900
ebs_block_device.1440725774.snapshot_id = snap-1111111111111
ebs_block_device.1440725774.volume_id = vol-1111111111111
ebs_block_device.1440725774.volume_size = 300
ebs_block_device.1440725774.volume_type = gp2
ebs_optimized = false
root_block_device.# = 1
root_block_device.0.delete_on_termination = false
root_block_device.0.iops = 0
root_block_device.0.volume_id = vol-1111111111111
root_block_device.0.volume_size = 8
root_block_device.0.volume_type = standard
The problem is that the index for the EBS volume is that insane integer 1440725774. I have no idea why that is occurring. In the console, there's only a single map in the list I'm interested in:
> aws_instance.jenkins_master_with_snap.ebs_block_device
[
{ delete_on_termination = 1 device_name = /dev/xvdf encrypted = 1 iops = 900 snapshot_id = snap-1111111111111 volume_id = vol-1111111111111 volume_size = 300 volume_type = gp2}
]
And it appears the only way to reference any of those keys is to use that index value directly:
> aws_instance.jenkins_master_with_snap.ebs_block_device.1440725774.volume_id
vol-1111111111111
Is there any way to reliably reference a single element in a list like this when I have no idea what the index is going to be? I can't just hardcode that integer into the template_file resource above and assume it's going to be the same every time. Does anyone have any clues as to why this is occurring in the first place?
Perhaps instead of inlining an ebs_block_device block, create a separate aws_ebs_volume resource and attach it with an aws_volume_attachment, then reference the aws_ebs_volume.name.id attribute to get the ID you need. (The huge index shows up because ebs_block_device is stored as a set, so Terraform keys each element by a hash of its contents rather than by a stable position.)
Example (extended from the example code in aws_volume_attachment):
resource "aws_volume_attachment" "ebs_att" {
device_name = "/dev/sdh"
volume_id = "${aws_ebs_volume.example.id}"
instance_id = "${aws_instance.web.id}"
}
resource "aws_instance" "web" {
ami = "ami-21f78e11"
availability_zone = "us-west-2a"
instance_type = "t1.micro"
tags {
Name = "HelloWorld"
}
subnet_id = "<REDACTED>"
}
resource "aws_ebs_volume" "example" {
availability_zone = "us-west-2a"
size = 1
}
data "template_file" "example" {
template = "Your volume ID is $${volume_id}"
vars {
volume_id = "${aws_ebs_volume.example.id}"
}
}
output "custom_template" {
value = "${data.template_file.example.rendered}"
}
The resultant output:
Outputs:
custom_template = Your volume ID is vol-0b1064d4ca6f89a15
You can then use ${aws_ebs_volume.example.id} in your template vars to populate your lambda.

How to correctly use Count and pick multiple az subnets in Terraform

I am trying to implement a module that spins up a number of instances in subnets that were already created (by Terraform), but I am not sure how to actually use count in modules, nor how to pick values from the S3-backed remote state data source so the instances are spread across multiple AZs. Here is what my resource in the module directory looks like:
resource "aws_instance" "ec2-instances" {
count = "${var.count_num }"
ami = "${data.aws_ami.ubuntu.id}"
instance_type = "${var.machine_type}"
key_name = "${var.key_name}"
#vpc_security_group_ids = ["${aws_security_group.jumpbox-sec-group.id}"]
vpc_security_group_ids = ["${var.sec-group}"]
disable_api_termination = "${var.is_production ? true : false}"
subnet_id = "${element(var.es_stg_subnets, count.index)}" <--- This won't work , i need to use data-source as s3
tags {
#Name = "${var.master_name}-${count.index+1}"
Name = "${var.instance-tag}-${count.index+1}"
Type = "${var.instance-type-tag}"
}
root_block_device {
volume_size = "${var.instance-vol-size}"
volume_type = "gp2"
}
}
And here is the actual module:
module "grafana-stg" {
  source = "../../modules/services/gen-ec2"

  #ami_id                  = "${data.aws_ami.ubuntu.id}"
  instance_type            = "${var.grafana_machine_type}"
  key_name                 = "jumpbox"
  vpc_security_group_ids   = ["${aws_security_group.grafana-sec-group.id}"]
  #subnets                 = "${data.terraform_remote_state.s3_bucket_state.subnet-public-prod-1a}"
  subnet_id                = ??????????????????
  disable_api_termination  = "${var.is_production ? true : false}"
}
I would look at retrieving your subnets utilising a data source.
Utilising Data Sources
Terraform has the concept of data sources. You can pull information from AWS that you require for resources. In your gen-ec2.tf file -
// In order to get subnets, you need the VPC they belong to.
// Note you can filter on a variety of different tags.
data "aws_vpc" "selected" {
  tags {
    Name = "NameOfVPC"
  }
}

// This will then retrieve all subnet ids based on the filter
data "aws_subnet_ids" "private" {
  vpc_id = "${data.aws_vpc.selected.id}"

  tags {
    Tier = "private*"
  }
}

resource "aws_instance" "ec2-instances" {
  count                   = "${length(data.aws_subnet_ids.private.ids)}"
  ami                     = "${data.aws_ami.ubuntu.id}"
  instance_type           = "${var.machine_type}"
  key_name                = "${var.key_name}"
  vpc_security_group_ids  = ["${var.sec-group}"]
  disable_api_termination = "${var.is_production ? true : false}"
  subnet_id               = "${element(data.aws_subnet_ids.private.ids, count.index)}"

  tags {
    Name = "${var.instance-tag}-${count.index+1}"
    Type = "${var.instance-type-tag}"
  }

  root_block_device {
    volume_size = "${var.instance-vol-size}"
    volume_type = "gp2"
  }
}
Your module now looks like so -
module "grafana-stg" {
source = "../../modules/services/gen-ec2"
#ami_id = "${data.aws_ami.ubuntu.id}"
instance_type = "${var.grafana_machine_type}"
key_name = "jumpbox"
vpc_security_group_ids = ["${aws_security_group.grafana-sec-group.id}"]
disable_api_termination = "${var.is_production ? true : false}"
}
For me, as I am using Terraform v0.12.5, the snippet below worked fine (in 0.12 the ids attribute is a set, so it has to be converted with tolist() before it can be indexed):
data "aws_subnet_ids" "public_subnet_list" {
vpc_id = "${var.vpc_id}"
tags = {
Tier = "Public"
}
}
resource "aws_instance" "example" {
count = 2
ami = "ami-0c55b159cbfafe1f0"
instance_type = "t2.micro"
subnet_id = tolist(data.aws_subnet_ids.public_subnet_list.ids)[count.index]
}