Terraform aws secrets - nested object variable - amazon-web-services

I have a terraform template that creates an AWS ECS task.
I filled a variable with a list of objects like this:
`
variables.tf
variable "microservices" {
description = "the microservices to implement"
type = list(object({
name = string,
port = number,
secrets = optional(list(object({
key = string,
arn = string
})))
}))
`
Then in my main.tf I have the following:
`
main.tf
resource "aws_ecs_task_definition" "task_definition" {
count = length("${var.microservices}")
family = "${var.microservices[count.index].name}-${var.environment}"
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
cpu = 1024
memory= 2048
execution_role_arn = "arn:aws:iam::xxxxx:role/service-role/xxxx-test-service-role"
container_definitions = jsonencode([
{
name = "${var.microservices[count.index].name}"
image = "${aws_ecr_repository.microservices_ecr_repos[count.index].repository_url}"
cpu = 1
essential = true
Ulimits = [{
Name = "nofile"
SoftLimit = 65535
HardLimit = 65535
}]
//length("${var.microservices[count.index].secrets}") > 0 ?
Secrets = [{
Name = length("${var.microservices[count.index].secrets}") > 0 ? "${var.microservices[count.index].secrets[0].key}" : 0
ValueFrom = length("${var.microservices[count.index].secrets}") > 0 ? "${var.microservices[count.index].secrets[0].arn}" : 0
//Name = "${var.microservices[count.index].secrets[0].key}"
//ValueFrom = "${var.microservices[count.index].secrets[0].arn}"
`
I don't understand how I can create Secrets by parsing the variables.
The secrets can be optional (they may or may not exist).
I would need a sort of for_each only in the Secrets section, in order to check whether the secret exists in the input and then fill this field.
An example of inputs is the following:
`
microservices = [
{
"name" = "api",
"port" = 3000,
"secrets" = [{ "key" = "test123", "arn" = "0123"},{ "key" = "testXXX", "arn" = "1010"}] },
{
"name" = "web",
"port" = 3000
"secrets" = [{ "key" = "test456", "arn" = "4567"}]
}]
`
Has anyone approached this kind of issue/configuration? What I would like to achieve is to create a task definition in AWS ECS with a secrets field (or an empty secrets section) based on the microservices input.
I tested a different data structure like here:
flatten object made of nested list in terraform
But in this scenario I was able to create the new data structure, yet when I create the resource (e.g. aws_ecs_task_definition) with a for_each it replicates some configuration, such as ECS tasks with the same name:
`
locals {
microservices_and_secrets = merge([
for ecs_taks, group in var.microservices:
{
for secrets_key, secret in group["secrets"]:
"${ecs_taks}-${secrets_key}" => {
name = group["name"]
port = group["port"]
secret = secret
}
}
]...)
}
`
`
resource "aws_ecs_task_definition" "task_definition" {
for_each = local.microservices_and_secrets
family = "${each.value.name}-${var.environment}" <-- ISSUE with creation because it replicates the ecs task microservice name due to foreach
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
cpu = 1024
memory= 2048
`
The problem is also that with this solution I can't have a microservice without any secret. e.g. the issue is the following:
`
microservices = [
{
"name" = "api",
"port" = 3000,
"secrets" = [{ "key" = "test123", "arn" = "0123"},{ "key" = "testXXX", "arn" = "1010"}] },
{
"name" = "web",
"port" = 3000
"secrets" = [{ "key" = "test456", "arn" = "4567"}]
},
{
"name" = "ciaotask",
"port" = 3000
}
]
`
`
Error: Iteration over null value
│
│ on main-aws-ecs.tf line 153, in locals:
│ 152: {
│ 153: for secrets_key, secret in group["secrets"]:
│ 154: "${ecs_taks}-${secrets_key}" => {
│ 155: name = group["name"]
│ 156: port = group["port"]
│ 157: secret = secret
│ 158: }
│ 159: }
│ ├────────────────
│ │ group["secrets"] is null
│
│ A null value cannot be used as the collection in a 'for' expression.
`
Could anyone help me manage the ECS task creation based on the microservices input posted above?
The question is: how can I create one aws_ecs_task_definition for each microservice present in the microservices variable, where each can have zero to n Secrets, starting from the microservices list of objects?

I solved the issue.
I started from this guide https://codeburst.io/how-to-securely-use-aws-secrets-manager-to-inject-secrets-into-ecs-using-infrastructure-as-code-ff2b39b420b6
then I created a template file like this:
`container_definitions.json.tpl
[{
"name" : "${name}",
"image": "${image}",
"cpu" : 1,
"essential" : true,
"Ulimits" : [{
"Name" : "nofile",
"SoftLimit" : 65535,
"HardLimit" : 65535
}],
"Secrets" : ${secrets},
"Environment" : ${environment},
"LogConfiguration" : {
"LogDriver" : "awslogs",
"Options" : {
"awslogs-group" : "${awslogs-group}",
"awslogs-region" : "${aws_region}",
"awslogs-stream-prefix" : "ecs"
}
},
"portMappings" : [
{
"containerPort" : 3000,
"hostPort" : 3000
}
]
}]
`
in my main.tf instead I created the resources in this way:
`
*/
data "template_file" "container_definitions" {
count = length("${var.microservices}")
template = file("${path.module}/template_dir/container_definitions.json.tpl")
vars = {
aws_region = "${var.aws_region}"
cpu = 1
image = "${aws_ecr_repository.microservices_ecr_repos[count.index].repository_url}"
name = "${var.microservices[count.index].name}"
awslogs-group = "${aws_cloudwatch_log_group.cloudwatch_log_groups[count.index].id}"
environment = jsonencode("${var.microservices[count.index].environment}")
secrets = jsonencode("${var.microservices[count.index].secrets}")
}
}
/*
AWS ECS Task definition
*/
resource "aws_ecs_task_definition" "task_definition" {
count = length("${var.microservices}")
family = "${var.microservices[count.index].name}-${var.environment}"
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
cpu = "${var.microservices[count.index].cpu}"
memory= "${var.microservices[count.index].memory}"
execution_role_arn = "${aws_iam_role.task_execution_roles[count.index].arn}"
task_role_arn = "${aws_iam_role.task_execution_roles[count.index].arn}"
container_definitions = "${data.template_file.container_definitions[count.index].rendered}" //file("./containers_file/api.json")
}
`
In this way I was able to create a task definition in aws ecs with 0..n secrets and 0..n environment variables based on this (e.g.) input.
`
microservices = [
{
"name" = "api",
"port" = 3000,
"cpu" = 1024,
"memory" = 2048,
"secrets" = [{ "name" = "test123", "valuefrom" = "0123"},{ "name" = "testXXX", "valuefrom" = "1010"}] },
{
"name" = "web",
"port" = 3000,
"cpu" = 1024,
"memory" = 2048,
"secrets" = [{ "name" = "test456", "valuefrom" = "4567"}],
"environment" = [{ "name" = "weenv", "value" = "emi_is_ok" },{ "name" = "weenv123", "value" = "emi_is_ok123" } ]
},
{
"name" = "ciaotask",
"port" = 3000
"cpu" = 1024,
"memory" = 2048
}
]
`
I hope this could help someone else who ran into the same issue.

Related

Dynamic Task Definition in Terraform

At the moment I have multiple aws_ecs_task_definition that are blueprints for my ECS tasks. Rather than having a separate module to store each of these unique definitions, I would like to somehow structure my task definition to be agnostic so that it can be used for each of my containers. The main challenge I am facing is that the environment variables differ between task definitions. For example, one task definition might have 4 environment variables and another might have 12. Also, some of the values these variables have are provided by other modules by way of using Outputs (e.g. databases endpoints). Below is an example of an existing task definition.
resource "aws_ecs_task_definition" "service" {
execution_role_arn = var.ecsTaskExecutionRole
task_role_arn = var.ecsTaskExecutionRole
requires_compatibilities = ["FARGATE"]
family = "${var.name}-definition"
container_definitions = jsonencode([
{
"environment" : [
{ "name" : "database_endpoint",
"value" : "${var.database_endpoint}"
},
{ "name" : "database_password",
"value" : "admin"
}
],
"healthCheck" : {
"command" : [
"CMD-SHELL",
"echo \"hello\""
],
"interval" : 10,
"timeout" : 60,
"retries" : 10,
"startPeriod" : 60
},
command = ["start", "--auto-build"]
name = "${var.name}"
image = "${var.image}"
cpu = 1024
memory = 2048
essential = true
portMappings = [
{
containerPort = 7278
hostPort = 7278
}
]
},
])
network_mode = "awsvpc"
cpu = 1024
memory = 2048
runtime_platform {
operating_system_family = "LINUX"
}
}
In essence, I want to be able to dynamically build the environment list based on an object that is provided as a variable when making the module call. For example, that variable might look like this and the values that are empty string are updated at a later point:
containers = {
image1 = {
image = "imageUrl"
}
image2 = {
image = "imageUrl"
documentdb_endpoint = ""
}
image3 = {
image = "imageUrl"
redis_endpoint = ""
}
image4 = {
image = "imageUrl"
}
}
Would appreciate any advice to help make the code more concise.

"Error: Invalid count argument" error when trying to find the routing table's id from the subnet ids to add new route entries

I am trying to update the routing tables of the subnets in VPC A and VPC B to include a route to a VPC peering end-point. This is my terraform code.
main.tf
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.14.2"
for_each = local.vpc_list
name = each.key
cidr =each.value.vpc_cidr
azs = each.value.vpc_azs
public_subnets = each.value.vpc_public_subnets
private_subnets = each.value.vpc_private_subnets
enable_nat_gateway = each.value.vpc_enable_nat_gateway
enable_vpn_gateway = each.value.vpc_enable_vpn_gateway
tags = each.value.vpc_tags
public_subnet_tags = each.value.vpc_public_subnet_tags
private_subnet_tags = each.value.vpc_private_subnet_tags
}
resource "aws_vpc_peering_connection" "vpc_peering_conn" {
peer_owner_id = data.aws_caller_identity.current.account_id
peer_vpc_id = module.vpc["vpcB"].vpc_id
vpc_id = module.vpc["vpcA"].vpc_id
auto_accept = true
tags = {
Name = "VPC Peering between ${module.vpc["vpcA"].name} and ${module.vpc["vpcB"].name}."
}
}
data "aws_route_tables" "vpcA_public_subnet_rts" {
depends_on = [ module.vpc ]
vpc_id = module.vpc["vpcA"].vpc_id
filter {
name = "tag:Subnet"
values = ["*public*"]
}
}
resource "aws_route" "route_vpcA" {
count = length(data.aws_route_tables.vpcA_public_subnet_rts.ids)
route_table_id = tolist(data.aws_route_tables.vpcA_public_subnet_rts.ids)[count.index]
destination_cidr_block = "10.10.11.0/24"
vpc_peering_connection_id = aws_vpc_peering_connection.vpc_peering_conn.id
}
data "aws_route_tables" "vpcB_private_subnet_rts" {
depends_on = [ module.vpc ]
vpc_id = module.vpc["vpcB"].vpc_id
filter {
name = "tag:Subnet"
values = ["*private*"]
}
}
resource "aws_route" "route_vpcB" {
count = length(data.aws_route_tables.vpcB_private_subnet_rts.ids)
route_table_id = tolist(data.aws_route_tables.vpcB_private_subnet_rts.ids)[count.index]
destination_cidr_block = "10.10.10.0/24"
vpc_peering_connection_id = aws_vpc_peering_connection.vpc_peering_conn.id
}
locals.tf
locals {
vpc_list = {
"vpcA" = {
vpc_cidr = "10.10.10.0/24",
vpc_azs = ["ap-southeast-1a"],
vpc_public_subnets = ["10.10.10.0/25"],
vpc_private_subnets = ["10.10.10.128/25"],
vpc_enable_nat_gateway = false,
vpc_enable_vpn_gateway = false,
vpc_tags = {
Name= "VPC A"
Terraform = "true"
Environment = "1st VPC"
Facing= "public and private"
},
vpc_public_subnet_tags = {
Subnet = "vpcA_public_subnet"
},
vpc_private_subnet_tags = {
Subnet = "vpcA_private_subnet"
},
},
"vpcB" = {
vpc_cidr = "10.10.11.0/24",
vpc_azs = ["ap-southeast-1b"],
vpc_public_subnets = [],
vpc_private_subnets = ["10.10.11.0/24"],
vpc_enable_nat_gateway = false,
vpc_enable_vpn_gateway = false,
vpc_tags = {
Name= "VPC B"
Terraform = "true"
Environment = "2nd VPC"
Facing= "private"
},
vpc_public_subnet_tags = {
Subnet = "vpcB_public_subnet"
},
vpc_private_subnet_tags = {
Subnet = "vpcB_private_subnet"
},
},
}
}
locals {
routing_table = {
route_peer_con_vpcA = {
vpc_id = module.vpc["vpcA"].vpc_id
route = {
route_peer_to_vpcB = {
cidr_block = "10.10.11.0/24"
}
}
}
route_peer_con_vpcB = {
vpc_id = module.vpc["vpcB"].vpc_id
route = {
route_peer_to_vpcA = {
cidr_block = "10.10.10.0/24"
}
}
}
}
}
When I run terraform plan or apply I get the error below. Does anyone know how to address the issue, or is there a better way to achieve what I want?
I saw this post, "terraform: data.aws_subnet, value of 'count' cannot be computed", but I am not sure how to refer to the output of the subnet's id for the routing table id.
Thanks.
➜ 01-tf-deploy terraform apply --auto-approve
data.aws_region.current: Reading...
data.aws_caller_identity.current: Reading...
data.aws_region.current: Read complete after 0s [id=ap-southeast-1]
data.aws_ami.amzlinux2: Reading...
data.aws_ami.amzlinux2: Read complete after 1s [id=ami-0c802847a7dd848c0]
data.aws_caller_identity.current: Read complete after 1s [id=500295128231]
╷
│ Error: Invalid count argument
│
│ on main.tf line 70, in resource "aws_route" "route_vpcA":
│ 70: count = length(data.aws_route_tables.vpcA_public_subnet_rts.ids)
│
│ The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count
│ depends on.
╵
╷
│ Error: Invalid count argument
│
│ on main.tf line 88, in resource "aws_route" "route_vpcB":
│ 88: count = length(data.aws_route_tables.vpcB_private_subnet_rts.ids)
│
│ The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count
│ depends on.
╵

Add values to deep nested map in Terraform

I'm using the AWS EKS module 18.20.5 and I'm trying to add values to a deeply nested map. The map is
variable "eks_managed_node_groups" {
description = "Map of managed node group definitions to create"
type = any
default = {
management_cluster_on_demand = {
desired_capacity = 3
max_capacity = 10
min_capacity = 3
instance_types = ["c5.2xlarge"]
capacity_type = "ON_DEMAND"
k8s_labels = {
Environment = "testing"
GithubRepo = "infrastructure-modules-kubernetes-cluster"
GithubSource = "terraform-aws-modules"
}
additional_tags = {
cluster = "management_cluster_new"
}
block_device_mappings = {
xvda = {
device_name = "/dev/xvda"
ebs = {
volume_size = 50
volume_type = "gp2"
delete_on_termination = true
}
}
}
}
}
}
What I am aiming to do is add some extra values into the ebs section, specifically
encrypted = true
kms_key_id = module.kms.arn
This would force any volumes added to a node group, to have their EBS volume encrypted with a KMS key.
I've tried using locals to add the values, but the issue is when I get to the xbda section, it tries to loop through the strings and fails
locals {
managed_nodes = flatten([
for group in var.eks_managed_node_groups: [
for vol in group.block_device_mappings: [
for settings in vol: [
for values in settings: values
]
]
]
])
}
Which when running Terraform plan, results in the following error
│ Error: Iteration over non-iterable value
│
│ on main.tf line 9, in locals:
│ 8: for settings in vol: [
│ 9: for values in settings: values
│ 10: ]
│
│ A value of type string cannot be used as the collection in a 'for' expression.
Is this even possible to accomplish?
Thanks.
I think the following should do the job:
locals {
eks_managed_node_groups = {
for group_name, group in var.eks_managed_node_groups:
group_name => merge(group, {block_device_mappings = {
for device_name, device in group.block_device_mappings:
device_name => merge(device,
{ebs=merge(device.ebs, {
encrypted = true
kms_key_id = "module.kms.arn"
})})
}})
}
}
resulting in:
{
"management_cluster_on_demand" = {
"additional_tags" = {
"cluster" = "management_cluster_new"
}
"block_device_mappings" = {
"xvda" = {
"device_name" = "/dev/xvda"
"ebs" = {
"delete_on_termination" = true
"encrypted" = true
"kms_key_id" = "module.kms.arn"
"volume_size" = 50
"volume_type" = "gp2"
}
}
}
"capacity_type" = "ON_DEMAND"
"desired_capacity" = 3
"instance_types" = [
"c5.2xlarge",
]
"k8s_labels" = {
"Environment" = "testing"
"GithubRepo" = "infrastructure-modules-kubernetes-cluster"
"GithubSource" = "terraform-aws-modules"
}
"max_capacity" = 10
"min_capacity" = 3
}
}
I don't have your module.kms.arn, so I just use it as string "module.kms.arn". So you have to change it back to module.kms.arn.

terraform keeps forcing new resource/force replacement for container definition with default parameters

I am bringing up aws_ecs_task_defintion with following terraform configuration.
I pass local.image_tag as variable to control the deployment of our ecr image through terraform.
I am able to bring up the ecs_cluster on initial terraform plan/apply cycle just fine.
However, on each subsequent terraform plan/apply cycle, terraform forces a new container definition and therefore redeploys the entire task definition, even though our ECR image tag local.image_tag remains the same.
This behaviour is causing an unintended task definition recycle without any changes to the ECR image — terraform is just forcing default values.
TF Config
resource "aws_ecs_task_definition" "this_task" {
family = "this-service"
execution_role_arn = var.this_role
task_role_arn = var.this_role
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = 256
memory = var.env != "prod" ? 512 : 1024
tags = local.common_tags
# Log the to datadog if it's running in the prod account.
container_definitions = (
<<TASK_DEFINITION
[
{
"essential": true,
"image": "AWS_ACCOUNT_ID.dkr.ecr.us-west-2.amazonaws.com/thisisservice:${local.image_tag}",
"environment" :[
{"name":"ID", "value":"${jsondecode(data.aws_secretsmanager_secret_version.this_decrypt.secret_string)["id"]}"},
{"name":"SECRET","value":"${jsondecode(data.aws_secretsmanager_secret_version.this_decrypt.secret_string)["secret"]}"},
{"name":"THIS_SOCKET_URL","value":"${local.websocket_url}"},
{"name":"THIS_PLATFORM_API","value":"${local.platform_api}"},
{"name":"REDISURL","value":"${var.redis_url}"},
{"name":"BASE_S3","value":"${aws_s3_bucket.ec2_vp.id}"}
],
"name": "ec2-vp",
"logConfiguration": {
"logDriver": "awsfirelens",
"options": {
"Name": "datadog",
"apikey": "${jsondecode(data.aws_secretsmanager_secret_version.datadog_api_key[0].secret_string)["api_key"]}",
"Host": "http-intake.logs.datadoghq.com",
"dd_service": "this",
"dd_source": "this",
"dd_message_key": "log",
"dd_tags": "cluster:${var.cluster_id},Env:${var.env}",
"TLS": "on",
"provider": "ecs"
}
},
"portMappings": [
{
"containerPort": 443,
"hostPort": 443
}
]
},
{
"essential": true,
"image": "amazon/aws-for-fluent-bit:latest",
"name": "log_router",
"firelensConfiguration": {
"type": "fluentbit",
"options": { "enable-ecs-log-metadata": "true" }
}
}
]
TASK_DEFINITION
)
}
-/+ resource "aws_ecs_task_definition" "this_task" {
~ arn = "arn:aws:ecs:ca-central-1:AWS_ACCOUNT_ID:task-definition/this:4" -> (known after apply)
~ container_definitions = jsonencode(
~ [ # forces replacement
~ {
- cpu = 0 -> null
environment = [
{
name = "BASE_S3"
value = "thisisthevalue"
},
{
name = "THIS_PLATFORM_API"
value = "thisisthevlaue"
},
{
name = "SECRET"
value = "thisisthesecret"
},
{
name = "ID"
value = "thisistheid"
},
{
name = "THIS_SOCKET_URL"
value = "thisisthevalue"
},
{
name = "REDISURL"
value = "thisisthevalue"
},
]
essential = true
image = "AWS_ACCOUNT_ID.dkr.ecr.us-west-2.amazonaws.com/this:v1.0.0-develop.6"
logConfiguration = {
logDriver = "awsfirelens"
options = {
Host = "http-intake.logs.datadoghq.com"
Name = "datadog"
TLS = "on"
apikey = "thisisthekey"
dd_message_key = "log"
dd_service = "this"
dd_source = "this"
dd_tags = "thisisthetags"
provider = "ecs"
}
}
- mountPoints = [] -> null
name = "ec2-vp"
~ portMappings = [
~ {
containerPort = 443
hostPort = 443
- protocol = "tcp" -> null
},
]
- volumesFrom = [] -> null
} # forces replacement,
~ {
- cpu = 0 -> null
- environment = [] -> null
essential = true
firelensConfiguration = {
options = {
enable-ecs-log-metadata = "true"
}
type = "fluentbit"
}
image = "amazon/aws-for-fluent-bit:latest"
- mountPoints = [] -> null
name = "log_router"
- portMappings = [] -> null
- user = "0" -> null
- volumesFrom = [] -> null
} # forces replacement,
]
)
cpu = "256"
execution_role_arn = "arn:aws:iam::AWS_ACCOUNTID:role/thisistherole"
family = "this"
~ id = "this-service" -> (known after apply)
memory = "512"
network_mode = "awsvpc"
requires_compatibilities = [
"FARGATE",
]
~ revision = 4 -> (known after apply)
tags = {
"Cluster" = "this"
"Env" = "this"
"Name" = "this"
"Owner" = "this"
"Proj" = "this"
"SuperCluster" = "this"
"Terraform" = "true"
}
task_role_arn = "arn:aws:iam::AWS_ACCOUNT+ID:role/thisistherole"
}
Above is the terraform plan that is forcing a new task definition/container definition.
As you can see, terraform is replacing all default values with null or empty. I have double-checked the terraform.tfstate file generated from the previous run, and those values are exactly the same as shown in the above plan.
I am not sure why this unintended behaviour is happening and would like some clues on how to fix it.
I am using terraform 0.12.25 and the latest terraform aws provider.
There is a known terraform aws provider bug for this issue.
In order to make terraform not replace the running task / container definition, I have to fill out all the default values that its showing on terraform plan with either null or empty sets of configuration.
Once all the parameters are filled out, I ran the terafform plan/apply cycle again to ensure its not replacing the container definition like it was doing it before.
I got the same issue when I have aws-for-fluent-bit as a sidecar container. Adding "user": "0" to this container definition is the minimal change that prevents the task definition from being force-recreated.
{
"name": "log_router",
"image": "public.ecr.aws/aws-observability/aws-for-fluent-bit:latest",
"logConfiguration": null,
"firelensConfiguration": {
"type": "fluentbit",
"options": {
"enable-ecs-log-metadata": "true"
}
},
"user": "0"
}

Terraform: List of AMI specific to ubuntu 20.08 LTS AWS

Problem: I am using terraform to get a list of AMI for a specific OS - ubuntu 20.08
I have checked different examples link
When I use the script it does not give me a list of AMIs.
Script
data "aws_ami" "ubuntu" {
most_recent = true
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-20.08-amd64-server-*"]
}
filter {
name = "virtualization - type"
values = ["hvm"]
}
owners = ["AWS"]
}
I have referred the below link as well
How are data sources used in Terraform?
Output:
[ec2-user#ip-172-31-84-148 ~]$ terraform plan
provider.aws.region
The region where AWS operations will take place. Examples
are us-east-1, us-west-2, etc.
Enter a value: us-east-1
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
data.aws_ami.std_ami: Refreshing state...
------------------------------------------------------------------------
No changes. Infrastructure is up-to-date.
This means that Terraform did not detect any differences between your configuration and real physical resources that exist. As a result, no actions need to be performed.
I am not sure where I am going wrong; I have checked a lot of links, some of which I have listed below.
Your data should be:
data "aws_ami" "ubuntu" {
most_recent = true
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["099720109477"]
}
output "test" {
value = data.aws_ami.ubuntu
}
The owner of Ubuntu is not AWS, and the image is ubuntu-focal-20.04-amd64-server-, not ubuntu-xenial-20.08-amd64-server-.
The above results in (us-east-1):
{
"architecture" = "x86_64"
"arn" = "arn:aws:ec2:us-east-1::image/ami-0dba2cb6798deb6d8"
"block_device_mappings" = [
{
"device_name" = "/dev/sda1"
"ebs" = {
"delete_on_termination" = "true"
"encrypted" = "false"
"iops" = "0"
"snapshot_id" = "snap-0f06f1549ff7327c9"
"volume_size" = "8"
"volume_type" = "gp2"
}
"no_device" = ""
"virtual_name" = ""
},
{
"device_name" = "/dev/sdb"
"ebs" = {}
"no_device" = ""
"virtual_name" = "ephemeral0"
},
{
"device_name" = "/dev/sdc"
"ebs" = {}
"no_device" = ""
"virtual_name" = "ephemeral1"
},
]
"creation_date" = "2020-09-08T00:55:25.000Z"
"description" = "Canonical, Ubuntu, 20.04 LTS, amd64 focal image build on 2020-09-07"
"filter" = [
{
"name" = "name"
"values" = [
"ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*",
]
},
{
"name" = "virtualization-type"
"values" = [
"hvm",
]
},
]
"hypervisor" = "xen"
"id" = "ami-0dba2cb6798deb6d8"
"image_id" = "ami-0dba2cb6798deb6d8"
"image_location" = "099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20200907"
"image_type" = "machine"
"most_recent" = true
"name" = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20200907"
"owner_id" = "099720109477"
"owners" = [
"099720109477",
]
"product_codes" = []
"public" = true
"root_device_name" = "/dev/sda1"
"root_device_type" = "ebs"
"root_snapshot_id" = "snap-0f06f1549ff7327c9"
"sriov_net_support" = "simple"
"state" = "available"
"state_reason" = {
"code" = "UNSET"
"message" = "UNSET"
}
"tags" = {}
"virtualization_type" = "hvm"
}