Terraform Output via Jenkins - amazon-web-services

I'm unable to find vpc_id and public_subnet.0 in the terraform my-vpc output.
Every single time, I run the below command:
ansible-playbook playbook1.yml -e "vpc_id=$(terraform output my-vpc.vpc_id) vpc_subnet_id=$(terraform output my-vpc.public_subnets.0)" -vvv
But it prints terraform output my-vpc
-bash-4.2$ terraform output my-vpc
{
"azs" = tolist([
"eu-west-1a",
"eu-west-1b",
"eu-west-1c",
])
"cgw_arns" = []
"cgw_ids" = []
"database_internet_gateway_route_id" = ""
"database_ipv6_egress_route_id" = ""
"database_nat_gateway_route_ids" = []
"database_network_acl_arn" = ""
"database_network_acl_id" = ""
"database_route_table_association_ids" = [
"rtbassoc-0c0a897cce6cbac74",
"rtbassoc-02c2f605eb9988418",
"rtbassoc-0b14ef189c6c39da5",
]
"database_route_table_ids" = [
"rtb-038a86ea7eef50de1",
]
"database_subnet_arns" = [
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-0ef73bc4e91557920",
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-0f044d3b21cecbca6",
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-042312717d7ed4fe6",
]
"database_subnet_group" = "my-vpc"
"database_subnet_group_name" = "my-vpc"
"database_subnets" = [
"subnet-0ef73bc4e91557920",
"subnet-0f044d3b21cecbca6",
"subnet-042312717d7ed4fe6",
]
"database_subnets_cidr_blocks" = tolist([
"10.0.21.0/24",
"10.0.22.0/24",
"10.0.23.0/24",
])
"database_subnets_ipv6_cidr_blocks" = tolist([])
"default_network_acl_id" = "acl-08f647113ea769f4f"
"default_route_table_id" = "rtb-0b55d9919c1133316"
"default_security_group_id" = "sg-050c394121822118f"
"default_vpc_arn" = ""
"default_vpc_cidr_block" = ""
"default_vpc_default_network_acl_id" = ""
"default_vpc_default_route_table_id" = ""
"default_vpc_default_security_group_id" = ""
"default_vpc_enable_dns_hostnames" = ""
"default_vpc_enable_dns_support" = ""
"default_vpc_id" = ""
"default_vpc_instance_tenancy" = ""
"default_vpc_main_route_table_id" = ""
"dhcp_options_id" = ""
"egress_only_internet_gateway_id" = ""
"elasticache_network_acl_arn" = ""
"elasticache_network_acl_id" = ""
"elasticache_route_table_association_ids" = []
"elasticache_route_table_ids" = [
"rtb-0bbad3836a81d51cd",
]
"elasticache_subnet_arns" = []
"elasticache_subnet_group" = ""
"elasticache_subnet_group_name" = ""
"elasticache_subnets" = []
"elasticache_subnets_cidr_blocks" = tolist([])
"elasticache_subnets_ipv6_cidr_blocks" = tolist([])
"igw_arn" = "arn:aws:ec2:eu-west-1:008421212484:internet-gateway/igw-006457c32d869d138"
"igw_id" = "igw-006457c32d869d138"
"intra_network_acl_arn" = ""
"intra_network_acl_id" = ""
"intra_route_table_association_ids" = []
"intra_route_table_ids" = []
"intra_subnet_arns" = []
"intra_subnets" = []
"intra_subnets_cidr_blocks" = tolist([])
"intra_subnets_ipv6_cidr_blocks" = tolist([])
"name" = "my-vpc"
"nat_ids" = [
"eipalloc-0a8b17a3d8101a5a0",
]
"nat_public_ips" = tolist([
"18.200.43.46",
])
"natgw_ids" = [
"nat-0c1f640cfa6bc1b9c",
]
"outpost_network_acl_arn" = ""
"outpost_network_acl_id" = ""
"outpost_subnet_arns" = []
"outpost_subnets" = []
"outpost_subnets_cidr_blocks" = tolist([])
"outpost_subnets_ipv6_cidr_blocks" = tolist([])
"private_ipv6_egress_route_ids" = []
"private_nat_gateway_route_ids" = [
"r-rtb-0bbad3836a81d51cd1080289494",
]
"private_network_acl_arn" = ""
"private_network_acl_id" = ""
"private_route_table_association_ids" = [
"rtbassoc-046f37259950a7f95",
"rtbassoc-0be769cfe2ebd6034",
"rtbassoc-03dab517c9aa2789c",
]
"private_route_table_ids" = [
"rtb-0bbad3836a81d51cd",
]
"private_subnet_arns" = [
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-038ffd52b102ad03d",
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-07fff3a676d8792b7",
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-09d7264428c109220",
]
"private_subnets" = [
"subnet-038ffd52b102ad03d",
"subnet-07fff3a676d8792b7",
"subnet-09d7264428c109220",
]
"private_subnets_cidr_blocks" = tolist([
"10.0.11.0/24",
"10.0.12.0/24",
"10.0.13.0/24",
])
"private_subnets_ipv6_cidr_blocks" = tolist([])
"public_internet_gateway_ipv6_route_id" = ""
"public_internet_gateway_route_id" = "r-rtb-095c4a905b733cb521080289494"
"public_network_acl_arn" = ""
"public_network_acl_id" = ""
"public_route_table_association_ids" = [
"rtbassoc-08b42ea2c1b00f82c",
"rtbassoc-06d913cc918b08721",
"rtbassoc-04437e6c3b6deea45",
]
"public_route_table_ids" = [
"rtb-095c4a905b733cb52",
]
"public_subnet_arns" = [
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-08dbadd83e2dfea89",
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-07b49f530d35a3899",
"arn:aws:ec2:eu-west-1:008421212484:subnet/subnet-057153ea6183d363f",
]
"public_subnets" = [
"subnet-08dbadd83e2dfea89",
"subnet-07b49f530d35a3899",
"subnet-057153ea6183d363f",
]
"public_subnets_cidr_blocks" = tolist([
"10.0.1.0/24",
"10.0.2.0/24",
"10.0.3.0/24",
])
"public_subnets_ipv6_cidr_blocks" = tolist([])
"redshift_network_acl_arn" = ""
"redshift_network_acl_id" = ""
"redshift_public_route_table_association_ids" = []
"redshift_route_table_association_ids" = []
"redshift_route_table_ids" = tolist([
"rtb-0bbad3836a81d51cd",
])
"redshift_subnet_arns" = []
"redshift_subnet_group" = ""
"redshift_subnets" = []
"redshift_subnets_cidr_blocks" = tolist([])
"redshift_subnets_ipv6_cidr_blocks" = tolist([])
"this_customer_gateway" = {}
"vgw_arn" = ""
"vgw_id" = ""
"vpc_arn" = "arn:aws:ec2:eu-west-1:008421212484:vpc/vpc-081b13e816e14214c"
"vpc_cidr_block" = "10.0.0.0/16"
"vpc_enable_dns_hostnames" = true
"vpc_enable_dns_support" = true
"vpc_flow_log_cloudwatch_iam_role_arn" = ""
"vpc_flow_log_destination_arn" = ""
"vpc_flow_log_destination_type" = "cloud-watch-logs"
"vpc_flow_log_id" = ""
"vpc_id" = "vpc-081b13e816e14214c"
"vpc_instance_tenancy" = "default"
"vpc_ipv6_association_id" = ""
"vpc_ipv6_cidr_block" = ""
"vpc_main_route_table_id" = "rtb-0b55d9919c1133316"
"vpc_owner_id" = "008421212484"
"vpc_secondary_cidr_blocks" = tolist([])
}
I was trying with:
Different naming and paths.
Jenkins user privileges.
Destination of output.

Outputs are meant to be human-readable:
The terraform output command by default displays in a human-readable format, which can change over time to improve clarity.
That means it's not really easy to get the values using the usual terraform syntax and you would need to use a tool like jq [1]. So in your example, that would have to be something like:
VPC_ID=$(terraform output -json my-vpc | jq -r '.vpc_id')
VPC_SUBNET_ID=$(terraform output -json my-vpc | jq -r '.public_subnets[0]')
Followed by:
ansible-playbook playbook1.yml -e vpc_id=$VPC_ID -e vpc_subnet_id=$VPC_SUBNET_ID -vvv
My Ansible skills are a bit rusty, and there might be better ways of doing it, but this should get you started.
[1] https://developer.hashicorp.com/terraform/cli/commands/output#use-in-automation

Thanks for supporting my colleague; we found a solution:
jq ;)
ansible-playbook playbook1.yml -e "vpc_id=$(terraform output -json my-vpc|jq -r .vpc_id) vpc_subnet_id=$(terraform output -json my-vpc|jq -r .public_subnets[0])" -vvv

Related

for_each loop with dynamic block and values from tfvars

I'm trying to create certain BigQuery tables with time_partitioning with the dynamic block and I want to use the values from tfvars in runtime as follows:
./tables/tables.tf:
resource "google_bigquery_table" "tables" {
for_each = var.tables == [] ? [] : toset(var.tables)
dataset_id = var.db_id
deletion_protection = false
table_id = each.key
dynamic "time_partitioning" {
for_each = var.partitioned_tables
content {
type = "DAY"
field = time_partitioning.value.field
}
}
labels = {
environment = var.environment
application = var.application
}
schema = fileexists("${path.module}/${var.db_id}/${each.key}.json") ? file("${path.module}/${var.db_id}/${each.key}.json") : null
}
main.tf:
resource "google_bigquery_dataset" "database" {
count = length(var.dbs)
dataset_id = var.dbs[count.index].db_id
friendly_name = var.dbs[count.index].db_name
description = "TF"
location = "US"
delete_contents_on_destroy = var.delete_contents_on_destroy
labels = {
environment = var.environment
application = var.dbs[count.index].app_name
}
}
module "tables" {
source = "./tables"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
environment = var.environment
application = var.dbs[count.index].app_name
tables = var.dbs[count.index].tables
partitioned_tables = var.dbs[count.index].partitioned_tables
}
module "iam" {
source = "./iam"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
iam_members = var.dbs[count.index].iam_members
}
dev.tfvars:
region = "us-central1"
project_id = "some-project"
dbs = [
{
db_id = "dataset1"
db_name = "dataset1"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com",
}
]
tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
partitioned_tables = [
{
table = "daily_sales"
field = "sales_timestamp"
},
{
table = "daily_inventory"
field = "inventory_timestamp"
}
]
},
{
db_id = "dataset2"
db_name = "dataset2"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
}
]
tables = []
}
]
environment = "development"
delete_contents_on_destroy = true
var.dbs is type = list(any)
Getting:
The given value is not suitable for var.dbs declared at
variables.tf:9,1-15: all list elements must have the same type.
Thanks in advance!
list(any) does not mean that you can have elements of "any" type in your list. All elements must have same type, and you can't mix types, as you do now (i.e. second element is missing partitioned_tables). any only means that TF will infer the single type for the elements, but all elements must be of that single type. So you have three choices:
remove type = list(any)
Fully define your type with optional arguments, instead of using any
Add partitioned_tables to the second element:
[
{
db_id = "dataset1"
db_name = "dataset1"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com",
}
]
tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
partitioned_tables = [
{
table = "daily_sales"
field = "sales_timestamp"
},
{
table = "daily_inventory"
field = "inventory_timestamp"
}
]
},
{
db_id = "dataset2"
db_name = "dataset2"
app_name = "hello"
iam_members = [
{
role = "roles/bigquery.dataEditor"
member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
}
]
partitioned_tables = []
tables = []
}
]

Value from list of string is adding square brackets to the value

I am creating a dashboard for ECS, but the load balancer name is getting wrapped in [], as in "[ "mis-app-cluster" ]", because of the list-of-string type in the variable.
Is there any way by which we can remove the [] from the value?
List of string is mandatory.
data -
{
my-dashboard-name = "MIS-Dev-Cloudwatch-Dashboard-ECS"
aws-region = "eu-west-1"
targets = ["mis-app-cluster"]
metrics = ["CPUUtilization", "MemoryUtilization"]
service_name = ["mis-fileserver", "mis-nginx", "mis-odi", "mis-oraclebi", "mis-ords", "mis-restdataservices"]
aws-namespace = "AWS/ECS"
dim = "ClusterName"
stat = "Average"
period = 300
}
module (I have only added for ALB module below)
locals {
widget-def-alb = [for metric in var.metrics :
{
type = "metric",
x = 0,
y = 0
width = 18,
height = 8,
properties = {
metrics = [for targetgroup in var.target_groups : [var.aws-namespace, metric,"TargetGroup", targetgroup, var.dim, var.targets]],
title = "${var.aws-namespace}: ${metric}",
region = var.aws-region,
period = var.period,
stat = var.stat,
view = "timeSeries",
legend = {
position = "right"
}
}
}
]
}
resource "aws_cloudwatch_dashboard" "cw-dashboard-alb" {
count = var.aws-namespace == "AWS/ApplicationELB" ? 1 : 0
dashboard_name = var.dashboard-name
dashboard_body = jsonencode({
start = "-PT9H"
widgets = local.widget-def-alb
})
}
Code -
module "create-dashboard" {
source = "../"
for_each = { for service in local.dashboards : service.my-dashboard-name => service if length(regexall(".*ALB.*", service.my-dashboard-name))
> 0 }
dashboard-name = each.value.my-dashboard-name
aws-region = each.value.aws-region
targets = each.value.targets
metrics = each.value.metrics
aws-namespace = each.value.aws-namespace
dim = each.value.dim
target_groups = each.value.target_groups
stat = each.value.stat
period = each.value.period
}
output
{
"legend": {
"position": "right"
},
"metrics": [
[ "AWS/ECS", "CPUUtilization", "ServiceName", "mis-fileserver", "ClusterName", [ "mis-app-cluster" ] ],
[ ".", "MemoryUtilization", ".", ".", ".", [ "mis-app-cluster" ] ]
],
"period": 300,
"region": "eu-west-1",
"stat": "Average",
"title": "mis-fileserver",
"view": "timeSeries",
"timezone": "UTC"
}

Terraform merge list of maps

My input is as below:
list_groups = [
{
dev-api = {
envs = [
"dev-eu-1",
"dev-eu-2",
]
hosts = [
"dev-api-eu1",
"dev-api-eu2",
]
}
},
{
dev-api = {
envs = [
"dev-us-1",
"dev-us-2",
]
hosts = [
"dev-api-us1",
"dev-api-us2",
]
}
},
]
I am using merge as below:
output "map_groups" {
value = merge(var.list_groups...)
}
And the output I get is :
map_groups = {
dev-api = {
envs = [
"dev-us-1",
"dev-us-2",
]
hosts = [
"dev-api-us1",
"dev-api-us2",
]
}
}
But I need the output to be:
map_groups = {
dev-api = {
envs = [
"dev-us-1",
"dev-us-2",
"dev-eu-1",
"dev-eu-2"
]
hosts = [
"dev-api-us1",
"dev-api-us2",
"dev-api-eu1",
"dev-api-eu2"
]
}
}
I understand the merge is eating up one of the entries because the key is the same but if it could somehow merge the entries/elements, that'll be great.
This is a rather complex issue. I think you should concentrate on simplifying your input data, rather than trying to come out with some convoluted TF code to post-fix your input data structures.
Nevertheless, the expansion symbol ... can be used to solve the issue in terraform.
variable "list_groups" {
default = [
{
dev-api = {
envs = [
"dev-eu-1",
"dev-eu-2",
]
hosts = [
"dev-api-eu1",
"dev-api-eu2",
]
}
},
{
dev-api = {
envs = [
"dev-us-1",
"dev-us-2",
]
hosts = [
"dev-api-us1",
"dev-api-us2",
]
}
},
]
}
locals {
api_names = distinct([for api in var.list_groups: keys(api)[0]])
}
output "test" {
value = {
for key, val in {
for api_name in local.api_names:
api_name => {
envs = flatten([
for api in var.list_groups:
api[api_name].envs
])
hosts = flatten([
for api in var.list_groups:
api[api_name].hosts
])
}...
}:
key => val[0]
}
}
Output is:
test = {
"dev-api" = {
"envs" = [
"dev-eu-1",
"dev-eu-2",
"dev-us-1",
"dev-us-2",
]
"hosts" = [
"dev-api-eu1",
"dev-api-eu2",
"dev-api-us1",
"dev-api-us2",
]
}
}

EKS Terraform scale using cpu

Using terraform to instantiate an eks. How to configure when to scale the nodes? I would like to customize if the cpu reaches 40% for example.
My module eks:
module "eks" {
......
worker_groups = [
{
name = "worker-group-1"
instance_type = "t3a.medium"
root_volume_size = "20"
asg_desired_capacity = 1
asg_max_size = 1
asg_recreate_on_change = true
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=normal,instance_type=normal"
tags = [.....
]
}
]
worker_groups_launch_template = [
{
name = "spot-family-t-low"
override_instance_types = ["t3a.medium", "t2.medium","t3.medium"]
spot_instance_pools = 3
root_volume_size = "8"
asg_recreate_on_change = true
autoscaling_enabled = true
asg_max_size = 2
asg_desired_capacity = 1
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot,type=t-low"
public_ip = false
},
]
workers_additional_policies = [aws_iam_policy.worker_policy.id]
}

Terraform: List of AMI specific to ubuntu 20.08 LTS AWS

Problem: I am using terraform to get a list of AMI for a specific OS - ubuntu 20.08
I have checked different examples link
When I use the script, it does not give me a list of AMIs.
Script
data "aws_ami" "ubuntu" {
most_recent = true
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-20.08-amd64-server-*"]
}
filter {
name = "virtualization - type"
values = ["hvm"]
}
owners = ["AWS"]
}
I have referred the below link as well
How are data sources used in Terraform?
Output:
[ec2-user#ip-172-31-84-148 ~]$ terraform plan
provider.aws.region
The region where AWS operations will take place. Examples
are us-east-1, us-west-2, etc.
Enter a value: us-east-1
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
data.aws_ami.std_ami: Refreshing state...
------------------------------------------------------------------------
No changes. Infrastructure is up-to-date.
This means that Terraform did not detect any differences between your configuration and real physical resources that exist. As a result, no actions need to be performed.
I am not sure where I am going wrong; I have checked a lot of links, some of which I have listed below.
Your data should be:
data "aws_ami" "ubuntu" {
most_recent = true
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["099720109477"]
}
output "test" {
value = data.aws_ami.ubuntu
}
The owner of Ubuntu is not AWS, and the image is ubuntu-focal-20.04-amd64-server-, not ubuntu-xenial-20.08-amd64-server-.
The above results in (us-east-1):
{
"architecture" = "x86_64"
"arn" = "arn:aws:ec2:us-east-1::image/ami-0dba2cb6798deb6d8"
"block_device_mappings" = [
{
"device_name" = "/dev/sda1"
"ebs" = {
"delete_on_termination" = "true"
"encrypted" = "false"
"iops" = "0"
"snapshot_id" = "snap-0f06f1549ff7327c9"
"volume_size" = "8"
"volume_type" = "gp2"
}
"no_device" = ""
"virtual_name" = ""
},
{
"device_name" = "/dev/sdb"
"ebs" = {}
"no_device" = ""
"virtual_name" = "ephemeral0"
},
{
"device_name" = "/dev/sdc"
"ebs" = {}
"no_device" = ""
"virtual_name" = "ephemeral1"
},
]
"creation_date" = "2020-09-08T00:55:25.000Z"
"description" = "Canonical, Ubuntu, 20.04 LTS, amd64 focal image build on 2020-09-07"
"filter" = [
{
"name" = "name"
"values" = [
"ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*",
]
},
{
"name" = "virtualization-type"
"values" = [
"hvm",
]
},
]
"hypervisor" = "xen"
"id" = "ami-0dba2cb6798deb6d8"
"image_id" = "ami-0dba2cb6798deb6d8"
"image_location" = "099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20200907"
"image_type" = "machine"
"most_recent" = true
"name" = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20200907"
"owner_id" = "099720109477"
"owners" = [
"099720109477",
]
"product_codes" = []
"public" = true
"root_device_name" = "/dev/sda1"
"root_device_type" = "ebs"
"root_snapshot_id" = "snap-0f06f1549ff7327c9"
"sriov_net_support" = "simple"
"state" = "available"
"state_reason" = {
"code" = "UNSET"
"message" = "UNSET"
}
"tags" = {}
"virtualization_type" = "hvm"
}