How can I override the default region in the aws provider?

I have a Terraform configuration file like the one below. When I run terraform apply I get this error: A default (non-aliased) provider configuration for "aws" was already given at versions.tf:37,1-15. If multiple configurations are required, set the "alias" argument for alternative configurations.
I think it is caused by a module that already defines an aws provider configuration. I know I can set an alias on the provider I added at the bottom of the configuration below, but how can I override the default one used by the module?
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.20.0"
}
random = {
source = "hashicorp/random"
version = "3.0.0"
}
local = {
source = "hashicorp/local"
version = "2.0.0"
}
null = {
source = "hashicorp/null"
version = "3.0.0"
}
template = {
source = "hashicorp/template"
version = "2.2.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.1"
}
}
required_version = "~> 0.14"
}
variable "region" {
default = "us-east-2"
description = "AWS region"
}
provider "aws" {
region = "us-east-2"
}
data "aws_availability_zones" "available" {}
locals {
cluster_name = "education-eks-${random_string.suffix.result}"
}
resource "random_string" "suffix" {
length = 8
special = false
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.66.0"
name = "elk-vpc"
cidr = "10.0.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
}
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
}
provider "aws" {
region = "ap-southeast-2"
}
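For reference, child modules inherit the default (non-aliased) provider configuration, which is why the second provider "aws" block above collides with the first. A minimal sketch of one way to resolve it, assuming the goal is to run the vpc module in ap-southeast-2 while keeping us-east-2 as the default: give the second configuration an alias (the name sydney here is arbitrary) and pass it to the module through its providers argument.

provider "aws" {
  alias  = "sydney"
  region = "ap-southeast-2"
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "2.66.0"
  # ... the same arguments as in the module block above ...

  # Every aws resource inside the module now uses the aliased configuration
  # instead of the default us-east-2 provider.
  providers = {
    aws = aws.sydney
  }
}

If instead every resource should move to ap-southeast-2, it is simpler to change the region argument of the single default provider block (or drive it from var.region).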

Related

Unable to create an EKS Cluster with an existing security group using Terraform

I'm having issues when trying to create an EKS cluster with a few security groups that I have already created; I don't want a new SG every time I create a new EKS cluster.
The problem is in the part of the code around vpc_id: cluster_create_security_group = false produces an error, and cluster_security_group_id = "sg-123" is completely ignored.
My code is like this:
provider "aws" {
region = "us-east-2"
}
terraform {
backend "s3" {
bucket = "mys3bucket"
key = "eks/terraform.tfstate"
region = "us-east-2"
}
}
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
}
variable "cluster_security_group_id" {
description = "Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false`"
type = string
default = "sg-1234"
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~> 18.0"
cluster_name = "cluster-example"
cluster_version = "1.21" #This may vary depending on the purpose of the cluster
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
}
}
vpc_id = "vpc-12345"
subnet_ids = ["subnet-123", "subnet-456", "subnet-789"]
create_cluster_security_group = false # ERROR: An argument named "cluster_create_security_group" is not expected here
cluster_security_group_id = "my-security-group-id"
# EKS Managed Node Group(s)
eks_managed_node_group_defaults = {
disk_size = 50
instance_types = ["t3.medium"]
}
eks_managed_node_groups = {
Test-Nodegroup = {
min_size = 2
max_size = 5
desired_size = 2
instance_types = ["t3.large"]
capacity_type = "SPOT"
}
}
tags = {
Environment = "dev"
Terraform = "true"
}
}
Where am I wrong? This is my whole Terraform file.
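For reference, a hedged sketch of how an existing security group is typically wired in, assuming the v18 terraform-aws-modules/eks module exposes the arguments create_cluster_security_group and cluster_security_group_id (the error message suggests a cluster_create_security_group spelling slipped into the applied code somewhere):

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.0"

  # ... cluster_name, vpc_id, subnet_ids and node groups as above ...

  # Verb-first spelling; "cluster_create_security_group" is not a valid argument.
  create_cluster_security_group = false
  cluster_security_group_id     = var.cluster_security_group_id
}

Note that, per the variable description above, cluster_security_group_id is only honoured when create_cluster_security_group is false, which may be why the value appeared to be ignored.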

Terraform: get account id for provider + for_each + account module

I'm trying to create multiple AWS accounts in an Organization, each containing resources.
The resources should be owned by the created accounts.
For that I created a module for the accounts:
resource "aws_organizations_account" "this" {
name = var.customer
email = var.email
parent_id = var.parent_id
role_name = "OrganizationAccountAccessRole"
provider = aws.src
}
resource "aws_s3_bucket" "this" {
bucket = "exconcept-terraform-state-${var.customer}"
provider = aws.dst
depends_on = [
aws_organizations_account.this
]
}
output "account_id" {
value = aws_organizations_account.this.id
}
output "account_arn" {
value = aws_organizations_account.this.arn
}
my provider file for the module:
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
configuration_aliases = [ aws.src, aws.dst ]
}
}
}
In the root module I'm calling the module like this:
module "account" {
source = "./modules/account"
for_each = var.accounts
customer = each.value["customer"]
email = each.value["email"]
# close_on_deletion = true
parent_id = aws_organizations_organizational_unit.testing.id
providers = {
aws.src = aws.default
aws.dst = aws.customer
}
}
Since the provider information comes from the root module, and the accounts are created with a for_each map, how can I use the current aws.dst provider?
Here is my root provider file:
provider "aws" {
region = "eu-central-1"
profile = "default"
alias = "default"
}
provider "aws" {
assume_role {
role_arn = "arn:aws:iam::${module.account[each.key].account_id}:role/OrganizationAccountAccessRole"
}
alias = "customer"
region = "eu-central-1"
}
When I run terraform init I get this error:
Error: Cycle: module.account.aws_s3_bucket_versioning.this, module.account.aws_s3_bucket.this, provider["registry.terraform.io/hashicorp/aws"].customer, module.account.aws_s3_bucket_acl.this, module.account (close)
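For reference, provider configurations are resolved before resources are planned: a provider block cannot use each.key, and here the customer provider's assume_role refers to an output of the very module that is configured with that provider, which is the circular dependency the error describes. A minimal sketch of the usual workaround, assuming the account IDs can be made known to the configuration statically (for example by a first apply that only creates the accounts, or by a separate root configuration per customer):

# One aliased provider per known account. The account ID is a hypothetical
# hard-coded value, since a provider block cannot reference
# module.account[...] outputs without creating a cycle.
provider "aws" {
  alias  = "customer_acme"
  region = "eu-central-1"
  assume_role {
    role_arn = "arn:aws:iam::111111111111:role/OrganizationAccountAccessRole"
  }
}

module "account_acme" {
  source    = "./modules/account"
  customer  = "acme"
  email     = "acme@example.com"
  parent_id = aws_organizations_organizational_unit.testing.id
  providers = {
    aws.src = aws.default
    aws.dst = aws.customer_acme
  }
}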

Why is Terraform forcing replacement of an Aurora global database?

Terraform CLI and Terraform AWS Provider Version
Installed from https://releases.hashicorp.com/terraform/0.13.5/terraform_0.13.5_linux_amd64.zip
hashicorp/aws v3.15.0
Affected Resource(s)
aws_rds_cluster
aws_rds_cluster_instance
Terraform Configuration Files
# inside ./modules/rds/main.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
}
}
required_version = "~> 0.13"
}
provider "aws" {
alias = "primary"
}
provider "aws" {
alias = "dr"
}
locals {
region_tags = ["primary", "dr"]
db_name = "${var.project_name}-${var.stage}-db"
db_cluster_0 = "${local.db_name}-cluster-${local.region_tags[0]}"
db_cluster_1 = "${local.db_name}-cluster-${local.region_tags[1]}"
db_instance_name = "${local.db_name}-instance"
}
resource "aws_rds_global_cluster" "global_db" {
global_cluster_identifier = "${var.project_name}-${var.stage}"
database_name = "${var.project_name}${var.stage}db"
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
// force_destroy = true
}
resource "aws_rds_cluster" "primary_cluster" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[0]}"
# the database name does not allow dashes:
database_name = "${var.project_name}${var.stage}db"
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
master_username = var.username
master_password = var.password
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.primary.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_0
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "primary" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = aws_rds_cluster.primary_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.primary.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster" "dr_cluster" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[1]}"
# database_name is not allowed to be specified on secondary-region clusters
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
# cannot specify username/password in cross-region replication cluster:
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.dr.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_1
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "dr_instance" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = aws_rds_cluster.dr_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.dr.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "primary" {
name = "${local.db_name}-subnetgroup"
subnet_ids = var.subnet_ids
provider = aws.primary
tags = {
Name = "primary_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "dr" {
provider = aws.dr
name = "${local.db_name}-subnetgroup"
subnet_ids = var.dr_subnet_ids
tags = {
Name = "dr_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_parameter_group" "default" {
name = "rds-cluster-pg"
family = "aurora-mysql${var.mysql_version}"
description = "RDS default cluster parameter group"
parameter {
name = "character_set_server"
value = "utf8"
}
parameter {
name = "character_set_client"
value = "utf8"
}
parameter {
name = "aurora_parallel_query"
value = "ON"
apply_method = "pending-reboot"
}
}
Inside ./modules/sns/main.tf, this is the resource I'm adding when calling terraform apply from within the ./modules directory:
resource "aws_sns_topic" "foo_topic" {
name = "foo-${var.stage}-${var.topic_name}"
tags = {
Name = "foo-${var.stage}-${var.topic_name}"
Stage = var.stage
CreatedBy = var.created_by
CreatedOn = timestamp()
}
}
./modules/main.tf:
terraform {
backend "s3" {
bucket = "terraform-remote-state-s3-bucket-unique-name"
key = "terraform.tfstate"
region = "us-east-2"
dynamodb_table = "TerraformLockTable"
}
}
provider "aws" {
alias = "primary"
region = var.region
}
provider "aws" {
alias = "dr"
region = var.dr_region
}
module "vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.primary
}
}
module "dr_vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.dr
}
}
module "vpc_security_group" {
source = "./vpc_security_group"
vpc_id = module.vpc.vpc_id
providers = {
aws = aws.primary
}
}
module "rds" {
source = "./rds"
stage = var.stage
created_by = var.created_by
vpc_id = module.vpc.vpc_id
subnet_ids = [module.vpc.subnet_a_id, module.vpc.subnet_b_id, module.vpc.subnet_c_id]
dr_subnet_ids = [module.dr_vpc.subnet_a_id, module.dr_vpc.subnet_b_id, module.dr_vpc.subnet_c_id]
region = var.region
username = var.rds_username
password = var.rds_password
providers = {
aws.primary = aws.primary
aws.dr = aws.dr
}
}
module "sns_start" {
stage = var.stage
source = "./sns"
topic_name = "start"
created_by = var.created_by
}
./modules/variables.tf:
variable "region" {
default = "us-east-2"
}
variable "dr_region" {
default = "us-west-2"
}
variable "service" {
type = string
default = "foo-back"
description = "service to match what serverless framework deploys"
}
variable "stage" {
type = string
default = "sandbox"
description = "The stage to deploy: sandbox, dev, qa, uat, or prod"
validation {
condition = can(regex("sandbox|dev|qa|uat|prod", var.stage))
error_message = "The stage value must be a valid stage: sandbox, dev, qa, uat, or prod."
}
}
variable "created_by" {
description = "Company or vendor name followed by the username part of the email address"
}
variable "rds_username" {
description = "Username for rds"
}
variable "rds_password" {
description = "Password for rds"
}
./modules/sns/main.tf:
resource "aws_sns_topic" "foo_topic" {
name = "foo-${var.stage}-${var.topic_name}"
tags = {
Name = "foo-${var.stage}-${var.topic_name}"
Stage = var.stage
CreatedBy = var.created_by
CreatedOn = timestamp()
}
}
./modules/sns/output.tf:
output "sns_topic_arn" {
value = aws_sns_topic.foo_topic.arn
}
Debug Output
Both outputs have modified keys, names, account IDs, etc:
The plan output from running terraform apply:
https://gist.github.com/ystoneman/95df711ee0a11d44e035b9f8f39b75f3
The state before applying: https://gist.github.com/ystoneman/5c842769c28e1ae5969f9aaff1556b37
Expected Behavior
Everything in ./modules/main.tf had already been created; the only thing added was the SNS module, so only the SNS resources should be created.
Actual Behavior
But instead, the RDS resources are affected too, and Terraform claims that engine_mode has changed from provisioned to global, even though it was already global according to the console.
The plan output also says that cluster_identifier is only known after apply and therefore forces replacement. However, I think the cluster_identifier is necessary to let the aws_rds_cluster know it belongs to the aws_rds_global_cluster, and the aws_rds_cluster_instance know it belongs to the aws_rds_cluster, respectively.
Steps to Reproduce
Comment out the module "sns_start" block.
cd ./modules
terraform apply (the state file linked above reflects the state after this step)
Uncomment the module "sns_start" block.
terraform apply (this is the apply whose debug output is linked above)
Important Factoids
This problem happens whether I run it from my Mac or within AWS CodeBuild.
References
The question "AWS Terraform tried to destroy and rebuild RDS cluster" references this too, but it is not specific to a global cluster, where you do need identifiers so that instances and clusters know what they belong to.
It seems like you are using an outdated version of the AWS provider and specifying engine_mode incorrectly. There was a bug ticket relating to this: https://github.com/hashicorp/terraform-provider-aws/issues/16088
It is fixed in version 3.15.0, which you can pin with:
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.15.0"
}
}
required_version = "~> 0.13"
}
Additionally, you should drop the engine_mode argument from your Terraform configuration entirely.
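A minimal sketch of the primary cluster after that change, reusing the arguments already shown above and simply omitting engine_mode; membership in the global cluster is still expressed through global_cluster_identifier:

resource "aws_rds_cluster" "primary_cluster" {
  depends_on = [aws_rds_global_cluster.global_db]
  provider   = aws.primary

  cluster_identifier        = "${local.db_name}-cluster-${local.region_tags[0]}"
  database_name             = "${var.project_name}${var.stage}db"
  engine                    = "aurora-mysql"
  engine_version            = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
  # engine_mode intentionally omitted, per the note above
  global_cluster_identifier = aws_rds_global_cluster.global_db.id
  master_username           = var.username
  master_password           = var.password
  backup_retention_period   = 5
  preferred_backup_window   = "07:00-09:00"
  db_subnet_group_name      = aws_db_subnet_group.primary.id
  skip_final_snapshot       = true
  final_snapshot_identifier = "ci-aurora-cluster-backup"

  tags = {
    Name      = local.db_cluster_0
    Stage     = var.stage
    CreatedBy = var.created_by
  }
}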

Terraform: Getting AWS subnet IDs for a given VPC failing, no results

I am working on creating an Elasticsearch cluster using Terraform. I am not able to get the subnet IDs for a given VPC with aws_subnet_ids; the result is always null. What am I doing wrong?
Code:
provider "aws" {
region = "eu-central-1"
shared_credentials_file = "${pathexpand("~/.aws/credentials")}"
}
variable "domain" {
default = "tf-test"
}
data "aws_vpc" "selected" {
tags = {
Name = var.vpc
}
}
data "aws_subnet_ids" "selected" {
vpc_id = "${data.aws_vpc.selected.id}"
}
resource "aws_elasticsearch_domain" "es" {
domain_name = "${var.domain}"
elasticsearch_version = "6.3"
cluster_config {
instance_type = "m4.large.elasticsearch"
}
vpc_options {
subnet_ids = [
"${data.aws_subnet_ids.selected.ids[0]}",
"${data.aws_subnet_ids.selected.ids[1]}",
]
}
}
Output:
terraform plan:
Error: Invalid index
on main.tf line 55, in resource "aws_elasticsearch_domain" "es":
55: "${data.aws_subnet_ids.selected.ids[0]}",
This value does not have any indices.
Error: Invalid index
on main.tf line 56, in resource "aws_elasticsearch_domain" "es":
56: "${data.aws_subnet_ids.selected.ids[1]}",
This value does not have any indices.
Update
If I print the subnet IDs without an index, I do get them.
Solved:
subnet_id = "${element(module.vpc.public_subnets, 0)}"
The VPC was created like this:
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.6.0"
name = var.vpc_name
cidr = var.vpc_cidr
azs = data.aws_availability_zones.available.names
private_subnets = [var.public_subnet_1, var.public_subnet_2, var.public_subnet_3]
public_subnets = [var.private_subnet_1, var.private_subnet_2, var.private_subnet_3]
enable_nat_gateway = var.enable_nat_gateway
single_nat_gateway = var.single_nat_gateway
enable_dns_hostnames = var.enable_dns_hostname
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
"name" = var.public_subnet_name
}
private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
"name" = var.private_subnet_name
}
}
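For reference, since the subnets are created by the vpc module in the same configuration, here is a sketch that skips the aws_subnet_ids lookup entirely and uses the module's own outputs (assuming the domain should sit in two of the private subnets):

resource "aws_elasticsearch_domain" "es" {
  domain_name           = var.domain
  elasticsearch_version = "6.3"

  cluster_config {
    instance_type = "m4.large.elasticsearch"
  }

  vpc_options {
    # The module exposes the IDs of the subnets it creates, so no data-source
    # lookup (and no dependency-ordering problem) is needed.
    subnet_ids = [
      module.vpc.private_subnets[0],
      module.vpc.private_subnets[1],
    ]
  }
}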

Terraform 0.12.13 - Not able to spin up an AWS instance

AWS EC2 instance creation fails while creating a network interface in the aws_instance section. The configuration follows the approach described in the Terraform documentation on network interfaces.
On removing the network_interface block the configuration works seamlessly; with the block present, the following error is logged:
"Error: Error launching source instance: Unsupported: The requested configuration is currently not supported. Please check the documentation for supported configurations."
variable "aws_region" {}
variable "aws_access_key" {}
variable "aws_secret_key" {}
variable "vpc_cidr_block" {}
variable "environment" {}
variable "applicationtype" {}
variable "subnet_cidr_block" {}
variable "amiid" {}
variable "instancetype" {}
variable "bucketname" {}
variable "publickey-fe" {}
variable "publickey-be" {}
provider "aws" {
profile = "default"
region = "${var.aws_region}"
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
}
data "aws_availability_zones" "availability" {
state = "available"
}
resource "aws_vpc" "sitespeed_vpc" {
cidr_block = "${var.vpc_cidr_block}"
instance_tenancy = "dedicated"
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-VPC"
}
}
resource "aws_subnet" "sitespeed_subnet" {
vpc_id = "${aws_vpc.sitespeed_vpc.id}"
cidr_block = "${var.subnet_cidr_block}"
availability_zone = "${data.aws_availability_zones.availability.names[0]}"
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-Subnet"
}
}
resource "aws_network_interface" "sitespeed_frontend_NIC" {
subnet_id = "${aws_subnet.sitespeed_subnet.id}"
private_ips = ["192.168.10.100"]
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-frontend-nic"
}
}
resource "aws_network_interface" "sitespeed_backend_NIC" {
subnet_id = "${aws_subnet.sitespeed_subnet.id}"
private_ips = ["192.168.10.110"]
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-backend-nic"
}
}
resource "aws_key_pair" "sitespeed_front_key" {
key_name = "site_speed_front_key"
public_key = "${var.publickey-fe}"
}
resource "aws_key_pair" "sitespeed_back_key" {
key_name = "site_speed_back_key"
public_key = "${var.publickey-be}"
}
resource "aws_instance" "sitespeed_front" {
ami = "ami-00942d7cd4f3ca5c0"
instance_type = "t2.micro"
key_name = "site_speed_front_key"
availability_zone = "${data.aws_availability_zones.availability.names[0]}"
network_interface {
network_interface_id = "${aws_network_interface.sitespeed_frontend_NIC.id}"
device_index = 0
}
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-frontend-server"
public = "yes"
}
}
resource "aws_instance" "sitespeed_backend" {
ami = "ami-00942d7cd4f3ca5c0"
instance_type = "t2.micro"
key_name = "site_speed_back_key"
network_interface {
network_interface_id = "${aws_network_interface.sitespeed_backend_NIC.id}"
device_index = 0
}
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
Name = "site-speed-backend-server"
public = "No"
}
}
resource "aws_s3_bucket" "b" {
bucket = "${var.bucketname}"
acl = "private"
tags = {
env = "${var.environment}"
application = "${var.applicationtype}"
}
}
The issue was due to the Terraform version. The following is the updated script, which works with Terraform v0.12.16, to create an EC2 instance on AWS.
// Variable Definition
variable "aws_region" {}
variable "aws_vpc_cidr_block" {}
variable "aws_subnet_cidr_block" {}
variable "aws_private_ip_fe" {}
variable "aws_Name" {}
variable "aws_Application" {}
variable "aws_ami" {}
variable "aws_instance_type" {}
// Provider Definition
provider "aws" {
version = "~> 2.40"
region = var.aws_region
}
// Adds a VPC
resource "aws_vpc" "aws_ec2_deployment_test-vpc" {
cidr_block = var.aws_vpc_cidr_block
tags = {
Name = join("-", [var.aws_Name, "vpc"])
Application = var.aws_Application
}
}
//Adds a subnet
resource "aws_subnet" "aws_ec2_deployment_test-subnet" {
vpc_id = aws_vpc.aws_ec2_deployment_test-vpc.id
cidr_block = var.aws_subnet_cidr_block
availability_zone = join("", [var.aws_region, "a"])
tags = {
Name = join("-", [var.aws_Name, "subnet"])
Application = var.aws_Application
}
}
//Adds a Network Interface
resource "aws_network_interface" "aws_ec2_deployment_test-fe" {
subnet_id = aws_subnet.aws_ec2_deployment_test-subnet.id
private_ips = [ var.aws_private_ip_fe ]
tags = {
Name = join("-", [var.aws_Name, "network-interface-fe"])
Application = var.aws_Application
}
}
//Adds an EC2 Instance
resource "aws_instance" "aws_ec2_deployment_test-fe"{
ami = var.aws_ami
instance_type = var.aws_instance_type
network_interface {
network_interface_id = aws_network_interface.aws_ec2_deployment_test-fe.id
device_index = 0
}
tags = {
Name = join("-", [var.aws_Name, "fe-ec2"])
Application = var.aws_Application
}
}
// Print Output Values
output "aws_ec2_deployment_test-vpc" {
description = "CIDR Block for the VPC: "
value = aws_vpc.aws_ec2_deployment_test-vpc.cidr_block
}
output "aws_ec2_deployment_test-subnet" {
description = "Subnet Block: "
value = aws_subnet.aws_ec2_deployment_test-subnet.cidr_block
}
output "aws_ec2_deployment_test-private-ip" {
description = "System Private IP: "
value = aws_network_interface.aws_ec2_deployment_test-fe.private_ip
}
output "aws_ec2_deployment_test-EC2-Details" {
description = "EC2 Details: "
value = aws_instance.aws_ec2_deployment_test-fe.public_ip
}
Gist link to the solution
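For completeness, a hypothetical terraform.tfvars for the variables the updated script declares (all values below are placeholders, not the author's):

aws_region            = "us-east-1"
aws_vpc_cidr_block    = "192.168.0.0/16"
aws_subnet_cidr_block = "192.168.10.0/24"
aws_private_ip_fe     = "192.168.10.100"
aws_Name              = "ec2-deployment-test"
aws_Application       = "sitespeed"
aws_ami               = "ami-00942d7cd4f3ca5c0"
aws_instance_type     = "t2.micro"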