I am using Terraform to create an Elastic Beanstalk application and environment. The supported-platforms page in the AWS documentation does not list a matching solution stack name for the current time frame [8th September 2020].
Consequently, I keep getting the error No Solution Stack named '64bit Amazon Linux 2 v3.1.2 running Go 1' found. when I run terraform apply.
I am also using a module provided by Cloud Posse to get my infrastructure up and running, but I doubt that Cloud Posse is at fault here. Any help is highly appreciated. Thank you.
Update: here's the Terraform source code that creates the Elastic Beanstalk resources.
provider "aws" {
region = var.region
}
module "vpc" {
source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=master"
namespace = var.namespace
stage = var.stage
name = var.name
cidr_block = "172.16.0.0/16"
}
module "subnets" {
source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=master"
availability_zones = ["us-east-1a"]
namespace = var.namespace
stage = var.stage
name = var.name
vpc_id = module.vpc.vpc_id
igw_id = module.vpc.igw_id
cidr_block = module.vpc.vpc_cidr_block
nat_gateway_enabled = false
nat_instance_enabled = false
}
module "elastic_beanstalk_application" {
source = "git::https://github.com/cloudposse/terraform-aws-elastic-beanstalk-application.git?ref=master"
namespace = var.namespace
stage = var.stage
name = var.name
description = "Sentinel Staging"
}
module "elastic_beanstalk_environment" {
source = "git::https://github.com/cloudposse/terraform-aws-elastic-beanstalk-environment.git?ref=master"
namespace = var.namespace
stage = var.stage
name = var.name
description = "Test elastic_beanstalk_environment"
region = var.region
availability_zone_selector = "Any 2"
dns_zone_id = var.dns_zone_id
elastic_beanstalk_application_name = module.elastic_beanstalk_application.elastic_beanstalk_application_name
instance_type = "t3.small"
autoscale_min = 1
autoscale_max = 2
updating_min_in_service = 0
updating_max_batch = 1
loadbalancer_type = "application"
vpc_id = module.vpc.vpc_id
loadbalancer_subnets = module.subnets.public_subnet_ids
application_subnets = module.subnets.public_subnet_ids
allowed_security_groups = [module.vpc.vpc_default_security_group_id]
// https://docs.aws.amazon.com/elasticbeanstalk/latest/platforms/platforms-supported.html
// https://docs.aws.amazon.com/elasticbeanstalk/latest/platforms/platforms-supported.html#platforms-supported.docker
solution_stack_name = "64bit Amazon Linux 2 v3.1.2 running Go 1"
additional_settings = [
{
namespace = "aws:elasticbeanstalk:application:environment"
name = "DB_URI"
value = var.db_uri
},
{
namespace = "aws:elasticbeanstalk:application:environment"
name = "USER"
value = var.user
},
{
namespace = "aws:elasticbeanstalk:application:environment"
name = "PASSWORD"
value = var.password
},
{
namespace = "aws:elasticbeanstalk:application:environment"
name = "PORT"
value = "5000"
}
]
}
And here's the exact error taken from the console when I run terraform apply with suitable variables passed as command-line arguments.
module.subnets.aws_network_acl.public[0]: Refreshing state... [id=acl-0a62395908205c288]
module.subnets.data.aws_availability_zones.available[0]: Reading... [id=2020-09-07 06:34:08.002137232 +0000 UTC]
module.subnets.data.aws_availability_zones.available[0]: Read complete after 0s [id=2020-09-07 06:34:12.416467455 +0000 UTC]
module.elastic_beanstalk_environment.aws_elastic_beanstalk_environment.default: Creating...
Error: InvalidParameterValue: No Solution Stack named '64bit Amazon Linux 2 v3.1.2 running Go 1' found.
status code: 400, request id: 631d5204-f667-4355-aa65-d617536a00b7
on .terraform/modules/elastic_beanstalk_environment/main.tf line 505, in resource "aws_elastic_beanstalk_environment" "default":
505: resource "aws_elastic_beanstalk_environment" "default" {
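For reference, one way to avoid hard-coding an exact platform version is to resolve it at plan time with the aws_elastic_beanstalk_solution_stack data source and feed the result into the module. This is only a sketch: the regular expression is an assumption about how the Go platform is named, and the value would still be passed to the Cloud Posse module via solution_stack_name.
data "aws_elastic_beanstalk_solution_stack" "go" {
  # Pick the newest Amazon Linux 2 Go stack instead of pinning "v3.1.2" by hand.
  most_recent = true
  name_regex  = "^64bit Amazon Linux 2 (.*) running Go (.*)$"
}
# ...then, inside module "elastic_beanstalk_environment":
#   solution_stack_name = data.aws_elastic_beanstalk_solution_stack.go.name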
Related
I'm trying to deploy a cluster with self-managed node groups. No matter what configuration options I use, I always end up with the following error:
Error: Post "http://localhost/api/v1/namespaces/kube-system/configmaps": dial tcp 127.0.0.1:80: connect: connection refused

  with module.eks-ssp.kubernetes_config_map.aws_auth[0]
  on .terraform/modules/eks-ssp/aws-auth-configmap.tf line 19, in resource "kubernetes_config_map" "aws_auth":
  resource "kubernetes_config_map" "aws_auth" {
The .tf file looks like this:
module "eks-ssp" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform"
# EKS CLUSTER
tenant = "DevOpsLabs2"
environment = "dev-test"
zone = ""
terraform_version = "Terraform v1.1.4"
# EKS Cluster VPC and Subnet mandatory config
vpc_id = "xxx"
private_subnet_ids = ["xxx","xxx", "xxx", "xxx"]
# EKS CONTROL PLANE VARIABLES
create_eks = true
kubernetes_version = "1.19"
# EKS SELF MANAGED NODE GROUPS
self_managed_node_groups = {
self_mg = {
node_group_name = "DevOpsLabs2"
subnet_ids = ["xxx","xxx", "xxx", "xxx"]
create_launch_template = true
launch_template_os = "bottlerocket" # amazonlinux2eks or bottlerocket or windows
custom_ami_id = "xxx"
public_ip = true # Enable only for public subnets
pre_userdata = <<-EOT
yum install -y amazon-ssm-agent \
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
disk_size = 20
instance_type = "t2.small"
desired_size = 2
max_size = 10
min_size = 2
capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot"
k8s_labels = {
Environment = "dev-test"
Zone = ""
WorkerType = "SELF_MANAGED_ON_DEMAND"
}
additional_tags = {
ExtraTag = "t2x-on-demand"
Name = "t2x-on-demand"
subnet_type = "public"
}
create_worker_security_group = false # Creates a dedicated sec group for this Node Group
},
}
}
module "eks-ssp-kubernetes-addons" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform//modules/kubernetes-addons"
eks_cluster_id = module.eks-ssp.eks_cluster_id
# EKS Addons
enable_amazon_eks_vpc_cni = true
enable_amazon_eks_coredns = true
enable_amazon_eks_kube_proxy = true
enable_amazon_eks_aws_ebs_csi_driver = true
#K8s Add-ons
enable_aws_load_balancer_controller = true
enable_metrics_server = true
enable_cluster_autoscaler = true
enable_aws_for_fluentbit = true
enable_argocd = true
enable_ingress_nginx = true
depends_on = [module.eks-ssp.self_managed_node_groups]
}
Providers:
terraform {
backend "remote" {}
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.66.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.6.1"
}
helm = {
source = "hashicorp/helm"
version = ">= 2.4.1"
}
}
}
Based on the example provided in the GitHub repo [1], my guess is that the provider configuration blocks are missing for this to work as expected. Looking at the code provided in the question, it seems that the following needs to be added:
data "aws_region" "current" {}
data "aws_eks_cluster" "cluster" {
name = module.eks-ssp.eks_cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks-ssp.eks_cluster_id
}
provider "aws" {
region = data.aws_region.current.id
alias = "default" # this should match the named profile you used if at all
}
provider "kubernetes" {
experiments {
manifest_resource = true
}
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
}
If helm is also required, I think the following block [2] needs to be added as well:
provider "helm" {
kubernetes {
host = data.aws_eks_cluster.cluster.endpoint
token = data.aws_eks_cluster_auth.cluster.token
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
}
}
The provider argument references for kubernetes and helm are in [3] and [4], respectively.
[1] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-self-managed-node-groups/main.tf#L23-L47
[2] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-eks-addons/main.tf#L49-L55
[3] https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs#argument-reference
[4] https://registry.terraform.io/providers/hashicorp/helm/latest/docs#argument-reference
The above answer from Marko E seems to fix this; I just ran into the same issue. After applying the above code, all together in a separate providers.tf file, Terraform now makes it past the error. I will post later as to whether the deployment makes it fully through.
For reference, I was able to go from 65 resources created down to 42 resources created before I hit this error. This was using the exact best-practice / sample configuration recommended at the top of the README from AWS Consulting here: https://github.com/aws-samples/aws-eks-accelerator-for-terraform
In my case, I was trying to deploy to a Kubernetes cluster (GKE) using Terraform. I replaced the kubeconfig path with the kubeconfig file's absolute path.
From
provider "kubernetes" {
config_path = "~/.kube/config"
#config_context = "my-context"
}
TO
provider "kubernetes" {
config_path = "/Users/<username>/.kube/config"
#config_context = "my-context"
}
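If hard-coding the absolute path is undesirable, a variant that should behave the same (a sketch, assuming Terraform 0.12 or later) is to let Terraform expand the home directory itself with pathexpand():
provider "kubernetes" {
  # pathexpand() resolves the leading "~" to the current user's home directory,
  # so the provider still receives an absolute path without embedding the username.
  config_path = pathexpand("~/.kube/config")
  #config_context = "my-context"
}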
I'm trying to create the base VPC network on Google Cloud Platform using Terraform, on which I could then launch instances, GKE, etc. However, I end up with the following error message:
Error: Error creating RouterNat: googleapi: Error 404: The resource 'projects/lol/regions/mars-north1/addresses/39.144.42.123' was not found, notFound
This is the minimal Terraform code, without the variable definitions, that leads to this state:
module "gcp_vpc" {
count = var.project_type == "aws" ? 0 : 1
source = "terraform-google-modules/network/google"
version = "3.0.1"
project_id = var.project_id
network_name = "${var.project_name}-lol-vpc"
routing_mode = "GLOBAL"
subnets = [
{
subnet_name = "${var.project_name}-lol-primary-subnet-${count.index}"
subnet_ip = "10.222.0.0/28"
subnet_region = var.gcp_region
subnet_private_access = "true"
subnet_flow_logs = "true"
}
]
secondary_ranges = {
"${var.project_name}-lol-secondary-subnets" = [
{
count = length(var.public_subnets)
range_name = "${var.project_name}-lol-subnet-alias-${count.index}"
ip_cidr_range = var.public_subnets[count.index]
},
{
count = length(var.private_subnets)
range_name = "${var.project_name}-lol-private-subnet-${count.index}"
ip_cidr_range = var.private_subnets[count.index]
},
]
}
}
module "cloud-nat" {
count = var.project_type == "aws" ? 0 : 1
source = "terraform-google-modules/cloud-nat/google"
version = "~> 1.3"
project_id = var.project_id
region = var.gcp_region
router = "${var.project_name}-lol-router"
create_router = true
network = module.gcp_vpc[0].network_name
# subnetworks = module.gcp_vpc[0].subnets_self_links
nat_ips = google_compute_address.nat.*.address
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES"
min_ports_per_vm = "128"
icmp_idle_timeout_sec = "15"
tcp_established_idle_timeout_sec = "600"
tcp_transitory_idle_timeout_sec = "15"
udp_idle_timeout_sec = "15"
}
resource "google_compute_address" "nat" {
count = var.project_type == "aws" ? 0 : length(var.public_subnets)
name = "${var.project_name}-lol-eip-nat-${count.index}"
project = var.project_id
region = var.gcp_region
}
output "gcp_nat_ips" {
value = google_compute_address.nat.*.address
}
However, I can see the external IP address 39.144.42.123 in the GCP web console. Can someone please help me understand where I should be looking to solve this issue? Is it Terraform, the Terraform provider, or GCP?
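One thing that may be worth checking (an assumption based on the cloud-nat module's documentation, not something stated in the question): the module's nat_ips variable is documented as a list of address self_links rather than raw IP strings, which would explain the API trying to resolve '39.144.42.123' as a resource name. A minimal sketch of that change, with the tuning arguments omitted for brevity:
module "cloud-nat" {
  count         = var.project_type == "aws" ? 0 : 1
  source        = "terraform-google-modules/cloud-nat/google"
  version       = "~> 1.3"
  project_id    = var.project_id
  region        = var.gcp_region
  router        = "${var.project_name}-lol-router"
  create_router = true
  network       = module.gcp_vpc[0].network_name

  # Pass the self_links of the reserved addresses instead of their literal IPs
  # (assumption: the module expects self_links and resolves them to the address resources).
  nat_ips = google_compute_address.nat.*.self_link

  source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES"
}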
Terraform CLI and Terraform AWS Provider Version
Installed from https://releases.hashicorp.com/terraform/0.13.5/terraform_0.13.5_linux_amd64.zip
hashicorp/aws v3.15.0
Affected Resource(s)
aws_rds_cluster
aws_rds_cluster_instance
Terraform Configuration Files
# inside ./modules/rds/main.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
}
}
required_version = "~> 0.13"
}
provider "aws" {
alias = "primary"
}
provider "aws" {
alias = "dr"
}
locals {
region_tags = ["primary", "dr"]
db_name = "${var.project_name}-${var.stage}-db"
db_cluster_0 = "${local.db_name}-cluster-${local.region_tags[0]}"
db_cluster_1 = "${local.db_name}-cluster-${local.region_tags[1]}"
db_instance_name = "${local.db_name}-instance"
}
resource "aws_rds_global_cluster" "global_db" {
global_cluster_identifier = "${var.project_name}-${var.stage}"
database_name = "${var.project_name}${var.stage}db"
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
// force_destroy = true
}
resource "aws_rds_cluster" "primary_cluster" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[0]}"
# the database name does not allow dashes:
database_name = "${var.project_name}${var.stage}db"
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
master_username = var.username
master_password = var.password
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.primary.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_0
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "primary" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = aws_rds_cluster.primary_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.primary.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster" "dr_cluster" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[1]}"
# database_name is not allowed to be specified on secondary regions
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
# cannot specify username/password in cross-region replication cluster:
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.dr.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_1
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "dr_instance" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = aws_rds_cluster.dr_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.dr.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "primary" {
name = "${local.db_name}-subnetgroup"
subnet_ids = var.subnet_ids
provider = aws.primary
tags = {
Name = "primary_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "dr" {
provider = aws.dr
name = "${local.db_name}-subnetgroup"
subnet_ids = var.dr_subnet_ids
tags = {
Name = "dr_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_parameter_group" "default" {
name = "rds-cluster-pg"
family = "aurora-mysql${var.mysql_version}"
description = "RDS default cluster parameter group"
parameter {
name = "character_set_server"
value = "utf8"
}
parameter {
name = "character_set_client"
value = "utf8"
}
parameter {
name = "aurora_parallel_query"
value = "ON"
apply_method = "pending-reboot"
}
}
Inside ./modules/sns/main.tf, this is the resource I'm adding when calling terraform apply from within the ./modules directory:
resource "aws_sns_topic" "foo_topic" {
name = "foo-${var.stage}-${var.topic_name}"
tags = {
Name = "foo-${var.stage}-${var.topic_name}"
Stage = var.stage
CreatedBy = var.created_by
CreatedOn = timestamp()
}
}
./modules/main.tf:
terraform {
backend "s3" {
bucket = "terraform-remote-state-s3-bucket-unique-name"
key = "terraform.tfstate"
region = "us-east-2"
dynamodb_table = "TerraformLockTable"
}
}
provider "aws" {
alias = "primary"
region = var.region
}
provider "aws" {
alias = "dr"
region = var.dr_region
}
module "vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.primary
}
}
module "dr_vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.dr
}
}
module "vpc_security_group" {
source = "./vpc_security_group"
vpc_id = module.vpc.vpc_id
providers = {
aws = aws.primary
}
}
module "rds" {
source = "./rds"
stage = var.stage
created_by = var.created_by
vpc_id = module.vpc.vpc_id
subnet_ids = [module.vpc.subnet_a_id, module.vpc.subnet_b_id, module.vpc.subnet_c_id]
dr_subnet_ids = [module.dr_vpc.subnet_a_id, module.dr_vpc.subnet_b_id, module.dr_vpc.subnet_c_id]
region = var.region
username = var.rds_username
password = var.rds_password
providers = {
aws.primary = aws.primary
aws.dr = aws.dr
}
}
module "sns_start" {
stage = var.stage
source = "./sns"
topic_name = "start"
created_by = var.created_by
}
./modules/variables.tf:
variable "region" {
default = "us-east-2"
}
variable "dr_region" {
default = "us-west-2"
}
variable "service" {
type = string
default = "foo-back"
description = "service to match what serverless framework deploys"
}
variable "stage" {
type = string
default = "sandbox"
description = "The stage to deploy: sandbox, dev, qa, uat, or prod"
validation {
condition = can(regex("sandbox|dev|qa|uat|prod", var.stage))
error_message = "The stage value must be a valid stage: sandbox, dev, qa, uat, or prod."
}
}
variable "created_by" {
description = "Company or vendor name followed by the username part of the email address"
}
variable "rds_username" {
description = "Username for rds"
}
variable "rds_password" {
description = "Password for rds"
}
./modules/sns/main.tf:
resource "aws_sns_topic" "foo_topic" {
name = "foo-${var.stage}-${var.topic_name}"
tags = {
Name = "foo-${var.stage}-${var.topic_name}"
Stage = var.stage
CreatedBy = var.created_by
CreatedOn = timestamp()
}
}
./modules/sns/output.tf:
output "sns_topic_arn" {
value = aws_sns_topic.foo_topic.arn
}
Debug Output
Both outputs have modified keys, names, account IDs, etc:
The plan output from running terraform apply:
https://gist.github.com/ystoneman/95df711ee0a11d44e035b9f8f39b75f3
The state before applying: https://gist.github.com/ystoneman/5c842769c28e1ae5969f9aaff1556b37
Expected Behavior
The entire ./modules/main.tf had already been created, and the only thing that was added was the SNS module, so only the SNS module should be created.
Actual Behavior
But instead, the RDS resources are affected too, and terraform "claims" that engine_mode has changed from provisioned to global, even though it already was global according to the console:
The plan output also says that cluster_identifier is only known after apply and therefore forces replacement. However, I think the cluster_identifier is necessary for the aws_rds_cluster to know it belongs to the aws_rds_global_cluster, and for the aws_rds_cluster_instance to know it belongs to the aws_rds_cluster, respectively.
Steps to Reproduce
Comment out the module "sns_start".
cd ./modules
terraform apply (the state file I included reflects the state after this step is done)
Uncomment the module "sns_start".
terraform apply (this is the point at which the debug output above was captured)
Important Factoids
This problem happens whether I run it from my Mac or within AWS CodeBuild.
References
The question "AWS Terraform tried to destory and rebuild RDS cluster" seems to reference this too, but it's not specific to a Global Cluster, where you do need identifiers so that instances and clusters know what they belong to.
It seems like you are using an outdated version of the aws provider and are specifying the engine_mode incorrectly. There was a bug ticket relating to this: https://github.com/hashicorp/terraform-provider-aws/issues/16088
It is fixed in version 3.15.0, which you can use via:
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.15.0"
}
}
required_version = "~> 0.13"
}
Additionally, you should drop the engine_mode property from your Terraform configuration completely.
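As a sketch of what that looks like for the primary cluster (reusing the variables and locals from the question; the instance and DR resources change the same way), the cluster keeps global_cluster_identifier but no longer sets engine_mode:
resource "aws_rds_cluster" "primary_cluster" {
  depends_on = [aws_rds_global_cluster.global_db]
  provider   = aws.primary

  cluster_identifier = "${local.db_name}-cluster-${local.region_tags[0]}"
  database_name      = "${var.project_name}${var.stage}db"
  engine             = "aurora-mysql"
  engine_version     = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
  # engine_mode is dropped: with aws provider >= 3.15.0, membership in the global
  # cluster is expressed through global_cluster_identifier alone.
  global_cluster_identifier = aws_rds_global_cluster.global_db.id

  master_username           = var.username
  master_password           = var.password
  backup_retention_period   = 5
  preferred_backup_window   = "07:00-09:00"
  db_subnet_group_name      = aws_db_subnet_group.primary.id
  skip_final_snapshot       = true
  final_snapshot_identifier = "ci-aurora-cluster-backup"
}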
I have a new issue with setting up a GCP instance template. I presume there was an update to the Terraform GCP provider.
resource "google_compute_instance_template" "backend-template" {
name = "${var.platform_name}-backend-instance-template"
description = "Template used for backend instances"
instance_description = "backend Instance"
machine_type = "n1-standard-1"
metadata_startup_script = "${lookup(var.startup_scripts,"backend-server")}"
disk {
boot = "true"
source_image = "backend-packer-image"
}
metadata {
APP_SETTINGS = "${var.app_settings}"
URL_STAGING = "${var.url_staging}"
API_URL_STAGING = "${var.api_url_staging}"
URL_PRODUCTION = "${var.url_production}"
API_URL_PRODUCTION = "${var.api_url_production}"
LOGIN_URL = "${var.login_url}"
API_URL = "${var.api_url}"
vault_server_IP = "${lookup(var.static_ips, "vault-server")}"
environment = "${var.environment}"
}
network_interface {
subnetwork = "${google_compute_subnetwork.private-fe-be.self_link}"
}
lifecycle {
create_before_destroy = true
}
tags = ["no-ip", "backend-server"]
service_account {
scopes = ["cloud-platform"]
}
}
This is the current error after running the script, even though the image backend-packer-image was already created and exists on GCP:
* google_compute_instance_template.backend-template: 1 error(s) occurred:
* google_compute_instance_template.backend-template: error flattening disks: Error getting relative path for source image: String was not a self link: global/images/backend-packer-image
I had the exact same problem today and had to look directly into the pull request to find a way to use this correctly.
What I came up with is this:
First, be sure you are in the right project before running this command, or you won't find the image you are looking for if it's a custom one:
gcloud compute images list --uri | grep "your image name"
This gives you the URI of your image; use that full URI as the value and it will work.
Replace the image name with the URI in source_image:
resource "google_compute_instance_template" "backend-template" {
name = "${var.platform_name}-backend-instance-
template"
description = "Template used for backend instances"
instance_description = "backend Instance"
machine_type = "n1-standard-1"
metadata_startup_script = "${lookup(var.startup_scripts,"backend-server")}"
disk {
boot = "true"
source_image = "https://www.googleapis.com/compute/v1/projects/<project-name>/global/images/backend-packer-image"
}
metadata {
APP_SETTINGS = "${var.app_settings}"
URL_STAGING = "${var.url_staging}"
API_URL_STAGING = "${var.api_url_staging}"
URL_PRODUCTION = "${var.url_production}"
API_URL_PRODUCTION = "${var.api_url_production}"
LOGIN_URL = "${var.login_url}"
API_URL = "${var.api_url}"
vault_server_IP = "${lookup(var.static_ips, "vault-server")}"
environment = "${var.environment}"
}
network_interface {
subnetwork = "${google_compute_subnetwork.private-fe-be.self_link}"
}
lifecycle {
create_before_destroy = true
}
tags = ["no-ip", "backend-server"]
service_account {
scopes = ["cloud-platform"]
}
}
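Another way to obtain the full self link without hard-coding the project URL (a sketch, not part of the original answer; it assumes the google_compute_image data source is available and that var.gcloud_project holds the project ID) is to resolve the custom image by name:
# Look up the custom image and feed its self_link into the template.
data "google_compute_image" "backend" {
  name    = "backend-packer-image"
  project = "${var.gcloud_project}"
}
# ...then, in the instance template:
#   disk {
#     boot         = "true"
#     source_image = "${data.google_compute_image.backend.self_link}"
#   }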
It is also possible to pin the Terraform Google provider to an earlier version:
provider "google"{
version = "<= 1.17"
credentials = "${var.service_account_path}"
project = "${var.gcloud_project}"
region = "${var.gcloud_region}"
}
I am using Terraform to create a NAT gateway using this module:
https://registry.terraform.io/modules/GoogleCloudPlatform/nat-gateway/google/1.1.3
using this code:
module "nat" {
source = "GoogleCloudPlatform/nat-gateway/google"
region = "${var.gcloud-region}"
network = "${google_compute_network.vpc-network.name}"
subnetwork = "${google_compute_subnetwork.vpc-subnetwork-public.name}"
machine_type = "${var.vm-type-nat-gateway}"
}
Other snippets :
variable "gcloud-region" { default = "europe-west1" }
variable "vm-type-nat-gateway" { default = "n1-standard-2"}
resource "google_compute_network" "vpc-network" {
name = "foobar-vpc-network"
auto_create_subnetworks = false
}
resource "google_compute_subnetwork" "vpc-subnetwork-public" {
name = "foobar-vpc-subnetwork-public"
ip_cidr_range = "10.0.1.0/24"
network = "${google_compute_network.vpc-network.self_link}"
region = "${var.gcloud-region}"
private_ip_google_access = false
}
================
module.nat.google_compute_route.nat-gateway: 1 error(s) occurred:
module.nat.google_compute_route.nat-gateway: element: element() may not be used with an empty list in:
${element(split("/", element(module.nat-gateway.instances[0], 0)), 10)}
The above error comes up and the whole Terraform run stops; I am unable to run terraform apply or terraform destroy for any change.
What could be causing this?