Update Cloud Run instance on secret update - google-cloud-platform

On GCP, I'm using Cloud Run with secrets exposed as environment variables from Secret Manager.
How can I efficiently update a Cloud Run instance when I update a secret?
I tried with this Terraform code, without success:
// run.tf
module "cloud-run-app" {
  source  = "GoogleCloudPlatform/cloud-run/google"
  version = "~> 0.0"

  service_name          = "${local.main_project}-cloudrun"
  location              = local.region
  image                 = local.cloudrun_image
  project_id            = local.main_project
  env_vars              = local.envvars_injection
  env_secret_vars       = local.secrets_injection
  service_account_email = google_service_account.app.email
  ports                 = local.cloudrun_port

  service_annotations = {
    "run.googleapis.com/ingress" : "internal-and-cloud-load-balancing"
  }

  service_labels = {
    "env_type" = var.env_name
  }

  template_annotations = {
    "autoscaling.knative.dev/maxScale" : local.cloudrun_app_max_scale,
    "autoscaling.knative.dev/minScale" : local.cloudrun_app_min_scale,
    "generated-by" : "terraform",
    "run.googleapis.com/client-name" : "terraform"
  }

  depends_on = [
    google_project_iam_member.run_gcr,
    google_project_iam_member.app_secretmanager,
    google_secret_manager_secret_version.secrets
  ]
}
// secrets.tf
resource "google_secret_manager_secret" "secrets" {
  for_each  = local.secrets_definition
  secret_id = each.key

  replication {
    automatic = true
  }
}

resource "google_secret_manager_secret_version" "secrets" {
  for_each    = local.secrets_definition
  secret      = google_secret_manager_secret.secrets[each.key].name
  secret_data = each.value
}

The trick here is to mount the secret as a volume (a file) rather than as an environment variable.
If you do that, point the secret version to latest, and read the file every time you need the secret's content, you will always read the latest version, without reloading the Cloud Run instance or redeploying a revision.
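For illustration, here is a minimal sketch of that approach using the plain google_cloud_run_v2_service resource rather than the module from the question (the service name, mount path, and the "app-config" secret key are placeholders, not values from the question):

resource "google_cloud_run_v2_service" "app" {
  name     = "app"
  location = local.region

  template {
    # Mount the Secret Manager secret as a file. With version = "latest",
    # new secret versions are picked up on read, with no redeploy needed.
    volumes {
      name = "app-secrets"
      secret {
        secret = google_secret_manager_secret.secrets["app-config"].secret_id
        items {
          version = "latest"
          path    = "app-config"
        }
      }
    }

    containers {
      image = local.cloudrun_image
      volume_mounts {
        name       = "app-secrets"
        mount_path = "/secrets" # the app reads /secrets/app-config whenever it needs the value
      }
    }
  }
}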

Related

AWS access key id provided does not exist in our records

I have an issue with Terraform that I really don't understand. Let me explain:
terraform init: all good
terraform fmt: all good
terraform validate: all good
However, when I run terraform plan I get the error from the title.
I set the AWS access key and secret key in the code to test faster (otherwise the values are passed in by GitLab).
If I leave them out of variable.tf and instead use the values I export before using the AWS CLI, everything works perfectly and I can deploy to AWS.
variable.tf
variable "aws_region" {
  default = "eu-central-1"
}

variable "bucket_name" {
  type    = string
  default = "test-bucket"
}

variable "aws_access_key" {
  default = "XXXXXXXXXXXXXXXXX"
}

variable "aws_secret_key" {
  default = "XXXXXXXXXXXXXX"
}
main.tf
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.9.0"
    }
  }
}

provider "aws" {
  region     = var.aws_region
  access_key = var.aws_access_key
  secret_key = var.aws_secret_key

  # Make faster by skipping some checks
  # https://registry.terraform.io/providers/hashicorp/aws/latest/docs#skip_get_ec2_platforms
  skip_get_ec2_platforms      = true
  skip_metadata_api_check     = true
  skip_region_validation      = true
  skip_credentials_validation = true
  skip_requesting_account_id  = true
}
provider.tf
module "s3-bucket" {
  source  = "terraform-aws-modules/s3-bucket/aws"
  version = "3.4.0"

  bucket        = var.bucket_name
  acl           = "private"
  force_destroy = true
  create_bucket = true

  versioning = {
    enabled = true
  }

  server_side_encryption_configuration = {
    rule = {
      apply_server_side_encryption_by_default = {
        sse_algorithm = "AES256"
      }
    }
  }
}
Thanks for your help, guys. I don't know what to do anymore.
Try using "region", "access_key", and "secret_key", without aws_ as the prefix, in your variable.tf and main.tf. Sometimes that prefix conflicts with Terraform code.
It looks like the cause is the aws_ prefix: when it is used in variable names, this error occurs.
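As a minimal sketch of that renaming (the values stay the placeholders from the question; in practice, pass real credentials in from GitLab or the environment rather than defaults):

variable.tf
variable "region" {
  default = "eu-central-1"
}

variable "access_key" {
  default = "XXXXXXXXXXXXXXXXX"
}

variable "secret_key" {
  default = "XXXXXXXXXXXXXX"
}

main.tf
provider "aws" {
  region     = var.region
  access_key = var.access_key
  secret_key = var.secret_key
}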

Error: Post "http://localhost/api/v1/namespaces/kube-system/configmaps": dial tcp 127.0.0.1:80

I'm trying to deploy a cluster with self-managed node groups. No matter what config options I use, I always end up with the following error:
Error: Post "http://localhost/api/v1/namespaces/kube-system/configmaps": dial tcp 127.0.0.1:80: connect: connection refused
  with module.eks-ssp.kubernetes_config_map.aws_auth[0]
  on .terraform/modules/eks-ssp/aws-auth-configmap.tf line 19, in resource "kubernetes_config_map" "aws_auth":
  resource "kubernetes_config_map" "aws_auth" {
The .tf file looks like this:
module "eks-ssp" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform"
# EKS CLUSTER
tenant = "DevOpsLabs2"
environment = "dev-test"
zone = ""
terraform_version = "Terraform v1.1.4"
# EKS Cluster VPC and Subnet mandatory config
vpc_id = "xxx"
private_subnet_ids = ["xxx","xxx", "xxx", "xxx"]
# EKS CONTROL PLANE VARIABLES
create_eks = true
kubernetes_version = "1.19"
# EKS SELF MANAGED NODE GROUPS
self_managed_node_groups = {
self_mg = {
node_group_name = "DevOpsLabs2"
subnet_ids = ["xxx","xxx", "xxx", "xxx"]
create_launch_template = true
launch_template_os = "bottlerocket" # amazonlinux2eks or bottlerocket or windows
custom_ami_id = "xxx"
public_ip = true # Enable only for public subnets
pre_userdata = <<-EOT
yum install -y amazon-ssm-agent \
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
disk_size = 20
instance_type = "t2.small"
desired_size = 2
max_size = 10
min_size = 2
capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot"
k8s_labels = {
Environment = "dev-test"
Zone = ""
WorkerType = "SELF_MANAGED_ON_DEMAND"
}
additional_tags = {
ExtraTag = "t2x-on-demand"
Name = "t2x-on-demand"
subnet_type = "public"
}
create_worker_security_group = false # Creates a dedicated sec group for this Node Group
},
}
}
module "eks-ssp-kubernetes-addons" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform//modules/kubernetes-addons"
eks_cluster_id = module.eks-ssp.eks_cluster_id
# EKS Addons
enable_amazon_eks_vpc_cni = true
enable_amazon_eks_coredns = true
enable_amazon_eks_kube_proxy = true
enable_amazon_eks_aws_ebs_csi_driver = true
#K8s Add-ons
enable_aws_load_balancer_controller = true
enable_metrics_server = true
enable_cluster_autoscaler = true
enable_aws_for_fluentbit = true
enable_argocd = true
enable_ingress_nginx = true
depends_on = [module.eks-ssp.self_managed_node_groups]
}
Providers:
terraform {
  backend "remote" {}

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.66.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.6.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
  }
}
Based on the example provided in the Github repo [1], my guess is that the provider configuration blocks are missing for this to work as expected. Looking at the code provided in the question, it seems that the following needs to be added:
data "aws_region" "current" {}
data "aws_eks_cluster" "cluster" {
name = module.eks-ssp.eks_cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks-ssp.eks_cluster_id
}
provider "aws" {
region = data.aws_region.current.id
alias = "default" # this should match the named profile you used if at all
}
provider "kubernetes" {
experiments {
manifest_resource = true
}
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
}
If helm is also required, I think the following block [2] needs to be added as well:
provider "helm" {
kubernetes {
host = data.aws_eks_cluster.cluster.endpoint
token = data.aws_eks_cluster_auth.cluster.token
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
}
}
The provider argument references for kubernetes and helm are in [3] and [4], respectively.
[1] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-self-managed-node-groups/main.tf#L23-L47
[2] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-eks-addons/main.tf#L49-L55
[3] https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs#argument-reference
[4] https://registry.terraform.io/providers/hashicorp/helm/latest/docs#argument-reference
The above answer from Marko E seems to be the fix; I just ran into this issue. After applying the above code, all together in a separate providers.tf file, Terraform now makes it past the error. I will post later as to whether the deployment makes it fully through.
For reference, I was able to go from 65 resources created down to 42 resources created before I hit this error. This was using the exact best-practice / sample configuration recommended at the top of the README from AWS Consulting here: https://github.com/aws-samples/aws-eks-accelerator-for-terraform
In my case, I was trying to deploy to a Kubernetes cluster (GKE) using Terraform. I replaced the kubeconfig path with the kubeconfig file's absolute path.
From:
provider "kubernetes" {
  config_path = "~/.kube/config"
  #config_context = "my-context"
}

To:
provider "kubernetes" {
  config_path = "/Users/<username>/.kube/config"
  #config_context = "my-context"
}
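A variant that avoids hard-coding the username (my own suggestion, not part of the original answer) is Terraform's built-in pathexpand() function, which expands "~" to the current user's home directory:

provider "kubernetes" {
  config_path = pathexpand("~/.kube/config") # resolved to the absolute path at plan time
  #config_context = "my-context"
}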

InvalidParameterException: Addon version specified is not supported

I've been trying to deploy a self-managed node EKS cluster for a while now, with no success. The error I'm stuck on now concerns EKS add-ons:
Error: error creating EKS Add-On (DevOpsLabs2b-dev-test--eks:kube-proxy): InvalidParameterException: Addon version specified is not supported, AddonName: "kube-proxy", ClusterName: "DevOpsLabs2b-dev-test--eks", Message_: "Addon version specified is not supported" }
  with module.eks-ssp-kubernetes-addons.module.aws_kube_proxy[0].aws_eks_addon.kube_proxy
  on .terraform/modules/eks-ssp-kubernetes-addons/modules/kubernetes-addons/aws-kube-proxy/main.tf line 19, in resource "aws_eks_addon" "kube_proxy":
This error repeats for coredns as well, but ebs_csi_driver throws:
Error: unexpected EKS Add-On (DevOpsLabs2b-dev-test--eks:aws-ebs-csi-driver) state returned during creation: timeout while waiting for state to become 'ACTIVE' (last state: 'DEGRADED', timeout: 20m0s)
  [WARNING] Running terraform apply again will remove the kubernetes add-on and attempt to create it again effectively purging previous add-on configuration
My main.tf looks like this:
terraform {
  backend "remote" {}

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.66.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.7.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.4.1"
    }
  }
}

data "aws_eks_cluster" "cluster" {
  name = module.eks-ssp.eks_cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  name = module.eks-ssp.eks_cluster_id
}

provider "aws" {
  access_key = "xxx"
  secret_key = "xxx"
  region     = "xxx"

  assume_role {
    role_arn = "xxx"
  }
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}

provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.cluster.endpoint
    token                  = data.aws_eks_cluster_auth.cluster.token
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  }
}
My eks.tf looks like this:
module "eks-ssp" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform"
# EKS CLUSTER
tenant = "DevOpsLabs2b"
environment = "dev-test"
zone = ""
terraform_version = "Terraform v1.1.4"
# EKS Cluster VPC and Subnet mandatory config
vpc_id = "xxx"
private_subnet_ids = ["xxx","xxx", "xxx", "xxx"]
# EKS CONTROL PLANE VARIABLES
create_eks = true
kubernetes_version = "1.19"
# EKS SELF MANAGED NODE GROUPS
self_managed_node_groups = {
self_mg = {
node_group_name = "DevOpsLabs2b"
subnet_ids = ["xxx","xxx", "xxx", "xxx"]
create_launch_template = true
launch_template_os = "bottlerocket" # amazonlinux2eks or bottlerocket or windows
custom_ami_id = "xxx"
public_ip = true # Enable only for public subnets
pre_userdata = <<-EOT
yum install -y amazon-ssm-agent \
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
disk_size = 10
instance_type = "t2.small"
desired_size = 2
max_size = 10
min_size = 0
capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot"
k8s_labels = {
Environment = "dev-test"
Zone = ""
WorkerType = "SELF_MANAGED_ON_DEMAND"
}
additional_tags = {
ExtraTag = "t2x-on-demand"
Name = "t2x-on-demand"
subnet_type = "public"
}
create_worker_security_group = false # Creates a dedicated sec group for this Node Group
},
}
}
module "eks-ssp-kubernetes-addons" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform//modules/kubernetes-addons"
eks_cluster_id = module.eks-ssp.eks_cluster_id
# EKS Addons
enable_amazon_eks_vpc_cni = true
enable_amazon_eks_coredns = true
enable_amazon_eks_kube_proxy = true
enable_amazon_eks_aws_ebs_csi_driver = true
#K8s Add-ons
enable_aws_load_balancer_controller = true
enable_metrics_server = true
enable_cluster_autoscaler = true
enable_aws_for_fluentbit = true
enable_argocd = true
enable_ingress_nginx = true
depends_on = [module.eks-ssp.self_managed_node_groups]
}
What exactly am I missing?
K8s is hard to get right sometimes. The examples on GitHub are shown for version 1.21 [1]. Because of that, if you leave only this:
enable_amazon_eks_vpc_cni            = true
enable_amazon_eks_coredns            = true
enable_amazon_eks_kube_proxy         = true
enable_amazon_eks_aws_ebs_csi_driver = true

# K8s Add-ons
enable_aws_load_balancer_controller = true
enable_metrics_server               = true
enable_cluster_autoscaler           = true
enable_aws_for_fluentbit            = true
enable_argocd                       = true
enable_ingress_nginx                = true
Images that will be downloaded by default will be the ones for K8s version 1.21 as shown in [2]. If you really need to use K8s version 1.19, then you will have to find the corresponding Helm charts for that version. Here's an example of how you can configure the images you need [3]:
amazon_eks_coredns_config = {
  addon_name               = "coredns"
  addon_version            = "v1.8.4-eksbuild.1"
  service_account          = "coredns"
  resolve_conflicts        = "OVERWRITE"
  namespace                = "kube-system"
  service_account_role_arn = ""
  additional_iam_policies  = []
  tags                     = {}
}
However, the CoreDNS version here (addon_version = v1.8.4-eksbuild.1) is used with K8s 1.21. To check the version you would need for 1.19, go here [4]. TL;DR: the CoreDNS version you would need to specify is 1.8.0. In order to make the add-on work for 1.19, for CoreDNS (and other add-ons based on the image version), you would have to have a code block like this:
enable_amazon_eks_coredns = true

# followed by

amazon_eks_coredns_config = {
  addon_name               = "coredns"
  addon_version            = "v1.8.0-eksbuild.1"
  service_account          = "coredns"
  resolve_conflicts        = "OVERWRITE"
  namespace                = "kube-system"
  service_account_role_arn = ""
  additional_iam_policies  = []
  tags                     = {}
}
For other EKS add-ons, you can find more information here [5]. If you click on the links from the Name column it will lead you straight to the AWS EKS documentation with the add-on image versions supported for the EKS versions currently supported by AWS (1.17 - 1.21).
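Since the error in the question is raised for kube-proxy, the same pattern should apply there too. This is an untested sketch that assumes the module exposes an analogous amazon_eks_kube_proxy_config variable; verify the exact 1.19 add-on version against the AWS documentation linked above:

enable_amazon_eks_kube_proxy = true

amazon_eks_kube_proxy_config = {
  addon_name               = "kube-proxy"
  addon_version            = "v1.19.6-eksbuild.2" # assumption: confirm the supported 1.19 version in the AWS docs
  service_account          = "kube-proxy"
  resolve_conflicts        = "OVERWRITE"
  namespace                = "kube-system"
  service_account_role_arn = ""
  additional_iam_policies  = []
  tags                     = {}
}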
Last but not least, a friendly piece of advice: never configure the AWS provider by hard-coding the access key and secret access key in the provider block. Use named profiles [6] or just use the default one. Instead of the block you currently have:
provider "aws" {
access_key = "xxx"
secret_key = "xxx"
region = "xxx"
assume_role {
role_arn = "xxx"
}
}
Switch to:
provider "aws" {
region = "yourdefaultregion"
profile = "yourprofilename"
}
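The profile itself lives in the standard AWS CLI credentials file [6]; a minimal sketch, where the profile name and key values are placeholders:

# ~/.aws/credentials
[yourprofilename]
aws_access_key_id     = AKIAXXXXXXXXXXXXXXXX
aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx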
[1] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-eks-addons/main.tf#L62
[2] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/modules/kubernetes-addons/aws-kube-proxy/local.tf#L5
[3] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-eks-addons/main.tf#L148-L157
[4] https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html
[5] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/docs/add-ons/managed-add-ons.md
[6] https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html

Terraform & GCP: Google kubernetes cluster problem: Can't see monitoring section (memory and cpu) inside workloads (deployments, statefulsets)

I have already spent 4 days testing all the configurations from the Kubernetes Terraform GCP module and I can't see the metrics of my workloads: it never shows me CPU or memory (even though a standard cluster created by default in the GUI has this enabled).
Here's my code:
resource "google_container_cluster" "default" {
provider = google-beta
name = var.name
project = var.project_id
description = "Vectux GKE Cluster"
location = var.zonal_region
remove_default_node_pool = true
initial_node_count = var.gke_num_nodes
master_auth {
#username = ""
#password = ""
client_certificate_config {
issue_client_certificate = false
}
}
timeouts {
create = "30m"
update = "40m"
}
logging_config {
enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"]
}
monitoring_config {
enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"]
}
}
resource "google_container_node_pool" "default" {
name = "${var.name}-node-pool"
project = var.project_id
location = var.zonal_region
node_locations = [var.zonal_region]
cluster = google_container_cluster.default.name
node_count = var.gke_num_nodes
node_config {
preemptible = true
machine_type = var.machine_type
disk_size_gb = var.disk_size_gb
service_account = google_service_account.default3.email
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/cloud-platform",
"compute-ro",
"storage-ro",
"service-management",
"service-control",
]
metadata = {
disable-legacy-endpoints = "true"
}
}
management {
auto_repair = true
auto_upgrade = true
}
}
resource "google_service_account" "default3" {
project = var.project_id
account_id = "terraform-vectux-33"
display_name = "tfvectux2"
provider = google-beta
}
Here's some info on the cluster (when I compare it against the standard one with metrics enabled, I see no differences):
And here's the workload view, without the metrics that I'd like to see:
As I mentioned in the comment, to solve your issue you must add a google_project_iam_binding resource and grant your service account a specific role: roles/monitoring.metricWriter. In the comments I mentioned that you could also grant roles/compute.admin, but after another test I ran, it's not necessary.
Below is a Terraform snippet I used to create a test cluster with a service account called sa. I've changed some fields in the node config. In your case, you would need to add the whole google_project_iam_binding block.
Terraform Snippet
### Creating Service Account
resource "google_service_account" "sa" {
  project      = "my-project-name"
  account_id   = "terraform-vectux-2"
  display_name = "tfvectux2"
  provider     = google-beta
}

### Binding Service Account with IAM
resource "google_project_iam_binding" "sa_binding_writer" {
  project = "my-project-name"
  role    = "roles/monitoring.metricWriter"
  members = [
    "serviceAccount:${google_service_account.sa.email}"
    ### in your case it will be "serviceAccount:${google_service_account.your-serviceaccount-name.email}"
  ]
}
resource "google_container_cluster" "default" {
provider = google-beta
name = "cluster-test-custom-sa"
project = "my-project-name"
description = "Vectux GKE Cluster"
location = "europe-west2"
remove_default_node_pool = true
initial_node_count = "1"
master_auth {
#username = ""
#password = ""
client_certificate_config {
issue_client_certificate = false
}
}
timeouts {
create = "30m"
update = "40m"
}
logging_config {
enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"]
}
monitoring_config {
enable_components = ["SYSTEM_COMPONENTS", "WORKLOADS"]
}
}
resource "google_container_node_pool" "default" {
name = "test-node-pool"
project = "my-project-name"
location = "europe-west2"
node_locations = ["europe-west2-a"]
cluster = google_container_cluster.default.name
node_count = "1"
node_config {
preemptible = "true"
machine_type = "e2-medium"
disk_size_gb = 50
service_account = google_service_account.sa.email
###service_account = google_service_account.your-serviceaccount-name.email
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/cloud-platform",
"compute-ro",
"storage-ro",
"service-management",
"service-control",
]
metadata = {
disable-legacy-endpoints = "true"
}
}
management {
auto_repair = true
auto_upgrade = true
}
}
My Screens:
Whole workload
Node Workload
Additional Information
If you added just roles/compute.admin, you might see the workload for the whole application, but you wouldn't be able to see each node's workload. With roles/monitoring.metricWriter you can see both the whole application workload and each node's workload. To achieve what you want, seeing workloads per node, you just need roles/monitoring.metricWriter.
You need to use google_project_iam_binding, because without it the role is never granted and your newly created service account will lack the permission. In short, your new SA will be visible in IAM & Admin > Service Accounts, but there will be no entry for it in IAM & Admin > IAM.
If you want more information about IAM and bindings in Terraform, please check the Terraform documentation.
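As a side note on the design choice (my own addition, not part of the original answer): google_project_iam_binding is authoritative for the role, so it overwrites any other members already bound to roles/monitoring.metricWriter; google_project_iam_member is the non-authoritative alternative if you only want to add this one service account:

resource "google_project_iam_member" "sa_metric_writer" {
  project = "my-project-name"
  role    = "roles/monitoring.metricWriter"
  member  = "serviceAccount:${google_service_account.sa.email}"
}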
As a last thing, please remember that the OAuth scope "https://www.googleapis.com/auth/cloud-platform" gives access to all GCP resources.

Terraform GCP when creating instance template, Error getting relative path for source image

I have a new issue with setting up a GCP instance template. I presume there was an update to the Terraform GCP provider.
resource "google_compute_instance_template" "backend-template" {
name = "${var.platform_name}-backend-instance-template"
description = "Template used for backend instances"
instance_description = "backend Instance"
machine_type = "n1-standard-1"
metadata_startup_script = "${lookup(var.startup_scripts,"backend-server")}"
disk {
boot = "true"
source_image = "backend-packer-image"
}
metadata {
APP_SETTINGS = "${var.app_settings}"
URL_STAGING = "${var.url_staging}"
API_URL_STAGING = "${var.api_url_staging}"
URL_PRODUCTION = "${var.url_production}"
API_URL_PRODUCTION = "${var.api_url_production}"
LOGIN_URL = "${var.login_url}"
API_URL = "${var.api_url}"
vault_server_IP = "${lookup(var.static_ips, "vault-server")}"
environment = "${var.environment}"
}
network_interface {
subnetwork = "${google_compute_subnetwork.private-fe-be.self_link}"
}
lifecycle {
create_before_destroy = true
}
tags = ["no-ip", "backend-server"]
service_account {
scopes = ["cloud-platform"]
}
}
This is the current error after running the script. However, the image backend-packer-image was already created and exists on GCP:
* google_compute_instance_template.backend-template: 1 error(s) occurred:
* google_compute_instance_template.backend-template: error flattening disks: Error getting relative path for source image: String was not a self link: global/images/backend-packer-image
I had the exact same problem today; I had to look directly into the pull request to find a way to use this correctly.
So, what I came up with is this: you must first make sure you are in the right project before typing this command, or you won't find the image you are looking for if it's a custom one:
gcloud compute images list --uri | grep "your image name"
This gives you the URI of your image; you can then use the full URI for the image and it will work.
Replace the image name with the URI in source_image:
resource "google_compute_instance_template" "backend-template" {
name = "${var.platform_name}-backend-instance-
template"
description = "Template used for backend instances"
instance_description = "backend Instance"
machine_type = "n1-standard-1"
metadata_startup_script = "${lookup(var.startup_scripts,"backend-server")}"
disk {
boot = "true"
source_image = "https://www.googleapis.com/compute/v1/projects/<project-name>/global/images/backend-packer-image"
}
metadata {
APP_SETTINGS = "${var.app_settings}"
URL_STAGING = "${var.url_staging}"
API_URL_STAGING = "${var.api_url_staging}"
URL_PRODUCTION = "${var.url_production}"
API_URL_PRODUCTION = "${var.api_url_production}"
LOGIN_URL = "${var.login_url}"
API_URL = "${var.api_url}"
vault_server_IP = "${lookup(var.static_ips, "vault-server")}"
environment = "${var.environment}"
}
network_interface {
subnetwork = "${google_compute_subnetwork.private-fe-be.self_link}"
}
lifecycle {
create_before_destroy = true
}
tags = ["no-ip", "backend-server"]
service_account {
scopes = ["cloud-platform"]
}
}
It is also possible to pin the Terraform scripts to previous provider versions:
provider "google"{
version = "<= 1.17"
credentials = "${var.service_account_path}"
project = "${var.gcloud_project}"
region = "${var.gcloud_region}"
}
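On Terraform 0.13 and later, the same pin is usually declared through required_providers rather than a version argument inside the provider block (a sketch, assuming the hashicorp/google provider and the same constraint):

terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "<= 1.17" # same constraint as above, expressed the 0.13+ way
    }
  }
}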