Google Cloud Cloud NAT Gateway

I want to give my "private" network (instances without external IPs) outbound internet access via Terraform, but I get the following error:
google_compute_router_nat.advanced-nat: Error patching router us-west2/my-router1: googleapi: Error 400: Invalid value for field 'resource.nats[0].subnetworks[0].name': 'test-us-west2-private-subnet'. The URL is malformed.
More details:
Reason: invalid, Message: Invalid value for field 'resource.nats[0].subnetworks[0].name': 'test-us-west2-private-subnet'. The URL is malformed.
Reason: invalid, Message: Invalid value for field 'resource.nats[0].natIps[0]': '10.0.0.0/16'. The URL is malformed.
Task: migrate a classic scheme from AWS to GCP: one VPC network, a bastion host in the public subnet, and all machines in the private subnet without external IPs, using a NAT gateway for the private subnet.
resource "google_compute_router" "router" {
name = "my-router1"
network = "${var.gcp_project_name}-net"
bgp {
asn = 64514
}
}
resource "google_compute_router_nat" "advanced-nat" {
name = "nat-1"
router = "${google_compute_router.router.name}"
region = "us-west2"
nat_ip_allocate_option = "MANUAL_ONLY"
nat_ips = ["10.0.0.0/16"]
source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
subnetwork {
name = "${var.gcp_project_name}-${var.gcp_region_name}-private-subnet"
}
}

The errors occur because both fields expect resource URLs: nat_ips must reference reserved external IP addresses (not a CIDR range), and the NAT subnetwork must be given by its self_link. The configuration below works:

# VPC
resource "google_compute_network" "gcp_project_name" {
  name                    = "${var.gcp_project_name}-net"
  auto_create_subnetworks = "false"
}

# PRIVATE SUBNET
resource "google_compute_subnetwork" "gcp_project_name_private_subnet" {
  name          = "${var.gcp_project_name}-${var.gcp_region_name}-private-subnet"
  ip_cidr_range = "10.0.0.0/16"
  network       = "${google_compute_network.gcp_project_name.self_link}"
  region        = "${var.gcp_region_name}"
}

# PUBLIC SUBNET
resource "google_compute_subnetwork" "gcp_project_name_public_subnet" {
  name          = "${var.gcp_project_name}-${var.gcp_region_name}-public-subnet"
  ip_cidr_range = "10.8.0.0/16"
  network       = "${google_compute_network.gcp_project_name.self_link}"
  region        = "${var.gcp_region_name}"
}

resource "google_compute_router" "router" {
  name    = "${var.gcp_router_name}"
  network = "${var.gcp_project_name}-net"
  region  = "${var.gcp_region_name}"
}

resource "google_compute_router_nat" "advanced-nat" {
  name                               = "${var.gcp_nat_name}"
  router                             = "${var.gcp_router_name}"
  region                             = "${var.gcp_region_name}"
  nat_ip_allocate_option             = "AUTO_ONLY"
  source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"

  subnetwork {
    name = "${google_compute_subnetwork.gcp_project_name_private_subnet.self_link}"
  }
}
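If you specifically need MANUAL_ONLY allocation as in the original attempt, nat_ips must reference reserved external IP addresses by self_link, not CIDR ranges. A minimal sketch under that assumption, reusing the router and private subnet above (the address name is illustrative):

# STATIC NAT IP (defaults to an EXTERNAL address)
resource "google_compute_address" "nat" {
  name   = "${var.gcp_project_name}-nat-ip"
  region = "${var.gcp_region_name}"
}

resource "google_compute_router_nat" "manual-nat" {
  name                               = "${var.gcp_nat_name}-manual"
  router                             = "${var.gcp_router_name}"
  region                             = "${var.gcp_region_name}"
  nat_ip_allocate_option             = "MANUAL_ONLY"
  # self_link of a reserved external address, not a CIDR range
  nat_ips                            = ["${google_compute_address.nat.self_link}"]
  source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"

  subnetwork {
    name = "${google_compute_subnetwork.gcp_project_name_private_subnet.self_link}"
  }
}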

Related

Google Cloud Platform instance in MIG cannot access Artifact Registry

I'm trying to deploy a managed instance group with a load balancer which will be running a web server container.
The container is stored in the Google Artifact Registry.
If I manually create a VM and define the container usage, it is successfully able to pull and activate the container.
When I try to create the managed instance group via Terraform, the VM neither pulls nor activates the container.
When I SSH to the VM and try to manually pull the container, I get the following error:
Error response from daemon: Get https://us-docker.pkg.dev/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
The only notable difference between the VM I created manually and the VM created by Terraform is that the manual VM has an external IP address. Not sure if this matters, and not sure how to add one in the Terraform file.
Below is my main.tf file. Can anyone tell me what I'm doing wrong?
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "3.53.0"
    }
    google-beta = {
      source  = "hashicorp/google-beta"
      version = "~> 4.0"
    }
  }
}

provider "google" {
  credentials = file("compute_lab2-347808-dab33a244827.json")
  project     = "lab2-347808"
  region      = "us-central1"
  zone        = "us-central1-f"
}

locals {
  google_load_balancer_ip_ranges = [
    "130.211.0.0/22",
    "35.191.0.0/16",
  ]
}
module "gce-container" {
source = "terraform-google-modules/container-vm/google"
version = "~> 2.0"
cos_image_name = "cos-stable-77-12371-89-0"
container = {
image = "us-docker.pkg.dev/lab2-347808/web-server-repo/web-server-image"
volumeMounts = [
{
mountPath = "/cache"
name = "tempfs-0"
readOnly = false
},
]
}
volumes = [
{
name = "tempfs-0"
emptyDir = {
medium = "Memory"
}
},
]
restart_policy = "Always"
}
resource "google_compute_firewall" "rules" {
project = "lab2-347808"
name = "allow-web-ports"
network = "default"
description = "Opens the relevant ports for the web server"
allow {
protocol = "tcp"
ports = ["80", "8080", "5432", "5000", "443"]
}
source_ranges = ["0.0.0.0/0"]
#source_ranges = local.google_load_balancer_ip_ranges
target_tags = ["web-server-ports"]
}
resource "google_compute_autoscaler" "default" {
name = "web-autoscaler"
zone = "us-central1-f"
target = google_compute_instance_group_manager.default.id
autoscaling_policy {
max_replicas = 10
min_replicas = 1
cooldown_period = 60
cpu_utilization {
target = 0.5
}
}
}
resource "google_compute_instance_template" "default" {
name = "my-web-server-template"
machine_type = "e2-medium"
can_ip_forward = false
tags = ["ssh", "http-server", "https-server", "web-server-ports"]
disk {
#source_image = "cos-cloud/cos-73-11647-217-0"
source_image = module.gce-container.source_image
}
network_interface {
network = "default"
}
service_account {
#scopes = ["userinfo-email", "compute-ro", "storage-ro"]
scopes = ["cloud-platform"]
}
metadata = {
gce-container-declaration = module.gce-container.metadata_value
}
}
resource "google_compute_target_pool" "default" {
name = "web-server-target-pool"
}
resource "google_compute_instance_group_manager" "default" {
name = "web-server-igm"
zone = "us-central1-f"
version {
instance_template = google_compute_instance_template.default.id
name = "primary"
}
target_pools = [google_compute_target_pool.default.id]
base_instance_name = "web-server-instance"
}
Your VM template doesn't assign public IPs, so the VMs can't reach public endpoints.
However, you have 3 ways to solve that issue:
Add a public IP to the VM template (bad idea)
Add a Cloud NAT on your VMs' private IP range to allow outgoing traffic to the internet (good idea; see the sketch after this list)
Activate Private Google Access on the subnet that hosts the VMs' private IP range. It creates a bridge to reach Google services without a public IP (my preferred idea) -> https://cloud.google.com/vpc/docs/configure-private-google-access
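A minimal sketch of the Cloud NAT option, assuming the instances stay in the default network in us-central1 (resource names here are illustrative):

resource "google_compute_router" "nat_router" {
  name    = "nat-router"
  network = "default"
  region  = "us-central1"
}

resource "google_compute_router_nat" "nat" {
  name   = "nat-gateway"
  router = google_compute_router.nat_router.name
  region = google_compute_router.nat_router.region

  # Let GCP allocate the external NAT IPs and cover every subnet in the region.
  nat_ip_allocate_option             = "AUTO_ONLY"
  source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
}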
Apparently I was missing the following access_config inside network_interface of the google_compute_instance_template:
network_interface {
  network = "default"

  access_config {
    network_tier = "PREMIUM"
  }
}

Elastic Beanstalk setup with public ALB and EC2 on private subnet failing health check

I am trying to set up a sample Elastic Beanstalk app in Terraform, with the ALB in public subnets (internet facing) and EC2 instances in private subnets. If I put the EC2 instances in public subnets, the Elastic Beanstalk app gets created successfully, but in private subnets I get the following error.
The EC2 instances failed to communicate with AWS Elastic Beanstalk, either because of configuration problems with the VPC or a failed EC2 instance. Check your VPC configuration and try launching the environment again.
aws_elastic_beanstalk_environment
setting {
  namespace = "aws:ec2:vpc"
  name      = "Subnets"
  value     = join(",", module.vpc.private_subnets)
}

setting {
  namespace = "aws:ec2:vpc"
  name      = "DBSubnets"
  value     = join(",", module.vpc.private_subnets)
}

setting {
  namespace = "aws:ec2:vpc"
  name      = "ELBSubnets"
  value     = join(",", module.vpc.public_subnets)
}

setting {
  namespace = "aws:ec2:vpc"
  name      = "AssociatePublicIpAddress"
  value     = "false"
}
I have also set up VPC endpoints as described in https://aws.amazon.com/premiumsupport/knowledge-center/elastic-beanstalk-instance-failure/
module "endpoints" {
source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints"
vpc_id = module.vpc.vpc_id
security_group_ids = [data.aws_security_group.default.id]
endpoints = {
dynamodb = {
service = "dynamodb",
service_type = "Gateway"
route_table_ids = module.vpc.private_route_table_ids
tags = { Name = "dynamodb-vpc-endpoint" }
},
s3 = {
service = "s3",
service_type = "Gateway"
route_table_ids = module.vpc.private_route_table_ids
tags = { Name = "s3-vpc-endpoint" }
},
elasticbeanstalk-app = {
# interface endpoint
service_name = aws_vpc_endpoint_service.elasticbeanstalk.service_name
subnet_ids = module.vpc.private_subnets
tags = { Name = "elasticbeanstalk-app-vpc-endpoint" }
},
elasticbeanstalk = {
# interface endpoint
service_name = "com.amazonaws.${var.aws_region}.elasticbeanstalk"
subnet_ids = module.vpc.private_subnets
private_dns_enabled = true
tags = { Name = "elasticbeanstalk-${var.aws_region}-elasticbeanstalk-vpc-endpoint" }
}
elasticbeanstalk-hc = {
# interface endpoint
service_name = "com.amazonaws.${var.aws_region}.elasticbeanstalk-health"
subnet_ids = module.vpc.private_subnets
private_dns_enabled = true
tags = { Name = "elasticbeanstalk-${var.aws_region}-elasticbeanstalk-health-vpc-endpoint" }
},
sqs = {
# interface endpoint
service_name = "com.amazonaws.${var.aws_region}.sqs"
subnet_ids = module.vpc.private_subnets
private_dns_enabled = true
tags = { Name = "elasticbeanstalk-${var.aws_region}-sqs-vpc-endpoint" }
},
cloudformation = {
# interface endpoint
service_name = "com.amazonaws.${var.aws_region}.cloudformation"
subnet_ids = module.vpc.private_subnets
private_dns_enabled = true
tags = { Name = "elasticbeanstalk-${var.aws_region}-cloudformation-vpc-endpoint" }
},
ec2 = {
# interface endpoint
service_name = "com.amazonaws.${var.aws_region}.ec2"
subnet_ids = module.vpc.private_subnets
private_dns_enabled = true
tags = { Name = "elasticbeanstalk-${var.aws_region}-ec2-vpc-endpoint" }
},
ec2messages = {
# interface endpoint
service_name = "com.amazonaws.${var.aws_region}.ec2messages"
subnet_ids = module.vpc.private_subnets
private_dns_enabled = true
tags = { Name = "elasticbeanstalk-${var.aws_region}-ec2messages-vpc-endpoint" }
},
}
}
I have a VPC endpoint even for the elasticbeanstalk-app. The setup is based on "AWS beanstalk PrivateLink not connecting".
Security group
data "aws_security_group" "default" {
name = "default"
vpc_id = module.vpc.vpc_id
}
data "aws_vpc_endpoint_service" "dynamodb" {
service = "dynamodb"
filter {
name = "service-type"
values = ["Gateway"]
}
}
data "aws_vpc_endpoint_service" "s3" {
service = "s3"
filter {
name = "service-type"
values = ["Gateway"]
}
}
In order to be able to connect to service endpoints such as com.amazonaws.[aws_region].elasticbeanstalk or com.amazonaws.[aws_region].elasticbeanstalk-health, you need a security group which allows HTTP/HTTPS inbound connections.
My assumption is that the aws_security_group.default security group, which is referenced from a data block, is the VPC's default security group, and it does not allow HTTP/HTTPS inbound connectivity.
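A minimal sketch of such a security group (the resource name is illustrative, and I'm assuming the VPC module exposes vpc_cidr_block); pass its id to the endpoints module via security_group_ids instead of the default group:

resource "aws_security_group" "vpc_endpoints" {
  name   = "vpc-endpoints"
  vpc_id = module.vpc.vpc_id

  # Allow HTTPS from anywhere inside the VPC so the instances
  # can reach the interface endpoints.
  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = [module.vpc.vpc_cidr_block]
  }
}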

Completely private certificate with AWS in Terraform?

I would like to create a certificate signed by AWS for use by internal services. The internal services are only visible inside my VPC. I don't want anything about the internal services, such as the subdomain, to leak externally.
This is the bit of Terraform I am unsure about:
resource "aws_acm_certificate" "internal" {
domain_name = "*.internal.example.org."
# What goes here?
}
The internal service consumes the certificate like this:
resource "aws_elastic_beanstalk_environment" "foo" {
name = "foo-env"
# ...
setting {
namespace = "aws:ec2:vpc"
name = "ELBScheme"
value = "internal"
resource = ""
}
setting {
namespace = "aws:elbv2:listener:443"
name = "SSLCertificateArns"
value = aws_acm_certificate.internal.arn
resource = ""
}
}
I then assign an internal DNS entry like this:
resource "aws_route53_zone" "private" {
name = "example.org."
vpc {
vpc_id = aws_vpc.main.id
}
}
resource "aws_route53_record" "private_cname_foo" {
zone_id = aws_route53_zone.private.zone_id
name = "foo.internal.example.org."
type = "CNAME"
ttl = "300"
records = [
aws_elastic_beanstalk_environment.foo.cname
]
}
How do I get AWS to create a certificate for me?
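One possible answer, offered as an assumption rather than something the question confirms: ACM can only issue certificates without public DNS validation when they are signed by an ACM Private CA, so the missing piece would be a certificate_authority_arn. A sketch:

resource "aws_acmpca_certificate_authority" "internal" {
  type = "ROOT"

  certificate_authority_configuration {
    key_algorithm     = "RSA_2048"
    signing_algorithm = "SHA256WITHRSA"

    subject {
      common_name = "internal.example.org"
    }
  }
}

resource "aws_acm_certificate" "internal" {
  # Note: no trailing dot; ACM domain names are not written in zone-file form.
  domain_name               = "*.internal.example.org"
  # Signing with a private CA needs no public DNS validation records and keeps
  # the subdomain out of public certificate transparency logs.
  certificate_authority_arn = aws_acmpca_certificate_authority.internal.arn
}

A freshly created root CA still needs its self-signed certificate installed (for example via aws_acmpca_certificate_authority_certificate) before it can issue anything; that step is omitted here.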

Want to create a Cloud SQL instance with private and public IP on a separate VPC in GCP using Terraform

I tried to configure a Cloud SQL instance with both private and public IPs in a separate VPC using Terraform. I can assign a private IP to the instance from the separate VPC, but I am unable to assign a public IP along with it.
Here is my code.
resource "google_compute_global_address" "private_ip_address" {
provider = google-beta
name = "private-ip-address"
purpose = "VPC_PEERING"
address_type = "INTERNAL"
prefix_length = 16
network = "${var.vpc_self_link}"
}
resource "google_service_networking_connection" "private_vpc_connection" {
provider = google-beta
network = "${var.vpc_self_link}"
service = "servicenetworking.googleapis.com"
reserved_peering_ranges = [google_compute_global_address.private_ip_address.name]
}
# create database instance
resource "google_sql_database_instance" "instance" {
name = "test-${var.project_id}"
region = "us-central1"
database_version = "${var.db_version}"
depends_on = [google_service_networking_connection.private_vpc_connection]
settings {
tier = "${var.db_tier}"
activation_policy = "${var.db_activation_policy}"
disk_autoresize = "${var.db_disk_autoresize}"
disk_size = "${var.db_disk_size}"
disk_type = "${var.db_disk_type}"
pricing_plan = "${var.db_pricing_plan}"
database_flags {
name = "slow_query_log"
value = "on"
}
ip_configuration {
ipv4_enabled = "false"
private_network = "projects/${var.project_id}/global/networks/${var.vpc_name}"
}
}
}
But when I try to pass the parameter below to assign a public IP, it gives an error because of the private_network flag:
ipv4_enabled = "true"
Please let me know how to figure out the issue with private and public IPs on a custom or separate VPC (not the default one).
According to the documentation, you can't:
ipv4_enabled - (Optional) Whether this Cloud SQL instance should be assigned a public IPV4 address. Either ipv4_enabled must be enabled or a private_network must be configured.
Open a feature request.
The question is old, a lot of updates have been done since, and you have probably solved it by then; nonetheless, I just wanted to confirm that the configuration below works with both public and private IPs, in both scenarios: creating the resources from scratch, or modifying an existing instance that previously used only a public IP.
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "3.5.0"
    }
  }

  backend "gcs" {
    bucket = "<BUCKET>"
    prefix = "<PREFIX>"
  }
}

provider "google" {
  project = var.project
  region  = var.region
  zone    = var.zone
}

### VPC
resource "google_compute_network" "private_network" {
  name                    = "private-network"
  auto_create_subnetworks = "false"
}

resource "google_compute_global_address" "private_ip_address" {
  name          = "private-ip-address"
  purpose       = "VPC_PEERING"
  address_type  = "INTERNAL"
  prefix_length = 16
  network       = google_compute_network.private_network.id
}

resource "google_service_networking_connection" "private_vpc_connection" {
  network                 = google_compute_network.private_network.id
  service                 = "servicenetworking.googleapis.com"
  reserved_peering_ranges = [google_compute_global_address.private_ip_address.name]
}

### INSTANCE
resource "google_sql_database_instance" "instance" {
  name             = "<INSTANCE>"
  region           = var.region
  database_version = "MYSQL_5_7"
  depends_on       = [google_service_networking_connection.private_vpc_connection]

  settings {
    tier = "db-f1-micro"

    ip_configuration {
      ipv4_enabled    = true
      private_network = google_compute_network.private_network.id

      authorized_networks {
        name  = "default"
        value = "0.0.0.0/0"
      }
    }
  }
}

### DATABASE
resource "google_sql_database" "database" {
  name     = "tf-db"
  instance = google_sql_database_instance.instance.name
}

### USER
resource "google_sql_user" "users" {
  name     = var.sql_user
  password = var.sql_pw
  instance = google_sql_database_instance.instance.name
}
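One way to confirm that both addresses were actually assigned (an addition of mine, not part of the original answer) is to output the instance's ip_address attribute:

### OUTPUT
output "instance_ip_addresses" {
  # List of address objects; type is "PRIMARY" for the public IP
  # and "PRIVATE" for the VPC-peered one.
  value = google_sql_database_instance.instance.ip_address
}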

Terraform Shared VPC on GCP - Static Internal IP address

I am attempting to write automation to deploy instances in a shared VPC on GCP. I have a host network project and a service project. I can create a static internal IP address resource in the host project (resource "google_compute_address" "internal"), in which I specify the VPC host project (NET_HUB_PROJ), but I am unable to use it when creating the instance. I receive the following error:
google_compute_instance.compute: Error creating instance: googleapi: Error 400: Invalid value for field 'resource.networkInterfaces[0].networkIP': '10.128.0.10'. IP address 'projects/prototype-network-hub/regions/us-central1/addresses/bh-int-ip' (10.128.0.10) is reserved by another project., invalid
My compute module:
data "google_compute_image" "image" {
name = "${var.IMAGE_NAME}"
project = "${var.IMAGE_PROJECT}"
}
resource "google_compute_address" "internal" {
name = "${var.NAME}-int-ip"
address_type = "INTERNAL"
address = "${var.PRIVATE_IP}"
subnetwork = "${var.NET_HUB_SUBNETWORK}"
region = "${var.NET_HUB_REGION}"
project = "${var.NET_HUB_PROJ}"
}
resource "google_compute_address" "external" {
count = "${var.EXT_IP_CREATE ? 1 : 0}"
name = "${var.NAME}-ext-ip"
address_type = "EXTERNAL"
region = "${var.REGION}"
}
resource "google_compute_instance" "compute" {
depends_on = ["google_compute_address.external"]
name = "${var.NAME}"
machine_type = "${var.MACHINE_TYPE}"
zone = "${var.ZONE}"
can_ip_forward = "${var.CAN_IP_FORWARD}"
deletion_protection ="${var.DELETION_PROTECTION}"
allow_stopping_for_update = "${var.ALLOW_STOPPING_FOR_UPDATE}"
tags = ["allow-ssh"]
metadata = {
"network" = "${var.NETWORK}"
"env" = "${var.ENV}"
"role" = "${var.ROLE}"
"region" = "${var.REGION}"
"zone" = "${var.ZONE}"
}
labels = {
"network" = "${var.NETWORK}"
"env" = "${var.ENV}"
"role" = "${var.ROLE}"
"region" = "${var.REGION}"
"zone" = "${var.ZONE}"
}
boot_disk {
device_name = "${var.NAME}"
auto_delete = "${var.BOOT_DISK_AUTO_DELETE}"
initialize_params {
size = "${var.BOOT_DISK_SIZE}"
type = "${var.BOOT_DISK_TYPE}"
image = "${data.google_compute_image.image.self_link}"
}
}
network_interface {
network_ip = "${google_compute_address.internal.address}"
subnetwork_project = "${var.NET_HUB_PROJ}"
subnetwork = "projects/prototype-network-hub/regions/us-central1/subnetworks/custom"
access_config {
nat_ip = "${element(concat(google_compute_address.external.*.address, list("")), 0)}"
}
}
service_account {
scopes = ["service-control", "service-management", "logging-write", "monitoring-write", "storage-ro", "https://www.googleapis.com/auth/trace.append" ]
}
}
The end goal would be to accomplish the following:
EDIT (new answer):
Per the GCP documentation, the static internal IP must belong to the service project (not the host network project as in your code) if you're looking to reserve an internal IP on a shared VPC in a different project. See here:
https://cloud.google.com/vpc/docs/provisioning-shared-vpc#reserve_internal_ip
Since the shared VPC is unlikely to be defined in your TF codebase, you'll have to use a data source to get the self_link of the subnetwork to use for google_compute_address. Something like the following:
data "google_compute_subnetwork" "subnet" {
name = "${var.NET_HUB_SUBNETWORK}"
project = "${var.NET_HUB_PROJ}"
region = "${var.NET_HUB_REGION}"
}
resource "google_compute_address" "internal" {
name = "${var.NAME}-int-ip"
address_type = "INTERNAL"
address = "${var.PRIVATE_IP}"
subnetwork = "${data.google_compute_subnetwork.subnet.self_link}"
}
This should create the resource under your service project, yet with an address within the designated subnet.
When you deploy your instance you should see it referenced under the internal_ip column on your VM instances tab for the assigned instance.
(old answer for posterity):
Unfortunately, google_compute_address doesn't have a subnetwork_project field like google_compute_instance does. A workaround is to provide a full URL in the subnetwork field of google_compute_address. Something like the following:
resource "google_compute_address" "internal" {
name = "${var.NAME}-int-ip"
address_type = "INTERNAL"
address = "${var.PRIVATE_IP}"
subnetwork = "https://www.googleapis.com/compute/v1/projects/${var.NET_HUB_PROJ}/regions/${var.NET_HUB_REGION}/subnetworks/${var.NET_HUB_SUBNETWORK}"
}
Adding my solution below:
resource "google_compute_address" "internal_ip" {
count = 1
name = "${local.cluster_name}-int-ip-${count.index}"
project = <service project id>
subnetwork = <host project subnet self_link>
address_type = "INTERNAL"
region = "asia-northeast1"
purpose = "GCE_ENDPOINT"
}
output "internal_ipaddr_info" {
value = google_compute_address.internal_ip
}
resource "google_compute_instance" "test" {
project = module.gcp_service_project.project_id
name = "eps-gce-vm-d-swmgr-ane1-test"
machine_type = "n1-standard-1"
zone = "asia-northeast1-a"
can_ip_forward = true
boot_disk {
initialize_params {
image = "centos7"
}
}
network_interface {
subnetwork = <host project subnet self_link>
network_ip = google_compute_address.internal_ip[0].self_link
}
}