How to run a script in a GCP machine using Terraform - google-cloud-platform

I have a script file named auto.sh on my local laptop and I want to run this script in the GCP machine as soon as it is provisioned.
I created this Terraform file:
resource "google_compute_attached_disk" "default3" {
disk = google_compute_disk.default2.id
instance = google_compute_instance.default.id
}
resource "google_compute_instance" "default" {
name = "test"
machine_type = "custom-8-16384"
zone = "us-central1-a"
tags = ["foo", "bar"]
boot_disk {
initialize_params {
image = "centos-cloud/centos-7"
}
}
network_interface {
network = "default"
access_config {
}
}
metadata_startup_script = "touch abcd.txt"
lifecycle {
ignore_changes = [attached_disk]
}
}
resource "google_compute_disk" "default2" {
name = "test-disk"
type = "pd-balanced"
zone = "us-central1-a"
image = "centos-7-v20210609"
size = 100
}
This is working fine; now I want to run that script.

You should replace the
metadata_startup_script = "touch abcd.txt"
with either your script inline, if it is short enough, or with something like
metadata_startup_script = "${file("/path/to/your/file")}"
to load it from a file.
See the metadata_startup_script docs.
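For example, if auto.sh sits next to the Terraform configuration, a minimal sketch of the adjusted attribute could look like this (the path and file name are assumptions, not taken from the original question):
metadata_startup_script = file("${path.module}/auto.sh")
Terraform then passes the file contents into the instance metadata, and the script runs on first boot of the VM.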

Related

Is it possible to get the terraform state file from the remote backend and set it for modifications

I need to get the Terraform state file from the GCS backend and use it while I update the resource. Is it possible to overwrite the existing Terraform state file and fetch it when needed?
This is my main.tf:
provider "google" {
project = var.project
region = var.region
zone = var.zone
}
###################################################
########################################################
data "google_compute_default_service_account" "default" {
}
#create instance1
resource "random_id" "bucket_prefix" {
byte_length = 8
}
#create instance
resource "google_compute_instance" "vm_instance" {
name = var.instance_name
machine_type = var.machine_type
zone = var.zone
metadata_startup_script = var.script
allow_stopping_for_update = true
#metadata = {
# enable-oslogin = "TRUE"
#}
service_account {
email = data.google_compute_default_service_account.default.email
scopes = ["cloud-platform"]
}
boot_disk {
initialize_params {
image = var.image
#image = "ubuntu-2004-lts" # TensorFlow Enterprise
size = 30
}
}
# Install Flask
tags = ["http-server","allow-ssh-ingress-from-iap", "https-server"]
network_interface {
network = "default"
access_config {
}
}
guest_accelerator{
#type = "nvidia-tesla-t4" // Type of GPU attahced
type = var.type
count = var.gpu_count
#count = 2 // Num of GPU attached
}
scheduling{
on_host_maintenance = "TERMINATE"
automatic_restart = true// Need to terminate GPU on maintenance
}
}
This is my variables.tfvars:
instance_name = "test-vm-v5"
machine_type = "n1-standard-16"
region = "europe-west4"
zone = "europe-west4-a"
image = "tf28-np-pandas-nltk-scikit-py39"
#image = "debian-cloud/debian-10"
project = "my_project"
network = "default"
type = ""
gpu_count = "0"
I want to create multiple instances by changing variables.tfvars, and I need to modify an instance based on the name of the VM. See the sketch below.
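One possible pattern (a sketch, not part of the original post) is to keep one .tfvars file per VM and select it at apply time, optionally with a separate workspace per instance so each VM keeps its own state; the workspace and file names below are hypothetical:
terraform workspace new test-vm-v5
terraform apply -var-file="test-vm-v5.tfvars"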

GCP Compute Engine using Terraform

How do I provision multiple instances in GCP Compute Engine using Terraform? I've tried using the 'count' parameter in the resource block, but Terraform is not provisioning more than one instance because a VM with a particular name is only created once, on the first count iteration.
provider "google" {
version = "3.5.0"
credentials = file("battleground01-5c86f5873d44.json")
project = "battleground01"
region = "us-east1"
zone = "us-east1-b"
}
variable "node_count" {
default = "3"
}
resource "google_compute_instance" "appserver" {
count = "${var.node_count}"
name = "battleground"
machine_type = "f1-micro"
boot_disk {
initialize_params {
image = "debian-cloud/debian-9"
}
}
network_interface {
network = "default"
}
}
In order for this to work, you would have to make a slight change in the way you are naming your compute instances:
provider "google" {
version = "3.5.0"
credentials = file("battleground01-5c86f5873d44.json")
project = "battleground01"
region = "us-east1"
zone = "us-east1-b"
}
variable "node_count" {
type = number
default = 3
}
resource "google_compute_instance" "appserver" {
count = var.node_count
name = "battleground-${count.index}"
machine_type = "f1-micro"
boot_disk {
initialize_params {
image = "debian-cloud/debian-9"
}
}
network_interface {
network = "default"
}
}
As you are using the count meta-argument, the way to access the index of each instance is through the count.index attribute [1]. You had also set the node_count variable's default value to a string; even though Terraform would probably convert it to a number, make sure to use the right variable types.
[1] https://www.terraform.io/language/meta-arguments/count#the-count-object
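As a quick check (a hedged sketch, not part of the original answer), an output with a splat expression shows the distinct names the count-based resource now produces:
output "appserver_names" {
  value = google_compute_instance.appserver[*].name
}
# After apply this should show ["battleground-0", "battleground-1", "battleground-2"].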

How to run a bash script in a GCP VM using Terraform

Hey folks,
I want to run a script in a GCP machine. For that, I created the resource file below:
  disk     = google_compute_disk.default2.id
  instance = google_compute_instance.default.id
} # attach disk to vm
resource "google_compute_firewall" "firewall" {
name = "gritfy-firewall-externalssh"
network = "default"
allow {
protocol = "tcp"
ports = ["22"]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["externalssh"]
} # allow ssh
resource "google_compute_address" "static" {
name = "vm-public-address"
project = "fit-visitor-305606"
region = "asia-south1"
depends_on = [ google_compute_firewall.firewall ]
} # reserve ip
resource "google_compute_instance" "default" {
name = "new"
machine_type = "custom-8-16384"
zone = "asia-south1-a"
tags = ["foo", "bar"]
boot_disk {
initialize_params {
image = "centos-cloud/centos-7"
}
}
network_interface {
network = "default"
access_config {
nat_ip = google_compute_address.static.address
}
}
metadata = {
ssh-keys = "${var.user}:${file(var.publickeypath)}"
}
lifecycle {
ignore_changes = [attached_disk]
}
provisioner "file" {
source = "autoo.sh"
destination = "/tmp/autoo.sh"
}
provisioner "remote-exec" {
connection {
host = google_compute_address.static.address
type = "ssh"
user = var.user
timeout = "500s"
private_key = file(var.privatekeypath)
}
inline = [
"sudo yum -y install epel-release",
"sudo yum -y install nginx",
"sudo nginx -v",
]
}
} # Create VM
resource "google_compute_disk" "default2" {
name = "test-disk"
type = "pd-balanced"
zone = "asia-south1-a"
image = "centos-7-v20210609"
size = 100
} # Create Disk
Using this I am able to create the VM and the disk, and also to attach the disk to the VM, but I am not able to run my script.
The error logs are:
The private key part is working fine: the key is assigned to the VM, and when I try to connect with that key it connects, so the problem may be with the provisioner part only.
any help or guidance would be really helpful...
As the error message says, you need a connection configuration for the provisioner. You also need a remote-exec provisioner to run scripts.
provisioner "file" {
source = "autoo.sh"
destination = "/tmp/autoo.sh"
connection {
type = "ssh"
user = var.user
private_key = file(var.privatekeypath)
}
}
provisioner "remote-exec" {
inline = [
"chmod +x /tmp/autoo.sh",
"cd /tmp",
"./autoo.sh"
]
connection {
type = "ssh"
user = var.user
private_key = file(var.privatekeypath)
}
source: https://stackoverflow.com/a/36668395/5454632
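Note that, depending on the setup, each connection block may also need an explicit host; a minimal sketch reusing the static address already defined in the question:
connection {
  host        = google_compute_address.static.address
  type        = "ssh"
  user        = var.user
  private_key = file(var.privatekeypath)
}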

How to declare GCP Compute Engine images from the GCP Marketplace through Terraform

I have a request in my company to write a Terraform script to deploy a Compute Engine image from the GCP Marketplace. This is most likely a deep-learning image. Can anyone please help?
Example image - https://console.cloud.google.com/marketplace/details/click-to-deploy-images/deeplearning?q=compute%20engine%20images&id=8857b4a3-f60f-40b2-9b32-22b4428fd256
Please take a look at the following example:
resource "random_id" "instance_id" {
byte_length = 8
}
resource "google_compute_instance" "default" {
name = "vm-${random_id.instance_id.hex}"
machine_type = var.instance_type
zone = var.zone
boot_disk {
initialize_params {
image = "deeplearning-platform-release/tf-ent-latest-gpu" # TensorFlow Enterprise
size = 50 // 50 GB Storage
}
}
network_interface {
network = "default"
access_config {}
}
guest_accelerator {
type = var.gpu_type
count = var.gpu_count
}
scheduling {
automatic_restart = true
on_host_maintenance = "TERMINATE"
}
metadata = {
install-nvidia-driver = "True"
proxy-mode = "service_account"
}
tags = ["deeplearning-vm"]
service_account {
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
}
}
I recently added support for AI Platform Notebooks in Terraform as well.
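The example above references a few input variables that are not declared in the snippet; a minimal sketch of what those declarations might look like (the names match the example, the defaults are assumptions):
variable "instance_type" {
  type    = string
  default = "n1-standard-8"
}
variable "zone" {
  type    = string
  default = "us-central1-a"
}
variable "gpu_type" {
  type    = string
  default = "nvidia-tesla-t4"
}
variable "gpu_count" {
  type    = number
  default = 1
}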

Terraform: multiple VMs with multiple disks in GCP

I am trying to create a list of VMs, each with its own list of disks that need to be created and attached to that VM.
In the example below I have to create a test-d01 instance with 3 disks (test-d01-data, test-d01-data-disk, and test-d01-commitlog-disk), and similarly a test-d02 instance with 2 disks (test-d02-data and test-d02-data-disk).
For example, the VM_info structure below represents the required configuration:
VM_info = [
  {
    name = "test-d01"
    zone = "us-east1-b"
    disk = [
      {
        disk_name = "test-d01-data"
        disk_type = "pd-ssd"
        disk_size = "60"
      },
      {
        disk_name = "test-d01-data-disk"
        disk_type = "pd-standard"
        disk_size = "15"
      },
      {
        disk_name = "test-d01-commitlog-disk"
        disk_type = "pd-ssd"
        disk_size = "30"
      }
    ]
  },
  {
    name = "test-d02"
    zone = "us-east1-b"
    disk = [
      {
        disk_name = "test-d02-data"
        disk_type = "pd-ssd"
        disk_size = "60"
      },
      {
        disk_name = "test-d02-data-disk"
        disk_type = "pd-standard"
        disk_size = "15"
      }
    ]
  },
]
Great idea. When I use this and run terraform plan, I get:
var.disks
  Enter a value: 2
var.instance_name
  Enter a value: DDVE5

Error: Reference to undeclared resource

  on main.tf line 39, in resource "google_compute_attached_disk" "vm_attached_disk":
  39: instance = google_compute_instance.vm_instance.self_link

A managed resource "google_compute_instance" "vm_instance" has not been
declared in the root module.
cat main.tf

variable "instance_name" {}
variable "instance_zone" {
  default = "europe-west3-c"
}
variable "instance_type" {
  default = "n1-standard-1"
}
variable "instance_subnetwork" {
  default = "default"
}
variable "disks" {}

provider "google" {
  credentials = file("key.json")
  project     = "ddve50"
  region      = "europe-west3"
  zone        = "europe-west3-a"
}

resource "google_compute_instance" "vm-instance" {
  name         = "ddve-gcp-5-7-2-0-20-65"
  machine_type = "f1-micro"
  tags         = ["creator", "juergen"]

  boot_disk {
    initialize_params {
      image = "ddve"
      type  = "pd-ssd"
    }
  }

  network_interface {
    network = "default"
  }
}

resource "google_compute_attached_disk" "vm_attached_disk" {
  for_each = toset(var.disks)
  disk     = each.key
  instance = google_compute_instance.vm_instance.self_link
}
cat ../my_instances.tf

resource "google_compute_disk" "ddve-gcp-5-7-2-0-20-65-nvram" {
  name = "ddve-gcp-5-7-2-0-20-65-nvram"
  type = "pd-ssd"
  size = 10
}

resource "google_compute_disk" "ddve-gcp-5-7-2-0-20-65-m1" {
  name = "ddve-gcp-5-7-2-0-20-65-m1"
  type = "pd-standard"
  size = 1024
}

module "ddve-test-d01" {
  source        = "./instance"
  instance_name = "ddve-test-d01"
  disks = [
    google_compute_disk.ddve-gcp-5-7-2-0-20-65-nvram,
    google_compute_disk.ddve-gcp-5-7-2-0-20-65-m1
  ]
}
Terraform by HashiCorp > Compute Engine > Resources > google_compute_attached_disk:
Persistent disks can be attached to a compute instance using the
attached_disk section within the compute instance configuration.
However there may be situations where managing the attached disks via
the compute instance config isn't preferable or possible, such as
attaching dynamic numbers of disks using the count variable.
Therefore, a straightforward approach of creating a google_compute_instance with the attached_disk attribute may not be applicable in this case.
In brief, the idea is to create the persistent disks first, then create the VM instances and attach the new disks to the freshly created instances. This test deployment consists of a root configuration file my_instances.tf and a reusable module ./instance/main.tf.
1. Persistent disks google_compute_disk can be created independently. For the sake of simplicity, 5 literal blocks are used in the root file my_instances.tf.
2. Call the reusable module instance/main.tf and pass the VM attributes and the list of disks to the module so that it can:
Create a VM instance google_compute_instance;
Use binding objects google_compute_attached_disk to attach the new empty disks to the freshly created VM instance.
To process the disk list, the for_each meta-argument is used.
Since for_each accepts only a map or a set of strings, the toset function is used to convert the list of disks to a set.
Terraform by HashiCorp > Compute Engine > Data Sources > google_compute_instance
Terraform by HashiCorp > Compute Engine > Resources > google_compute_disk
Terraform by HashiCorp > Compute Engine > Resources > google_compute_attached_disk
Terraform by HashiCorp > Configuration language > Resources > lifecycle.ignore_changes
Terraform by HashiCorp > Configuration language > Resources > When to Use for_each Instead of count
Terraform by HashiCorp > Configuration language > Resources > The each Object
Terraform by HashiCorp > Configuration language > Resources > Using Sets
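Because for_each needs a set of strings, it can also help to give the module's disks input an explicit type constraint; a small sketch (the type constraint is an assumption, the module below declares the variable without one):
variable "disks" {
  type    = list(string)
  default = []
}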
$ cat my_instances.tf

resource "google_compute_disk" "test-d01-data" {
  name = "test-d01-data"
  type = "pd-ssd"
  size = 60
  zone = "europe-west3-c"
}

resource "google_compute_disk" "test-d01-data-disk" {
  name = "test-d01-data-disk"
  type = "pd-standard"
  size = 15
  zone = "europe-west3-c"
}

resource "google_compute_disk" "test-d01-commitlog-disk" {
  name = "test-d01-commitlog-disk"
  type = "pd-ssd"
  size = 30
  zone = "europe-west3-c"
}

resource "google_compute_disk" "test-d02-data" {
  name = "test-d02-data"
  type = "pd-ssd"
  size = 60
  zone = "europe-west3-c"
}

resource "google_compute_disk" "test-d02-data-disk" {
  name = "test-d02-data-disk"
  type = "pd-standard"
  size = 15
  zone = "europe-west3-c"
}

module "test-d01" {
  source        = "./instance"
  instance_name = "test-d01"
  disks = [
    google_compute_disk.test-d01-data.name,
    google_compute_disk.test-d01-data-disk.name,
    google_compute_disk.test-d01-commitlog-disk.name
  ]
}

module "test-d02" {
  source        = "./instance"
  instance_name = "test-d02"
  disks = [
    google_compute_disk.test-d02-data.name,
    google_compute_disk.test-d02-data-disk.name
  ]
}
$ cat instance/main.tf

variable "instance_name" {}
variable "instance_zone" {
  default = "europe-west3-c"
}
variable "instance_type" {
  default = "n1-standard-1"
}
variable "instance_subnetwork" {
  default = "default"
}
variable "disks" {}

resource "google_compute_instance" "vm_instance" {
  name         = var.instance_name
  zone         = var.instance_zone
  machine_type = var.instance_type

  boot_disk {
    initialize_params {
      image = "debian-cloud/debian-9"
    }
  }

  network_interface {
    subnetwork = "${var.instance_subnetwork}"
    access_config {
      # Allocate a one-to-one NAT IP to the instance
    }
  }

  lifecycle {
    ignore_changes = [attached_disk]
  }
}

resource "google_compute_attached_disk" "vm_attached_disk" {
  for_each = toset(var.disks)
  disk     = each.key
  instance = google_compute_instance.vm_instance.self_link
}
$ terraform fmt
$ terraform init
$ terraform plan
$ terraform apply
You can also get the disks attached with:
resource "google_compute_disk" "default" {
name = "test-disk"
type = "pd-ssd"
zone = var.instance_zone
# image = "debian-9-stretch-v20200805"
labels = {
environment = "dev"
}
physical_block_size_bytes = 4096
}
resource "google_compute_attached_disk" "vm_attached_disk" {
count = var.disks
disk = google_compute_disk.default.id
instance = google_compute_instance.vm-instance.id
}