parametrized terraform template - templates

I have a Terraform project that creates 99 virtual machines in OpenStack. I cannot use cloud-init, so I must modify the hostname of every machine.
hostname.tplt:
sudo sed -i -e "s/debian[7-9]/${host_name}/g" /etc/hostname
sudo invoke-rc.d hostname.sh start
sudo sed -i -e "s/127\.0\.1\.1.*/127.0.1.1\t${host_name}.${domain_name} ${host_name}/g" /etc/hosts
sudo apt-get update && sudo apt-get -y install dbus && sudo hostnamectl set-hostname ${host_name}
Part of main.tf:
data "template_file" "hostname_servers" {
template = "${file("templates/hostname.tplt")}"
vars {
host_name = "${format("%s-proxy-%02d", var.prefix_name, count.index+1)}"
domain_name = "${var.domain_name}"
}
}
Resource:
resource "openstack_compute_instance_v2" "proxy-instance" {
count = "${var.count_proxy}"
name = "${format("%s-proxy-%02d", var.prefix_name, count.index+1)}"
image_name = "${var.image}"
flavor_name = "${var.flavor_proxy}"
network {
name = "${format("%s-%s", var.prefix_name, var.network_name)}"
}
connection {
user = "${var.user}"
}
provisioner "remote-exec" {
inline = [
"${data.template_file.hostname_servers.rendered}"
]
}
}
The use case:
When I run terraform plan, it works for the proxy-instance resource, but I need to do this for all 99 machines. I don't want to duplicate the template data 99 times, and I don't know how to parametrize the template so it can be applied to all the machines.
Any idea?

If you set count to the same value on multiple resources then you can use count.index to create correspondences between the instances of one block and the instances of another, like this:
data "template_file" "hostname_servers" {
count = "${var.count_proxy}"
template = "${file("templates/hostname.tplt")}"
vars {
host_name = "${format("%s-proxy-%02d", var.prefix_name, count.index+1)}"
domain_name = "${var.domain_name}"
}
}
resource "openstack_compute_instance_v2" "proxy-instance" {
count = "${var.count_proxy}"
name = "${format("%s-proxy-%02d", var.prefix_name, count.index+1)}"
image_name = "${var.image}"
flavor_name = "${var.flavor_proxy}"
network {
name = "${format("%s-%s", var.prefix_name, var.network_name)}"
}
connection {
user = "${var.user}"
}
provisioner "remote-exec" {
inline = [
# use count.index to match the template instance corresponding
# to this compute instance.
"${data.template_file.hostname_servers.*.rendered[count.index]}"
]
}
}
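As a side note, on Terraform 0.12 and later the separate template_file data block can be avoided entirely by using the built-in templatefile() function, which renders one copy of the template per instance directly inside the provisioner. A minimal sketch, assuming the same variables and template path as above:
resource "openstack_compute_instance_v2" "proxy-instance" {
  count = var.count_proxy
  # ... name, image_name, flavor_name, network and connection as above ...

  provisioner "remote-exec" {
    inline = [
      # render the template for this specific instance
      templatefile("${path.module}/templates/hostname.tplt", {
        host_name   = format("%s-proxy-%02d", var.prefix_name, count.index + 1)
        domain_name = var.domain_name
      })
    ]
  }
}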

Related

Issue with custom log routing with ECS Fargate, FireLens and Fluent Bit to CloudWatch

I am trying to get logs from my app container to CloudWatch using FireLens and Fluent Bit by AWS, and it is not working.
The application writes logs to /opt/app/log/*.log.
Here are my task definition and Fluent Bit config file.
`
resource "aws_ecs_task_definition" "batching_task" {
family = "${var.project}-${var.environment}-node1"
container_definitions = jsonencode([
{
essential = true
image = "fluent-bit image"
repositoryCredentials = {
credentialsParameter = var.docker_login
}
name = "log_router"
firelensConfiguration = {
type = "fluentbit"
options={
enable-ecs-log-metadata ="false"
config-file-type = "file"
config-file-value = "/fluent-bit.conf"
}
}
logConfiguration = {
logDriver = "awslogs"
options = {
awslogs-group = "/ecs/app-${var.environment}"
awslogs-region = "us-east-1"
awslogs-create-group = "true"
awslogs-stream-prefix= "firelens"
}
}
mountPoints = [
{
"containerPath" : "/opt/app/log/",
"sourceVolume" : "var-log"
}
]
memoryReservation = 50
},
{
name = "node"
image = "app from private docker registry"
repositoryCredentials = {
credentialsParameter = var.docker_login
}
essential = true
mountPoints = [
{
"containerPath" : "/opt/app/log/",
"sourceVolume" : "var-log"
}
]
environment = [
{
name = "APP_PORT"
value = "80"
}
]
portMappings = [
{
containerPort = 80
hostPort = 80
protocol = "tcp"
}
]
logConfiguration = {
logDriver = "awsfirelens"
options = {
Name = "cloudwatch"
region = "us-east-1"
enable-ecs-log-metadata = "false"
log_group_name = "/ecs/app"
auto_create_group = "true"
log_stream_name = "$(ecs_task_id)"
retry_limit = "2"
}
}
dependsOn = [
{
"containerName": "log_router",
"condition": "START"
}
]
}
])
volume {
name = "var-log"
}
execution_role_arn = aws_iam_role.app.arn
task_role_arn = aws_iam_role.app.arn
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = var.fargate_cpu
memory = var.fargate_memory
}
`
Dockerfile from which the Fluent Bit image is created:
`
FROM amazon/aws-for-fluent-bit:latest
ADD fluent-bit.conf /fluent-bit.conf
ADD test.log /test.log
ENV AWS_REGION=us-east-1
ARG AWS_ACCESS_KEY_ID # you could give this a default value as well
ENV AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY # you could give this a default value as well
ENV AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
RUN mkdir ~/.aws && cd ~/.aws && touch credentials
RUN echo -e '\
[default]\n\
$AWS_ACCESS_KEY_ID\n\
$AWS_SECRET_ACCESS_KEY\
' > ~/.aws/credentials
`
Fluent-bit.conf
`
[SERVICE]
Flush 5
Deamon off
[INPUT]
# test log
Name tail
Path /opt/app/log/test.log
Tag test
[OUTPUT]
# test log
Name cloudwatch_logs
Match test*
region us-east-1
log_group_name /ecs/app
log_stream_name app-$(ecs_task_id)
auto_create_group true
log_retention_days 90
`
I have been following these docs:
https://github.com/aws-samples/amazon-ecs-firelens-under-the-hood/tree/9ecd26e02cb5e13bb5c312c651a3ac601f7f42cd/fluent-bit-log-pipeline
https://docs.fluentbit.io/manual/v/1.0/configuration/file
https://github.com/aws-samples/amazon-ecs-firelens-examples/blob/mainline/examples/fluent-bit/ecs-log-collection/task-definition-tail.json
https://docs.aws.amazon.com/AmazonECS/latest/developerguide/firelens-example-taskdefs.html
I have two log streams created, which are part of the task definition, and they only forward stdout logs; I need the app logs, which are not being forwarded.
The log streams that are part of the Fluent Bit config are not created.
Questions:
1) How does my log router sidecar container read logs from the app container's filesystem? Do I have to set anything for that?
2) Is my configuration file okay, or does it need anything else?
3) What am I missing?

Terraform: How to configure null_resource with multiple connections

Suppose that the ec2 module creates two servers dynamically, like:
module "ec2-web" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "4.1.4"
count = 2
name = "${local.appName}-webserver-${count.index + 1}"
.....
}
Now I have a null_resource config, which has only one connection:
resource "null_resource" "web-upload" {
depends_on = [module.ec2-web]
connection {
type = "ssh"
host = module.ec2-web[0].public_ip
user = "ec2-user"
password = ""
private_key = file("keypair/a-ssh-key.pem")
timeout = "2m"
}
provisioner "remote-exec" {
inline = [
"sudo mkdir -p /var/www/html",
"sudo chown -R ec2-user:ec2-user /var/www/html",
]
}
provisioner "file" {
source = "web/"
destination = "/var/www/html"
}
}
Now how should I update the config so that Terraform uploads the files to both servers?
You would use the same approach with the count meta-argument:
resource "null_resource" "web-upload" {
count = 2
connection {
type = "ssh"
host = module.ec2-web[count.index].public_ip
user = "ec2-user"
password = ""
private_key = file("keypair/a-ssh-key.pem")
timeout = "2m"
}
provisioner "remote-exec" {
inline = [
"sudo mkdir -p /var/www/html",
"sudo chown -R ec2-user:ec2-user /var/www/html",
]
}
provisioner "file" {
source = "web/"
destination = "/var/www/html"
}
}
The explicit dependency using the depends_on meta-argument is not required, because the reference to the module output (module.ec2-web[count.index].public_ip) is used. This means Terraform will wait for the module to finish creating its resources before attempting the null_resource.
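A small refinement, as a hedged sketch assuming Terraform 0.13+ (where modules support count): the null_resource count can be derived from the module itself instead of hard-coding 2, so both stay in sync if the number of servers changes:
resource "null_resource" "web-upload" {
  # one upload per EC2 instance created by the module
  count = length(module.ec2-web)

  connection {
    type        = "ssh"
    host        = module.ec2-web[count.index].public_ip
    user        = "ec2-user"
    private_key = file("keypair/a-ssh-key.pem")
    timeout     = "2m"
  }

  # ... remote-exec and file provisioners as above ...
}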

Error: Post "http://localhost/api/v1/namespaces/kube-system/configmaps": dial tcp 127.0.0.1:80

I'm trying to deploy a cluster with self-managed node groups. No matter what config options I use, I always end up with the following error:
Error: Post "http://localhost/api/v1/namespaces/kube-system/configmaps": dial tcp 127.0.0.1:80: connect: connection refused
  with module.eks-ssp.kubernetes_config_map.aws_auth[0]
  on .terraform/modules/eks-ssp/aws-auth-configmap.tf line 19, in resource "kubernetes_config_map" "aws_auth":
  resource "kubernetes_config_map" "aws_auth" {
The .tf file looks like this:
module "eks-ssp" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform"
# EKS CLUSTER
tenant = "DevOpsLabs2"
environment = "dev-test"
zone = ""
terraform_version = "Terraform v1.1.4"
# EKS Cluster VPC and Subnet mandatory config
vpc_id = "xxx"
private_subnet_ids = ["xxx","xxx", "xxx", "xxx"]
# EKS CONTROL PLANE VARIABLES
create_eks = true
kubernetes_version = "1.19"
# EKS SELF MANAGED NODE GROUPS
self_managed_node_groups = {
self_mg = {
node_group_name = "DevOpsLabs2"
subnet_ids = ["xxx","xxx", "xxx", "xxx"]
create_launch_template = true
launch_template_os = "bottlerocket" # amazonlinux2eks or bottlerocket or windows
custom_ami_id = "xxx"
public_ip = true # Enable only for public subnets
pre_userdata = <<-EOT
yum install -y amazon-ssm-agent \
systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent \
EOT
disk_size = 20
instance_type = "t2.small"
desired_size = 2
max_size = 10
min_size = 2
capacity_type = "" # Optional Use this only for SPOT capacity as capacity_type = "spot"
k8s_labels = {
Environment = "dev-test"
Zone = ""
WorkerType = "SELF_MANAGED_ON_DEMAND"
}
additional_tags = {
ExtraTag = "t2x-on-demand"
Name = "t2x-on-demand"
subnet_type = "public"
}
create_worker_security_group = false # Creates a dedicated sec group for this Node Group
},
}
}
module "eks-ssp-kubernetes-addons" {
source = "github.com/aws-samples/aws-eks-accelerator-for-terraform//modules/kubernetes-addons"
eks_cluster_id = module.eks-ssp.eks_cluster_id
# EKS Addons
enable_amazon_eks_vpc_cni = true
enable_amazon_eks_coredns = true
enable_amazon_eks_kube_proxy = true
enable_amazon_eks_aws_ebs_csi_driver = true
#K8s Add-ons
enable_aws_load_balancer_controller = true
enable_metrics_server = true
enable_cluster_autoscaler = true
enable_aws_for_fluentbit = true
enable_argocd = true
enable_ingress_nginx = true
depends_on = [module.eks-ssp.self_managed_node_groups]
}
Providers:
terraform {
backend "remote" {}
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.66.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.6.1"
}
helm = {
source = "hashicorp/helm"
version = ">= 2.4.1"
}
}
}
Based on the example provided in the GitHub repo [1], my guess is that the provider configuration blocks are missing for this to work as expected. Looking at the code provided in the question, it seems that the following needs to be added:
data "aws_region" "current" {}
data "aws_eks_cluster" "cluster" {
name = module.eks-ssp.eks_cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks-ssp.eks_cluster_id
}
provider "aws" {
region = data.aws_region.current.id
alias = "default" # this should match the named profile you used if at all
}
provider "kubernetes" {
experiments {
manifest_resource = true
}
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
}
If helm is also required, I think the following block [2] needs to be added as well:
provider "helm" {
kubernetes {
host = data.aws_eks_cluster.cluster.endpoint
token = data.aws_eks_cluster_auth.cluster.token
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
}
}
Provider argument reference for kubernetes and helm is in [3] and [4] respectively.
[1] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-self-managed-node-groups/main.tf#L23-L47
[2] https://github.com/aws-samples/aws-eks-accelerator-for-terraform/blob/main/examples/eks-cluster-with-eks-addons/main.tf#L49-L55
[3] https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs#argument-reference
[4] https://registry.terraform.io/providers/hashicorp/helm/latest/docs#argument-reference
The above answer from Marko E seems to fix it; I just ran into this issue. After applying the above code, altogether in a separate providers.tf file, Terraform now makes it past the error. Will post later as to whether the deployment makes it fully through.
For reference, I was able to go from 65 resources created down to 42 resources created before I hit this error. This was using the exact best-practice / sample configuration recommended at the top of the README from AWS Consulting here: https://github.com/aws-samples/aws-eks-accelerator-for-terraform
In my case I was trying to deploy to a Kubernetes cluster (GKE) using Terraform. I replaced the kubeconfig path with the kubeconfig file's absolute path.
From:
provider "kubernetes" {
config_path = "~/.kube/config"
#config_context = "my-context"
}
To:
provider "kubernetes" {
config_path = "/Users/<username>/.kube/config"
#config_context = "my-context"
}

How to run a bash script in a GCP VM using Terraform

Hey folks,
I want to run a script on a GCP machine; for that I created the resource file below:
disk = google_compute_disk.default2.id
instance = google_compute_instance.default.id
} # attach disk to vm
resource "google_compute_firewall" "firewall" {
name = "gritfy-firewall-externalssh"
network = "default"
allow {
protocol = "tcp"
ports = ["22"]
}
source_ranges = ["0.0.0.0/0"]
target_tags = ["externalssh"]
} # allow ssh
resource "google_compute_address" "static" {
name = "vm-public-address"
project = "fit-visitor-305606"
region = "asia-south1"
depends_on = [ google_compute_firewall.firewall ]
} # reserve ip
resource "google_compute_instance" "default" {
name = "new"
machine_type = "custom-8-16384"
zone = "asia-south1-a"
tags = ["foo", "bar"]
boot_disk {
initialize_params {
image = "centos-cloud/centos-7"
}
}
network_interface {
network = "default"
access_config {
nat_ip = google_compute_address.static.address
}
}
metadata = {
ssh-keys = "${var.user}:${file(var.publickeypath)}"
}
lifecycle {
ignore_changes = [attached_disk]
}
provisioner "file" {
source = "autoo.sh"
destination = "/tmp/autoo.sh"
}
provisioner "remote-exec" {
connection {
host = google_compute_address.static.address
type = "ssh"
user = var.user
timeout = "500s"
private_key = file(var.privatekeypath)
}
inline = [
"sudo yum -y install epel-release",
"sudo yum -y install nginx",
"sudo nginx -v",
]
}
} # Create VM
resource "google_compute_disk" "default2" {
name = "test-disk"
type = "pd-balanced"
zone = "asia-south1-a"
image = "centos-7-v20210609"
size = 100
} # Create Disk
Using this I am able to create the VM and the disk, and also to attach the disk to the VM, but I am not able to run my script.
The error logs are:
The private key part is working fine: the key is assigned to the VM, and when I try to connect with that key it connects, so the problem may only be with the provisioner part.
any help or guidance would be really helpful...
As the error message says, you need a connection configuration for the provisioner. You also need a remote-exec provisioner for running scripts.
provisioner "file" {
source = "autoo.sh"
destination = "/tmp/autoo.sh"
connection {
type = "ssh"
user = var.user
private_key = file(var.privatekeypath)
}
}
provisioner "remote-exec" {
inline = [
"chmod +x /tmp/autoo.sh",
"cd /tmp",
"./autoo.sh"
]
connection {
type = "ssh"
user = var.user
private_key = file(var.privatekeypath)
}
}
source: https://stackoverflow.com/a/36668395/5454632
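One caveat worth hedging: inside a google_compute_instance resource the connection block has no implicit host, so these connection blocks will most likely also need the address that the question's own remote-exec already uses, for example:
connection {
  type        = "ssh"
  user        = var.user
  private_key = file(var.privatekeypath)
  # reuse the reserved static address from the question's config
  host        = google_compute_address.static.address
}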

Terraform depends_on for provisioner file

I want the data "template_file" in the Terraform code below to execute after the provisioner "file" (basically an Ansible playbook) is copied to the EC2 instance. I am not able to successfully use "depends_on" in this scenario. Can someone please help me with how I can achieve this? Below is the sample code snippet.
resource "aws_eip" "opendj-source-ami-eip" {
instance = "${aws_instance.opendj-source-ami-server.id}"
vpc = true
connection {
host = "${aws_eip.opendj-source-ami-eip.public_ip}"
user = "ubuntu"
timeout = "3m"
agent = false
private_key = "${file(var.private_key)}"
}
provisioner "file" {
source = "./${var.copy_password_file}"
destination = "/home/ubuntu/${var.copy_password_file}"
}
provisioner "file" {
source = "./${var.ansible_playbook}"
destination = "/home/ubuntu/${var.ansible_playbook}"
}
}
data "template_file" "run-ansible-playbooks" {
template = <<-EOF
#!/bin/bash
ansible-playbook /home/ubuntu/${var.copy_password_file} && ansible-playbook /home/ubuntu/${var.ansible_playbook}
EOF
#depends_on = ["<< not sure what to put here>>"]
}
The correct format for depends_on references the resource as a whole, so in your case it would look like:
data "template_file" "run-ansible-playbooks" {
template = <<-EOF
#!/bin/bash
ansible-playbook /home/ubuntu/${var.copy_password_file} && ansible-playbook /home/ubuntu/${var.ansible_playbook}
EOF
depends_on = ["aws_eip.opendj-source-ami-eip"]
}
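For completeness, a hedged note: on Terraform 0.12 and later, depends_on takes bare resource references rather than quoted strings, so the modern equivalent would be:
data "template_file" "run-ansible-playbooks" {
  # ... template heredoc as above ...

  # reference the resource directly instead of a quoted string
  depends_on = [aws_eip.opendj-source-ami-eip]
}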