I am getting this error when I run Terraform: `Error: Error creating IAM Role s3_access: MalformedPolicyDocument: Has prohibited field Resource, status code: 400`. What am I missing in the IAM role? I am using this role to fetch a certain file from S3, and I want to give the role limited permissions, such as fetching only certain bucket contents.
resource "aws_instance" "web" {
count = var.ec2_count
ami = var.ami_id
instance_type = var.instance_type
subnet_id = var.subnet_id
key_name = var.key_name
source_dest_check = false
associate_public_ip_address = true
#user_data = "${file("userdata.sh")}"1
security_groups = [aws_security_group.ec2_sg.id]
user_data = "${file("${path.module}/template/userdata.sh")}"
tags = {
Name = "Webserver"
}
}
resource "aws_iam_role" "s3_access" {
name = "s3_access"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "1",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": [
"s3:ListBucket",
"s3:GetObjectVersion",
"s3:GetObject",
"s3:GetBucketVersioning",
"s3:GetBucketLocation"
],
"Resource": [
"arn:aws:s3:::webserver/*",
"arn:aws:s3:::webserver"
]
}
]
}
EOF
tags = {
tag-key = "tag-value"
}
}
resource "aws_security_group" "ec2_sg" {
name = "ec2-sg"
description = "Allow TLS inbound traffic"
vpc_id = var.vpc_id
ingress {
description = "incoming for ec2-instance"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "ec2-sg"
}
}
Any kind of help would be appreciated. I have tried solving it myself, but I am stuck.
resource "aws_iam_role" "s3_access" {
name = "s3_access"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "1",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
tags = {
tag-key = "tag-value"
}
}
resource "aws_iam_role_policy" "s3_access_policy" {
name = "s3_access_policy"
role = "${aws_iam_role.s3_access.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "2",
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:GetObjectVersion",
"s3:GetObject",
"s3:GetBucketVersioning",
"s3:GetBucketLocation"
],
"Resource": [
"arn:aws:s3:::webserver/*",
"arn:aws:s3:::webserver"
]
}
]
}
EOF
}
Related
I have Terraform code that almost successfully builds an AWS Batch Compute Environment with an FSx file share mounted to it.
However, despite passing the aws_fsx_lustre_file_system module a deployment type of PERSISTENT_2:
resource "aws_fsx_lustre_file_system" "storage" {
storage_capacity = 1200
subnet_ids = [var.subnet_id]
deployment_type = "PERSISTENT_2"
per_unit_storage_throughput = 250
}
the FSx file system is only spun up as a scratch (SCRATCH deployment type) drive (viewable via the AWS Management Console).
What additional information can I post here to help debug why this Terraform code is not respecting the deployment_type parameter?
Full code:
// ==========================================================
// Module input variables
// ----------------------------------------------------------
variable "region" {
type = string
}
variable "compute_environment_name" {
type = string
}
variable "job_queue_name" {
type = string
}
variable "max_vcpus" {
type = number
}
variable "vpc_id" {
type = string
}
variable "subnet_id" {
type = string
}
variable "security_group_id" {
type = string
}
variable "mounted_storage_bucket" {
type = string
}
// ==========================================================
// Components for batch processing for AWS Batch
// ----------------------------------------------------------
resource "aws_iam_role" "batch_role" {
name = "batch_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement":
[
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
}
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# Attach the AWS-managed Batch service policy to the Batch role.
resource "aws_iam_role_policy_attachment" "batch_service_role" {
role = aws_iam_role.batch_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
}
# Also attach the ECS-for-EC2 managed policy (container agent permissions).
resource "aws_iam_role_policy_attachment" "elastic_container_service_role" {
role = aws_iam_role.batch_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# Security Group for batch processing.
# NOTE(review): egress is TCP-only — UDP traffic (e.g. DNS to non-VPC
# resolvers, NTP) would be blocked. Confirm this restriction is intentional.
resource "aws_security_group" "batch_security_group" {
name = "batch_security_group"
description = "AWS Batch Security Group for batch jobs"
vpc_id = var.vpc_id
egress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
created-by = "Terraform"
}
}
# IAM Role for the underlying EC2 instances (trusted by the EC2 service only,
# which is the correct shape for an instance role).
resource "aws_iam_role" "ec2_role" {
name = "ec2_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# Instance profile wrapping the role so EC2 instances can carry it.
resource "aws_iam_instance_profile" "ec2_profile" {
name = "ec2_profile"
role = aws_iam_role.ec2_role.name
}
# Grant the ECS container-agent permissions to instances using this role.
resource "aws_iam_role_policy_attachment" "ec2_policy_attachment" {
role = aws_iam_role.ec2_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# IAM Role assumed by Batch job containers (ECS task role): trusted by
# ecs-tasks.amazonaws.com, which is the standard principal for task roles.
resource "aws_iam_role" "job_role" {
name = "job_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement":
[
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
}
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# S3 read/write policy scoped to the data repository bucket: bucket-level
# actions need the bucket ARN, object-level actions need the "/*" ARN.
resource "aws_iam_policy" "s3_policy" {
name = "s3_policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:Get*",
"s3:List*",
"s3:Put*"
],
"Resource": [
"arn:aws:s3:::${var.mounted_storage_bucket}",
"arn:aws:s3:::${var.mounted_storage_bucket}/*"
]
}
]
}
EOF
}
# NOTE(review): "ecs:*" on "*" is very broad for a job role — consider
# narrowing to the specific ECS actions the jobs actually need.
resource "aws_iam_policy" "ecs_policy" {
name = "ecs_policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecs:*"
],
"Resource": [
"*"
]
}
]
}
EOF
}
# Attach both policies to the job role.
resource "aws_iam_role_policy_attachment" "job_policy_attachment_s3" {
role = aws_iam_role.job_role.name
policy_arn = aws_iam_policy.s3_policy.arn
}
resource "aws_iam_role_policy_attachment" "job_policy_attachment_ecs" {
role = aws_iam_role.job_role.name
policy_arn = aws_iam_policy.ecs_policy.arn
}
resource "aws_fsx_lustre_file_system" "storage" {
storage_capacity = 1200
subnet_ids = [var.subnet_id]
deployment_type = "PERSISTENT_2"
per_unit_storage_throughput = 250
}
resource "aws_fsx_data_repository_association" "storage_association" {
file_system_id = aws_fsx_lustre_file_system.storage.id
data_repository_path = "s3://${var.mounted_storage_bucket}"
file_system_path = "/data/fsx"
s3 {
auto_export_policy {
events = ["NEW", "CHANGED", "DELETED"]
}
auto_import_policy {
events = ["NEW", "CHANGED", "DELETED"]
}
}
}
resource "aws_launch_template" "launch_template" {
name = "launch_template"
update_default_version = true
user_data = base64encode(<<EOF
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="
--==MYBOUNDARY==
Content-Type: text/cloud-config; charset="us-ascii"
runcmd:
- region=${var.region}
- amazon-linux-extras install -y lustre2.10
- mkdir -p /data/fsx
- mount -t lustre ${aws_fsx_lustre_file_system.storage.dns_name}#tcp:fsx" /data/fsx
--==MYBOUNDARY==--
EOF
)
}
// ==========================================================
// Batch setup
// - compute environment
// - job queue
// ----------------------------------------------------------
resource "aws_iam_role" "ecs_instance_role" {
name = "ecs_instance_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "ecs_instance_role" {
role = "${aws_iam_role.ecs_instance_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs_instance_role" {
name = "ecs_instance_role"
role = "${aws_iam_role.ecs_instance_role.name}"
}
resource "aws_iam_role" "aws_batch_service_role" {
name = "aws_batch_service_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "aws_batch_service_role" {
role = "${aws_iam_role.aws_batch_service_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
}
resource "aws_batch_compute_environment" "batch_environment" {
compute_environment_name = var.compute_environment_name
compute_resources {
instance_role = "${aws_iam_instance_profile.ecs_instance_role.arn}"
launch_template {
launch_template_name = aws_launch_template.launch_template.name
version = "$Latest"
}
instance_type = [
"c6g.large",
"c6g.xlarge",
"c6g.2xlarge",
"c6g.4xlarge",
"c6g.8xlarge",
"c6g.12xlarge"
]
max_vcpus = 16
min_vcpus = 0
security_group_ids = [
aws_security_group.batch_security_group.id,
]
subnets = [
var.subnet_id
]
type = "EC2"
}
service_role = "${aws_iam_role.aws_batch_service_role.arn}"
type = "MANAGED"
depends_on = [aws_iam_role_policy_attachment.aws_batch_service_role]
tags = {
created-by = "Terraform"
}
}
resource "aws_batch_job_queue" "job_queue" {
name = "job_queue"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.batch_environment.arn
]
depends_on = [aws_batch_compute_environment.batch_environment]
tags = {
created-by = "Terraform"
}
}
output "batch_compute_environment_id" {
value = aws_batch_compute_environment.batch_environment.id
}
output "batch_job_queue_id" {
value = aws_batch_job_queue.job_queue.id
}
output "batch_storage_mount_target" {
value = aws_fsx_lustre_file_system.storage.arn
}
output "batch_storage_mount_target_mount" {
value = aws_fsx_lustre_file_system.storage.mount_name
}
I created EKS cluster via terraform:
resource "aws_eks_cluster" "eks-cluster" {
name = "tf-example"
role_arn = aws_iam_role.eks_role.arn
vpc_config {
subnet_ids = var.subnet_ids
}
depends_on = [
aws_iam_role_policy_attachment.eks-cluster-policy,
aws_iam_role_policy_attachment.eks-cluster-security-group-policy
]
}
resource "aws_eks_node_group" "eks-node-group" {
cluster_name = aws_eks_cluster.eks-cluster.name
instance_types = var.instance_types
node_group_name = "tf-example"
node_role_arn = aws_iam_role.eks-node-group.arn
subnet_ids = var.subnet_ids
scaling_config {
desired_size = 1
max_size = 1
min_size = 1
}
update_config {
max_unavailable = 1
}
depends_on = [
aws_iam_role_policy_attachment.eks-node-group-worker-node-policy,
aws_iam_role_policy_attachment.eks-node-group-cni-policy,
aws_iam_role_policy_attachment.eks-node-group-registry-read-only-policy
]
}
The IAM roles and policies look like
resource "aws_iam_role" "eks_role" {
name = "tf-${var.stack_name}-eks-cluster-role"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com",
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
resource "aws_iam_role" "eks-node-group" {
name = "tf-${var.stack_name}-eks-node-group-role"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"eks.amazonaws.com",
"ec2.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
When I run the aws eks update-kubeconfig --name cluster_name --region region_name
I get
error: You must be logged in to the server (Unauthorized)
I'm new to AWS and used to GCP. What policies does my user need? What role do I need to even run any kubectl command?
I reference the code at https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile, I created iam.tf file. I tried to attach the policy to an ec2 instance. I got an error:
aws_iam_role.role: Creating...
Error: failed creating IAM Role (jenkins_server_role): MalformedPolicyDocument: Has prohibited field Resource
status code: 400, request id: c2b8db57-357f-4657-a692-a3e6026a6b7b
with aws_iam_role.role,
on iam.tf line 6, in resource "aws_iam_role" "role":
6: resource "aws_iam_role" "role" Releasing state lock. This may take a few moments...
ERRO[0011] Terraform invocation failed in /home/pluo/works/infra/jenkins
ERRO[0011] 1 error occurred:
* exit status 1
Here is the iam.tf:
resource "aws_iam_instance_profile" "jenkins_server" {
name = "jenkins_server"
role = aws_iam_role.role.name
}
resource "aws_iam_role" "role" {
name = "jenkins_server_role"
path = "/"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "ec2:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "elasticloadbalancing:*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "cloudwatch:*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "autoscaling:*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": "iam:CreateServiceLinkedRole",
"Resource": "*",
"Condition": {
"StringEquals": {
"iam:AWSServiceName": [
"autoscaling.amazonaws.com",
"ec2scheduled.amazonaws.com",
"elasticloadbalancing.amazonaws.com",
"spot.amazonaws.com",
"spotfleet.amazonaws.com",
"transitgateway.amazonaws.com"
]
}
}
}
]
}
EOF
}
Here is the module to create ec2 instance.
module "ec2" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "4.1.4"
name = var.ec2_name
ami = var.ami
instance_type = var.instance_type
availability_zone = var.availability_zone
subnet_id = data.terraform_remote_state.vpc.outputs.public_subnets[0]
vpc_security_group_ids = [aws_security_group.WebServerSG.id]
associate_public_ip_address = true
key_name = var.key_name
monitoring = true
iam_instance_profile = aws_iam_instance_profile.jenkins_server.name
enable_volume_tags = false
root_block_device = [
{
encrypted = true
volume_type = "gp3"
throughput = 200
volume_size = 100
tags = {
Name = "jenkins_server"
}
},
]
tags = {
Name = "WebServerSG"
}
}
Your assume_role_policy is incorrect. For ec2 instances it should be:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
Then your current `assume_role_policy` contents should be written in an `aws_iam_role_policy` instead.
Just to provide more information on top of the good answer from Marcin. In AWS IAM roles there are two kinds of policies you might specify (which at first might be confusing):
assume role policy - i.e. "who"/what can assume this role, as Marcin mentions here you would like to specify that EC2 instances can assume this role - i.e. act in this role
role policy - i.e. what can this role do; in your case it will be all these elasticloadbalancing, cloudwatch, etc.
So, putting that into Terraform perspective:
the former should go to assume_role_policy of a aws_iam_role
the latter should go into a separate `aws_iam_role_policy` resource
I'm trying to figure out a "one command" approach to spin up spot instances with terraform.
I've managed to get this far:
resource "aws_iam_role" "spot_role" {
name_prefix = "spot_role_"
assume_role_policy = <<-EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "spotfleet.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "spot_policy" {
name_prefix = "spot_policy_"
description = "EC2 Spot Fleet Policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeImages",
"ec2:DescribeSubnets",
"ec2:RequestSpotInstances",
"ec2:TerminateInstances",
"ec2:DescribeInstanceStatus",
"ec2:CreateTags",
"ec2:RunInstances"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": "iam:PassRole",
"Condition": {
"StringEquals": {
"iam:PassedToService": [
"ec2.amazonaws.com",
"ec2.amazonaws.com.cn"
]
}
},
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:RegisterInstancesWithLoadBalancer"
],
"Resource": [
"arn:aws:elasticloadbalancing:*:*:loadbalancer/*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:RegisterTargets"
],
"Resource": [
"arn:aws:elasticloadbalancing:*:*:*/*"
]
}
]
}
EOF
}
locals {
# Shorthand for the fleet role (used here and by the fleet request below).
role = aws_iam_role.spot_role
# Single-AZ setup: always zone "a" of the configured region.
zone = join("", [var.region, "a"])
}
// -----------------------------------------------------------------------------
# Attach the fleet permissions policy to the fleet role.
resource "aws_iam_role_policy_attachment" "spot_role_policy_attachment" {
role = local.role.name
policy_arn = aws_iam_policy.spot_policy.arn
}
// -----------------------------------------------------------------------------
resource "aws_security_group" "spot_security_group" {
name = "allow_ssh"
description = "Allow SSH inbound traffic"
vpc_id = aws_vpc.spot_vpc.id
ingress {
description = "SSH from anywhere"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "allow_ssh"
}
}
resource "aws_vpc" "spot_vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "spot_vpc"
}
}
resource "aws_subnet" "spot_subnet" {
vpc_id = aws_vpc.spot_vpc.id
cidr_block = "10.0.0.0/24"
map_public_ip_on_launch = true
availability_zone = local.zone
tags = {
Name = "spot_subnet"
}
}
resource "aws_spot_fleet_request" "spot_fleet_request" {
iam_fleet_role = local.role.arn
spot_price = "0.022"
target_capacity = 1
launch_specification {
instance_type = "r6g.medium"
ami = var.ami
key_name = var.key_name
subnet_id = aws_subnet.spot_subnet.id
availability_zone = local.zone
}
launch_specification {
instance_type = "a1.large"
ami = var.ami
key_name = var.key_name
subnet_id = aws_subnet.spot_subnet.id
availability_zone = local.zone
}
}
// -----------------------------------------------------------------------------
output "spot_fleet_request_id" {
value = aws_spot_fleet_request.spot_fleet_request.id
}
But I get the following error in the Spot Requests -> Details view:
spotFleetRequestConfigurationInvalid
r6g.medium, ami-0c582118883b46f4f, Linux/UNIX (Amazon VPC): The provided credentials do not have permission to create the service-linked role for EC2 Spot Instances.
EDIT
aws iam create-service-linked-role --aws-service-name spot.amazonaws.com
An error occurred (InvalidInput) when calling the CreateServiceLinkedRole operation: Service role name AWSServiceRoleForEC2Spot has been taken in this account, please try a different suffix.
arn:aws:iam::20XXXXXXX22:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot has the following AWSEC2SpotServiceRolePolicy attached
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:StartInstances",
"ec2:StopInstances",
"ec2:RunInstances"
],
"Resource": [
"*"
]
},
{
"Effect": "Deny",
"Action": [
"ec2:RunInstances"
],
"Resource": [
"arn:aws:ec2:*:*:instance/*"
],
"Condition": {
"StringNotEquals": {
"ec2:InstanceMarketType": "spot"
}
}
},
{
"Effect": "Allow",
"Action": [
"iam:PassRole"
],
"Resource": [
"*"
],
"Condition": {
"StringEquals": {
"iam:PassedToService": [
"ec2.amazonaws.com",
"ec2.amazonaws.com.cn"
]
}
}
},
{
"Effect": "Allow",
"Action": [
"ec2:CreateTags"
],
"Resource": "*",
"Condition": {
"StringEquals": {
"ec2:CreateAction": "RunInstances"
}
}
}
]
}
I'm trying to attach an IAM roles to EC2 instances (not ECS) so they can pull images from ECR.
Do something like this. Note you may want to limit which ECR repos are accessible.
resource "aws_instance" "test" {
...
}
resource "aws_launch_configuration" "ecs_cluster" {
...
iam_instance_profile = "${aws_iam_instance_profile.test.id}"
}
resource "aws_iam_role" "test" {
name = "test_role"
assume_role_policy = "..."
}
resource "aws_iam_instance_profile" "test" {
name = "ec2-instance-profile"
role = "${aws_iam_role.test.name}"
}
resource "aws_iam_role_policy_attachment" "test" {
role = "${aws_iam_role.test.name}"
policy_arn = "${aws_iam_policy.test.arn}"
}
resource "aws_iam_policy" "test" {
name = "ec2-instance-pulls-from-ecr"
description = "EC2 instance can pull from ECR"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage"
],
"Resource": "*"
}
]
}
EOF
}
This is known to work in Terraform v0.11.13
cluster.tf
# Terraform 0.11-era example (note "${...}" everywhere and the block-style
# `vars {` on the template data source — both are 0.11 syntax; keep as-is
# unless upgrading to 0.12+).
locals {
cluster_name = "cluster-${terraform.workspace}"
}
# Inline permissions: ECS agent registration/polling, ECR pulls, CloudWatch
# Logs writes — the standard container-instance permission set.
resource "aws_iam_role_policy" "cluster_member" {
name = "${local.cluster_name}"
role = "${aws_iam_role.cluster_member.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecs:UpdateContainerInstancesState",
"ecs:DeregisterContainerInstance",
"ecs:DiscoverPollEndpoint",
"ecs:Poll",
"ecs:RegisterContainerInstance",
"ecs:StartTelemetrySession",
"ecs:Submit*",
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "*"
}
]
}
EOF
}
# Instance role trusted by EC2.
resource "aws_iam_role" "cluster_member" {
name = "${local.cluster_name}"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Sid": ""
}
]
}
EOF
}
# Profile so the instance can carry the role.
resource "aws_iam_instance_profile" "cluster_member" {
name = "${local.cluster_name}"
role = "${aws_iam_role.cluster_member.name}"
}
# Renders user_data with the cluster name substituted (template_file is the
# 0.11 way; 0.12+ would use templatefile()).
data "template_file" "cloud_config" {
template = "${file("${path.module}/templates/user_data.sh")}"
vars {
ecs_cluster = "${local.cluster_name}"
}
}
resource "aws_instance" "cluster_member" {
# http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html
iam_instance_profile = "${aws_iam_instance_profile.cluster_member.name}"
user_data = "${data.template_file.cloud_config.rendered}"
}
templates/user_data.sh
#!/bin/bash
# See https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html
# Register this instance with the ECS cluster. ${ecs_cluster} is substituted
# by Terraform's template rendering (the quoted 'EOF' heredoc prevents the
# shell from touching it).
cat <<'EOF' >> /etc/ecs/ecs.config
ECS_CLUSTER=${ecs_cluster}
EOF
The answer given by @Eric M. Johnson is correct; just for completeness:
resource "aws_launch_configuration" "ecs_launch_configuration" {
name = "${var.application_name}-${var.stack}"
image_id = var.image_id
instance_type = var.instance_type
iam_instance_profile = aws_iam_instance_profile.esc_launch_configuration_iam_instance_profile.arn
security_groups = split(",",aws_security_group.ecs_launch_configuration_security_group.id)
associate_public_ip_address = "true"
// key_name = "${var.ecs-key-pair-name}"
user_data = data.template_file.user_data.rendered
}
data "template_file" "user_data" {
template = file("${path.module}/ec2/user-data.sh")
vars = {
ecs_cluster_name = "${var.application_name}-${var.stack}"
}
}
resource "aws_iam_instance_profile" "esc_launch_configuration_iam_instance_profile" {
name = "${var.application_name}-${var.stack}"
role = aws_iam_role.iam_role.name
}
resource "aws_iam_role" "iam_role" {
name = "${var.application_name}-${var.stack}"
force_detach_policies = true
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": [
"ec2.amazonaws.com",
"ecs.amazonaws.com"
]
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "iam_role_policy_attachment" {
role = aws_iam_role.iam_role.name
policy_arn = aws_iam_policy.ecs_iam_policy.arn
}
resource "aws_iam_policy" "ecs_iam_policy" {
name = "${var.application_name}-${var.stack}"
description = "EC2 instance can pull from ECR"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeTags",
"ecs:CreateCluster",
"ecs:DeregisterContainerInstance",
"ecs:DiscoverPollEndpoint",
"ecs:Poll",
"ecs:RegisterContainerInstance",
"ecs:StartTelemetrySession",
"ecs:UpdateContainerInstancesState",
"ecs:Submit*",
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "*"
}
]
}
EOF
}