I am working on Amazon S3 replication using Terraform. I want to enable the rule "Replica modification sync", but I don't think it is supported in Terraform.
Right now my code looks like this:
replication_configuration {
  role = "${aws_iam_role.source_replication.arn}"

  rules {
    id     = "${local.replication_name}"
    status = "Enabled"
    prefix = "${var.replicate_prefix}"

    destination {
      bucket        = "${local.dest_bucket_arn}"
      storage_class = "STANDARD"

      access_control_translation = {
        owner = "Destination"
      }

      account_id = "${data.aws_caller_identity.dest.account_id}"
    }

    source_selection_criteria {
      replica_modifications {
        Status = "Enabled"
      }
    }
  }
}
It gives an error:
Error: Unsupported block type

  on s3_bucket.tf line 61, in resource "aws_s3_bucket" "bucket":
  61: replica_modifications {

Blocks of type "replica_modifications" are not expected here.
The rule I have to enable looks like this in the console.
With the AWS CLI in Terraform, I am not sure how I can use variables like ${local.dest_bucket_arn} and ${aws_iam_role.source_replication.arn} in the JSON file I am calling.
resource "null_resource" "awsrepl" {
# ...
provisioner "local-exec" {
command = "aws s3api put-bucket-replication --replication-configuration templatefile://replication_source.json --bucket ${var.bucket_name}"
}
}
replication_source.json looks like:
{
  "Rules": [
    {
      "Status": "Enabled",
      "DeleteMarkerReplication": { "Status": "Enabled" },
      "SourceSelectionCriteria": {
        "ReplicaModifications": {
          "Status": "Enabled"
        }
      },
      "Destination": {
        "Bucket": "${local.dest_bucket_arn}"
      },
      "Priority": 1
    }
  ],
  "Role": "${aws_iam_role.source_replication.arn}"
}
You are correct. It is not yet supported, but there is a GitHub issue for that already:
Amazon S3 Two-way Replication via Replica Modification Sync
By the way, Delete marker replication is also not supported.
Your options are to either do it manually after you deploy your bucket, use local-exec to run the AWS CLI, or use aws_lambda_invocation.
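For the aws_lambda_invocation route, a minimal sketch of the data source side (the function name and input shape are assumptions; the function body, which would call s3:PutBucketReplication through the SDK, is up to you):

data "aws_lambda_invocation" "set_replication" {
  # Hypothetical function that calls s3:PutBucketReplication via the SDK.
  function_name = aws_lambda_function.set_replication.function_name

  input = jsonencode({
    bucket      = var.bucket_name
    role_arn    = aws_iam_role.source_replication.arn
    dest_bucket = local.dest_bucket_arn
  })
}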
I was able to achieve this using local-exec and template_file in Terraform:
data "template_file" "replication_dest" {
template = "${file("replication_dest.json")}"
vars = {
srcarn = "${aws_s3_bucket.bucket.arn}"
destrolearn = "${aws_iam_role.dest_replication.arn}"
kmskey = "${data.aws_caller_identity.current.account_id}"
keyalias = "${data.aws_kms_key.s3.key_id}"
srcregion = "${data.aws_region.active.name}"
}
}
resource "null_resource" "awsdestrepl" {
# ...
provisioner "local-exec" {
command = "aws s3api put-bucket-replication --bucket ${aws_s3_bucket.dest.bucket} --replication-configuration ${data.template_file.replication_dest.rendered}"
}
depends_on = [aws_s3_bucket.dest]
}
And replication_dest.json looks like this (the file is one quoted string with escaped inner quotes, so the rendered JSON passes to the CLI as a single shell argument):
"{
\"Rules\": [
{
\"Status\": \"Enabled\",
\"DeleteMarkerReplication\": { \"Status\": \"Enabled\" },
\"Filter\": {\"Prefix\": \"\"},
\"SourceSelectionCriteria\": {
\"ReplicaModifications\":{
\"Status\": \"Enabled\"
},
\"SseKmsEncryptedObjects\":{
\"Status\": \"Enabled\"
}
},
\"Destination\": {
\"Bucket\": \"${bucketarn}\",
\"EncryptionConfiguration\": {
\"ReplicaKmsKeyID\": \"arn:aws:kms:${destregion}:${kmskey}:${keyalias}\"
}
},
\"Priority\": 1
}
],
\"Role\": \"${rolearn}\"
}"
And you are good to go. :)
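Note that newer versions of the AWS provider (v4 and later) support replica modification sync and delete marker replication natively through the standalone aws_s3_bucket_replication_configuration resource. A minimal sketch reusing the names from the question:

resource "aws_s3_bucket_replication_configuration" "source" {
  bucket = aws_s3_bucket.bucket.id
  role   = aws_iam_role.source_replication.arn

  rule {
    id     = local.replication_name
    status = "Enabled"

    # An empty filter matches all objects; delete_marker_replication
    # is required whenever filter is used.
    filter {}

    delete_marker_replication {
      status = "Enabled"
    }

    source_selection_criteria {
      replica_modifications {
        status = "Enabled"
      }
    }

    destination {
      bucket        = local.dest_bucket_arn
      storage_class = "STANDARD"
    }
  }
}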
I'm trying to deploy some event rules using Terraform. From what I've seen in the docs, my (JSON) format is fine, and I can't figure out why it's throwing an error.
resource "aws_kinesis_firehose_delivery_stream" "kinesis_stream" {
name = var.delivery_stream_name
destination = "s3"
s3_configuration {
role_arn = aws_iam_role.kinesis_data_firehose_role.arn
bucket_arn = aws_s3_bucket.s3_bucket.arn
}
}
resource "aws_cloudwatch_event_rule" "successful_sign_in_rule" {
description = "Auth0 User Successfully signed in"
event_bus_name = aws_cloudwatch_event_bus.event_bridge_event_bus.arn
event_pattern = <<EOF
{
"detail-type": [
"s"
]
}
EOF
}
resource "aws_cloudwatch_event_target" "successful_sign_in_rule_target" {
rule = aws_cloudwatch_event_rule.successful_sign_in_rule.name
arn = aws_kinesis_firehose_delivery_stream.kinesis_stream.arn
}
I have terraformed a stack from DynamoDB -> AWS Glue -> Athena. I can see all the columns have been created in AWS Glue and the table exists there, but in Athena it seems only the database is there; even though the table schema and columns show up when querying the database, the queries do not work.
SELECT tenant, COUNT(DISTINCT id) counts
FROM "account-profiles-glue-db"."account_profiles"
GROUP BY tenant
The above query fails.
My Terraform looks like:
locals {
  table-name               = var.table-name
  athena-results-s3-name   = "${local.table-name}-analytics"
  athena-workgroup-name    = "${local.table-name}"
  glue-db-name             = "${local.table-name}-glue-db"
  glue-crawler-name        = "${local.table-name}-crawler"
  glue-crawler-role-name   = "${local.table-name}-crawler-role"
  glue-crawler-policy-name = "${local.table-name}-crawler"
}

resource "aws_kms_key" "aws_kms_key" {
  description             = "KMS key for whole project"
  deletion_window_in_days = 10
}
##################################################################
# glue
##################################################################
resource "aws_glue_catalog_database" "aws_glue_catalog_database" {
name = local.glue-db-name
}
resource "aws_glue_crawler" "aws_glue_crawler" {
database_name = aws_glue_catalog_database.aws_glue_catalog_database.name
name = local.glue-crawler-name
role = aws_iam_role.aws_iam_role_glue_crawler.arn
configuration = jsonencode(
{
"Version" : 1.0
CrawlerOutput = {
Partitions = { AddOrUpdateBehavior = "InheritFromTable" }
}
}
)
dynamodb_target {
path = local.table-name
}
}
resource "aws_iam_role" "aws_iam_role_glue_crawler" {
name = local.glue-crawler-role-name
assume_role_policy = jsonencode(
{
"Version" : "2012-10-17",
"Statement" : [
{
"Action" : "sts:AssumeRole",
"Principal" : {
"Service" : "glue.amazonaws.com"
},
"Effect" : "Allow",
"Sid" : ""
}
]
}
)
}
resource "aws_iam_role_policy" "aws_iam_role_policy_glue_crawler" {
name = local.glue-crawler-policy-name
role = aws_iam_role.aws_iam_role_glue_crawler.id
policy = jsonencode(
{
"Version" : "2012-10-17",
"Statement" : [
{
"Effect" : "Allow",
"Action" : [
"*"
],
"Resource" : [
"*"
]
}
]
}
)
}
##################################################################
# athena
##################################################################
resource "aws_s3_bucket" "aws_s3_bucket_analytics" {
bucket = local.athena-results-s3-name
acl = "private"
versioning {
enabled = true
}
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = aws_kms_key.aws_kms_key.arn
sse_algorithm = "aws:kms"
}
}
}
}
resource "aws_athena_workgroup" "aws_athena_workgroup" {
name = local.athena-workgroup-name
configuration {
enforce_workgroup_configuration = true
publish_cloudwatch_metrics_enabled = true
result_configuration {
output_location = "s3://${aws_s3_bucket.aws_s3_bucket_analytics.bucket}/output/"
encryption_configuration {
encryption_option = "SSE_KMS"
kms_key_arn = aws_kms_key.aws_kms_key.arn
}
}
}
}
Looking at the Terraform you provided and the Glue documentation on AWS, you are only crawling the DynamoDB table; you aren't triggering any jobs for it. Glue jobs are where you run your business logic to transform and load the data, and that is where you would send your source data to S3 to be read by Athena.
If you need help generating the code for your Glue job, I would recommend using Glue Studio, which has a visual editor that will also generate your code. You can select your source, destination, and any transforms you need. At that point, you can use the Terraform glue_job resource and reference the script that you generated in Glue Studio.
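For reference, a minimal aws_glue_job sketch (the job name and script location are assumptions; the script itself would come from Glue Studio):

resource "aws_glue_job" "etl" {
  name     = "${local.table-name}-etl" # hypothetical name
  role_arn = aws_iam_role.aws_iam_role_glue_crawler.arn

  glue_version = "3.0"

  command {
    name            = "glueetl"
    # Assumed location of the script exported from Glue Studio.
    script_location = "s3://${aws_s3_bucket.aws_s3_bucket_analytics.bucket}/scripts/etl.py"
    python_version  = "3"
  }
}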
Unless you need to perform some ETL on the data, consider connecting Athena directly to DynamoDB with the Athena-DynamoDB-Connector provided in the AWSLabs GitHub, or export your DynamoDB data to S3 and then connect Athena to that S3 bucket.
I just created an AWS ECS cluster and task definition, and it all ran just fine; I was able to connect to the server. The task runs on Fargate, on demand. I am now attempting to create a Lambda that will use the RunTask command to start the server. Here is my Lambda definition in Terraform.
data "aws_iam_policy_document" "startup_lambda_assume_role" {
statement {
effect = "Allow"
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["lambda.amazonaws.com"]
}
}
}
resource "aws_iam_role" "startup_lambda" {
name = "report_lambda_role"
assume_role_policy = data.aws_iam_policy_document.startup_lambda_assume_role.json
}
resource "aws_cloudwatch_log_group" "startup_lambda" {
name = "/aws/lambda/${aws_lambda_function.startup.function_name}"
retention_in_days = 14
}
data "aws_iam_policy_document" "startup_lambda" {
statement {
effect = "Allow"
actions = [
"logs:CreateLogStream",
"logs:CreateLogGroup",
]
resources = [aws_cloudwatch_log_group.startup_lambda.arn]
}
statement {
effect = "Allow"
actions = ["logs:PutLogEvents"]
resources = ["${aws_cloudwatch_log_group.startup_lambda.arn}:*"]
}
statement {
effect = "Allow"
actions = [
"ecs:RunTask",
]
resources = [
aws_ecs_task_definition.game.arn
]
}
statement {
effect = "Allow"
actions = [
"iam:PassRole",
]
resources = [
aws_iam_role.ecs_task_execution.arn,
aws_iam_role.game_task.arn
]
}
}
resource "aws_iam_role_policy" "startup_lambda" {
name = "startup_lambda_policy"
policy = data.aws_iam_policy_document.startup_lambda.json
role = aws_iam_role.startup_lambda.id
}
data "archive_file" "startup_lambda" {
type = "zip"
source_file = "${path.module}/startup/lambda_handler.py"
output_path = "${path.module}/startup/lambda_handler.zip"
}
resource "aws_lambda_function" "startup" {
function_name = "startup_lambda"
filename = data.archive_file.startup_lambda.output_path
handler = "lambda_handler.handler"
source_code_hash = data.archive_file.startup_lambda.output_base64sha256
runtime = "python3.8"
role = aws_iam_role.startup_lambda.arn
environment {
variables = {
CLUSTER_ARN = aws_ecs_cluster.game.arn,
TASK_ARN = aws_ecs_cluster.game.arn,
SUBNET_IDS = "${aws_subnet.subnet_a.id},${aws_subnet.subnet_b.id},${aws_subnet.subnet_c.id}"
}
}
}
This is my Python code, located in startup/lambda_handler.py, which does appear correctly as the function's code when I check in the AWS console.
import os
import boto3


def handler(event, context):
    # Start one Fargate task on the cluster, using values injected
    # through the Lambda's environment variables.
    client = boto3.client("ecs")
    response = client.run_task(
        cluster=os.getenv("CLUSTER_ARN"),
        taskDefinition=os.getenv("TASK_ARN"),
        launchType="FARGATE",
        count=1,
        networkConfiguration={
            "awsvpcConfiguration": {
                "subnets": os.getenv("SUBNET_IDS", "").split(","),
                "assignPublicIp": "ENABLED",
            },
        },
    )
When I run a test of the Lambda function in the console using an empty JSON object as the event, I expect my ECS task to spin up, but instead I get the following error.
Response

{
  "errorMessage": "An error occurred (AccessDeniedException) when calling the RunTask operation: User: arn:aws:sts::703606424838:assumed-role/report_lambda_role/startup_lambda is not authorized to perform: ecs:RunTask on resource: * because no identity-based policy allows the ecs:RunTask action",
  "errorType": "AccessDeniedException",
  "stackTrace": [
    " File \"/var/task/lambda_handler.py\", line 6, in handler\n response = client.run_task(\n",
    " File \"/var/runtime/botocore/client.py\", line 386, in _api_call\n return self._make_api_call(operation_name, kwargs)\n",
    " File \"/var/runtime/botocore/client.py\", line 705, in _make_api_call\n raise error_class(parsed_response, operation_name)\n"
  ]
}
Notice that I do have a statement allowing ecs:RunTask on my task definition in the IAM policy document attached to my Lambda. I am not sure why this doesn't give the Lambda permission to run the task.
The TASK_ARN you pass to your Lambda container is wrong. It should be aws_ecs_task_definition.game.arn instead of a duplicate aws_ecs_cluster.game.arn.
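With the cluster ARN passed as taskDefinition, the RunTask request does not match the task definition ARN your IAM policy allows, so IAM denies it. The corrected environment block would be:

environment {
  variables = {
    CLUSTER_ARN = aws_ecs_cluster.game.arn,
    # The task definition ARN, not the cluster ARN a second time.
    TASK_ARN    = aws_ecs_task_definition.game.arn,
    SUBNET_IDS  = "${aws_subnet.subnet_a.id},${aws_subnet.subnet_b.id},${aws_subnet.subnet_c.id}"
  }
}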
I want to create a policy so a specific AWS role (not in the same account), let's say arn:aws:iam::123123123123:role/sns-read-role, can subscribe and receive messages from my SNS topic in AWS.
From the official Terraform docs' aws_sns_topic_policy example, it would be:
resource "aws_sns_topic" "test" {
name = "my-topic-with-policy"
}
resource "aws_sns_topic_policy" "default" {
arn = aws_sns_topic.test.arn
policy = data.aws_iam_policy_document.sns_topic_policy.json
}
data "aws_iam_policy_document" "sns_topic_policy" {
statement {
actions = [
"SNS:Subscribe",
"SNS:Receive"
]
condition {
test = "StringEquals"
variable = "AWS:SourceOwner"
values = [
123123123123
]
}
effect = "Allow"
principals {
type = "AWS"
identifiers = ["*"]
}
resources = [
aws_sns_topic.test.arn
]
}
}
But this would translate to arn:aws:iam::123123123123:root and filter only on the account ID.
From AWS JSON policy elements: Principal, I understand the AWS syntax is:
"Principal": { "AWS": "arn:aws:iam::AWS-account-ID:role/role-name" }
Adding the role in the condition like this
condition {
  test     = "StringEquals"
  variable = "AWS:SourceOwner"
  values = [
    "arn:aws:iam::123123123123:role/sns-read-role"
  ]
}
does not work.
It would make sense to add the role to the principal like this
principals {
  type        = "AWS"
  identifiers = ["arn:aws:iam::123123123123:role/sns-read-role"]
}
When I try to subscribe, I get an AuthorizationError: "Couldn't subscribe to topic..."
Do I need the condition together with the principal? Why even bother with the condition if you can use the principal in the first place?
After some experimenting, I found that I don't need the condition. This works for me:
resource "aws_sns_topic" "test" {
name = "my-topic-with-policy"
}
resource "aws_sns_topic_policy" "default" {
arn = aws_sns_topic.test.arn
policy = data.aws_iam_policy_document.sns_topic_policy.json
}
data "aws_iam_policy_document" "sns_topic_policy" {
statement {
actions = [
"SNS:Subscribe",
"SNS:Receive"
]
effect = "Allow"
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::123123123123:role/sns-read-role"
]
}
resources = [
aws_sns_topic.test.arn
]
}
}
In case you want to use parameters for your module:
principals {
  type = "AWS"
  identifiers = [
    "${var.account_arn}:role/${var.role}"
  ]
}
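The corresponding variable declarations might look like this (names assumed from the snippet above):

variable "account_arn" {
  type        = string
  description = "IAM ARN prefix of the trusted account, e.g. arn:aws:iam::123123123123"
}

variable "role" {
  type        = string
  description = "Name of the role allowed to subscribe to the topic"
}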
I want to deploy a Cloud Function with Terraform, but it fails.
export TF_LOG=DEBUG
terraform init
terraform plan   # does not fail
terraform apply  # this fails
{
  "error": {
    "code": 400,
    "message": "The request has errors",
    "errors": [
      {
        "message": "The request has errors",
        "domain": "global",
        "reason": "badRequest"
      }
    ],
    "status": "INVALID_ARGUMENT"
  }
}
What I tried:
I tried changing the trigger to HTTP, but the deployment also failed.
I enabled TF_LOG.
I ran terraform plan, and it succeeded.
Terraform template
Below is my main.tf file:
resource "google_pubsub_topic" "topic" {
name = "rss-webhook-topic"
project = "${var.project_id}"
}
resource "google_cloudfunctions_function" "function" {
name = "rss-webhook-function"
entry_point = "helloGET"
available_memory_mb = 256
project = "${var.project_id}"
event_trigger {
event_type = "google.pubsub.topic.publish"
resource = "${google_pubsub_topic.topic.name}"
}
source_archive_bucket = "${var.bucket_name}"
source_archive_object = "${google_storage_bucket_object.archive.name}"
}
data "archive_file" "function_src" {
type = "zip"
output_path = "function_src.zip"
source {
content = "${file("src/index.js")}"
filename = "index.js"
}
}
resource "google_storage_bucket_object" "archive" {
name = "function_src.zip"
bucket = "${var.bucket_name}"
source = "function_src.zip"
depends_on = ["data.archive_file.function_src"]
}
Environment:
Terraform version: 0.11.13
Go runtime version: go1.12
+ provider.archive v1.2.2
+ provider.google v2.5.1
property "runtime" is required.
below works.
resource "google_cloudfunctions_function" "function" {
name = "rss-webhook-function"
entry_point = "helloGET"
available_memory_mb = 256
project = "${var.project_id}"
runtime = "nodejs8"
event_trigger {
event_type = "google.pubsub.topic.publish"
resource = "${google_pubsub_topic.topic.name}"
}
source_archive_bucket = "${var.bucket_name}"
source_archive_object = "${google_storage_bucket_object.archive.name}"
}
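Note that the nodejs8 runtime has since been decommissioned by Google Cloud; on current provider versions you would pick a newer supported runtime (for example nodejs18) instead.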