I have created two Lambda functions. Now I want to pass all the CloudWatch logs from the first Lambda to the second. I have created a new log group and a subscription filter to send those logs to the second Lambda.
I am not sure whether this configuration needs any additional resources.
resource "aws_lambda_function" "audit-logs" {
filename = var.audit_filename
function_name = var.audit_function
source_code_hash = filebase64sha256(var.audit_filename)
role = module.lambda_role.arn
handler = "cloudwatch.lambda_handler"
runtime = "python3.9"
timeout = 200
description = "audit logs"
depends_on = [module.lambda_role, module.security_group]
}
resource "aws_cloudwatch_log_group" "splunk_cloudwatch_loggroup" {
name = "/aws/lambda/audit_logs"
}
resource "aws_lambda_permission" "allow_cloudwatch_for_splunk" {
statement_id = "AllowExecutionFromCloudWatch"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.splunk-logs.arn
principal = "logs.amazonaws.com" #"logs.region.amazonaws.com"
source_arn = "${aws_cloudwatch_log_group.splunk_cloudwatch_loggroup.arn}:*"
}
resource "aws_cloudwatch_log_subscription_filter" "splunk_cloudwatch_trigger" {
depends_on = [aws_lambda_permission.allow_cloudwatch_for_splunk]
destination_arn = aws_lambda_function.splunk-logs.arn
filter_pattern = ""
log_group_name = aws_cloudwatch_log_group.splunk_cloudwatch_loggroup.name
name = "splunk_filter"
}
# splunk logs lambda function
resource "aws_lambda_function" "splunk-logs" {
  filename         = var.splunk_filename
  function_name    = var.splunk_function
  source_code_hash = filebase64sha256(var.splunk_filename)
  role             = module.lambda_role.arn
  handler          = "${var.splunk_handler}.handler"
  runtime          = "python3.9"
  timeout          = 200
  description      = "audit logs"
  depends_on       = [module.lambda_role, module.security_group]
}
How can I pass all the logs from the first Lambda to the newly created log group? Any help is appreciated.
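One detail worth noting here: a Lambda function always writes its logs to the log group named /aws/lambda/<function_name>, so a subscription filter on a group with any other name will never see the first function's logs. A minimal sketch, assuming var.audit_function holds the first function's name:

resource "aws_cloudwatch_log_group" "splunk_cloudwatch_loggroup" {
  # Must match the group Lambda writes to: /aws/lambda/<function_name>
  name = "/aws/lambda/${var.audit_function}"
}

With the names aligned, the subscription filter above should forward every log event from the first Lambda to the second without any further resources.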
I want to execute multiple Lambdas behind a single API Gateway using a Terraform module. I define my variable for the Lambdas like this:
variable "lambdas" {
description = "Map of Lambda function names and API gateway resource paths."
type = map(string)
default = {
"name" = "get-lambda-function",
"name" = "post-lambda-function",
"name" = "put-lambda-function",
"name" = "qr-lambda-function"
}
}
And my lambda.tf looks like this:
resource "aws_lambda_function" "lambda_functions" {
for_each = var.lambdas
function_name = each.value.name
filename = data.archive_file.lambda.output_path
source_code_hash = filebase64sha256(data.archive_file.lambda.output_path)
handler = var.handler
runtime = var.runtime
depends_on = [
#aws_iam_role_policy_attachment.lambda_logs
#aws_iam_role_policy_attachment.lambda_vpc,
aws_cloudwatch_log_group.lambda_fun
]
role = aws_iam_role.lambda_role.arn
}
For the Lambda integration with the API, invoking the multiple Lambdas keyed by [each.key], it looks like this:
resource "aws_api_gateway_integration" "lambda" {
for_each = aws_api_gateway_method.proxyMethod
rest_api_id = each.value.rest_api_id
resource_id = each.value.resource_id
http_method = each.value.http_method
integration_http_method = "POST"
type = "AWS_PROXY"
uri = aws_lambda_function.lambda_functions[each.key].invoke_arn
}
So is it possible to integrate multiple Lambdas into a single API Gateway using Terraform?
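This pattern does work; one piece the snippets above still need is an aws_lambda_permission per function so that API Gateway is allowed to invoke it. A minimal sketch (aws_api_gateway_rest_api.api is an assumed name for the REST API resource, which is not shown above):

resource "aws_lambda_permission" "apigw" {
  for_each      = aws_lambda_function.lambda_functions
  statement_id  = "AllowAPIGatewayInvoke"
  action        = "lambda:InvokeFunction"
  function_name = each.value.function_name
  principal     = "apigateway.amazonaws.com"
  # assumed REST API name; wildcard covers any stage/method, tighten as needed
  source_arn    = "${aws_api_gateway_rest_api.api.execution_arn}/*/*"
}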
How can I connect the aws_cloudwatch_event_api_destination and aws_cloudwatch_event_connection resources to an aws_cloudwatch_event_rule resource in Terraform? The code is below:
resource "aws_cloudwatch_event_rule" "test123_schedule_everyday" {
name = "${var.test123_bucket_name}-schedule-everyday-${var.env}"
description = "Meter reader get api data every 11:00 PM at lotus"
schedule_expression = "cron(0 16 * * ? *)"
is_enabled = "${var.test123_cloudwatch_is_enable}"
lifecycle {
ignore_changes = [schedule_expression, description, is_enabled]
}
}
resource "aws_cloudwatch_event_api_destination" "test123_event_api" {
name = "test-dev-api"
description = "test-dev-api destination"
invocation_endpoint = "https://test.com"
invocation_rate_limit_per_second = "1"
http_method = "GET"
connection_arn = aws_cloudwatch_event_connection.test123_event_connection.arn
}
resource "aws_cloudwatch_event_connection" "test123_event_connection" {
name = "test-dev-connection"
description = "A connection description"
authorization_type = "API_KEY"
auth_parameters {
api_key {
key = "test-key"
value = "TUVURVJSRUFESU5HOkNCWktXWFU3NzZDS0dGTk5LNjdGWUFVNFFRNE1HV0o3"
}
}
}
I believe what you seek is an aws_cloudwatch_event_target. This allows you to specify your destination as a target for a given rule, as documented in the official API destinations documentation.
resource "aws_cloudwatch_event_target" "this" {
rule = aws_cloudwatch_event_rule.test123_schedule_everyday.name
arn = aws_cloudwatch_event_api_destination.test123_event_api.arn
}
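Note that EventBridge also needs an execution role it can assume, with events:InvokeApiDestination permission on the destination, passed via role_arn on the target. A minimal sketch (the role and policy names here are illustrative):

resource "aws_iam_role" "eventbridge_invoke_api" {
  name = "eventbridge-invoke-api-destination" # illustrative name
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRole"
      Principal = { Service = "events.amazonaws.com" }
    }]
  })
}

resource "aws_iam_role_policy" "invoke_api_destination" {
  name = "invoke-api-destination" # illustrative name
  role = aws_iam_role.eventbridge_invoke_api.id
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect   = "Allow"
      Action   = "events:InvokeApiDestination"
      Resource = aws_cloudwatch_event_api_destination.test123_event_api.arn
    }]
  })
}

With that in place, add role_arn = aws_iam_role.eventbridge_invoke_api.arn to the aws_cloudwatch_event_target above.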
I am trying to create an EventBridge rule that will run my Lambda function every 30 minutes.
I based my code on this answer I found here on SO: Use terraform to set up a lambda function triggered by a scheduled event source.
Here is my Terraform code:
monitoring/main.tf:
...
module "cloudwatch_event_rule" {
  source     = "./cloudwatch_event_rule"
  extra_tags = local.extra_tags
}
module "lambda_function" {
  source            = "./lambda_functions"
  extra_tags        = local.extra_tags
  alb_names         = var.alb_names
  slack_webhook_url = var.slack_webhook_url
  environment_tag   = local.environment_tag
}
module "cloudwatch_event_target" {
  source                     = "./cloudwatch_event_target"
  lambda_function_arn        = module.lambda_function.detect_bad_rejects_on_alb_lambda_arn
  cloudwatch_event_rule_name = module.cloudwatch_event_rule.cloudwatch_event_rule_name
  extra_tags                 = local.extra_tags
}
monitoring/lambda_functions/main.tf:
resource "aws_lambda_function" "detect_bad_rejects_on_alb" {
  filename         = var.filename
  function_name    = var.function_name
  role             = aws_iam_role.detect_bad_reject_on_alb.arn
  handler          = var.handler
  source_code_hash = filebase64sha256(var.filename)
  runtime          = var.runtime
  timeout          = var.timeout
  environment {
    ...
  }
}
monitoring/cloudwatch_event_rule/main.tf
resource "aws_cloudwatch_event_rule" "event_rule" {
  name                = var.rule_name
  description         = var.description
  schedule_expression = var.schedule_expression
  tags                = ...
}
monitoring/cloudwatch_event_rule/variables.tf
...
variable "schedule_expression" {
  type    = string
  default = "rate(30 minutes)"
}
...
monitoring/cloudwatch_event_target/main.tf
resource "aws_cloudwatch_event_target" "event_target" {
  arn   = var.lambda_function_arn
  rule  = var.cloudwatch_event_rule_name
  input = var.input
}
This ends up creating the Lambda function and the EventBridge rule with my Lambda function as its target and the schedule expression "rate(30 minutes)", but the Lambda function is never executed. What am I doing wrong?
From what you posted, it seems that you are not adding permission for invocations. Your code does not show the creation of an aws_lambda_permission with the proper rules, so you should add such a permission so that EventBridge can invoke your function (example):
resource "aws_lambda_permission" "event-invoke" {
statement_id = "AllowExecutionFromCloudWatch"
action = "lambda:InvokeFunction"
function_name = var.function_name
principal = "events.amazonaws.com"
source_arn = module.cloudwatch_event_rule.cloudwatch_event_rule_arn
}
Make sure source_arn correctly points to the ARN of your event rule.
All our AWS infra is managed by Terraform, including the SageMaker resources. We want to implement autoscaling for our SageMaker resources, but we can't find a Terraform solution to build our infra as code.
In general, it seems the ASG settings should be located in the aws_sagemaker_endpoint_configuration >> production_variants block.
references:
AWS documentation: https://aws.amazon.com/blogs/aws/auto-scaling-is-now-available-for-amazon-sagemaker/
TF documentation: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sagemaker_endpoint_configuration
Thanks in advance for your response
So, from my research it should be something like this:
resource "aws_appautoscaling_target" "sagemaker_target" {
max_capacity = var.max_instance_count
min_capacity = var.min_instance_count
resource_id = "endpoint/${aws_sagemaker_endpoint.endpoint.name}/variant/${var.service_name}-${var.site}-${var.environment}"
role_arn = aws_iam_role.sm_execution.arn
scalable_dimension = "sagemaker:variant:DesiredInstanceCount"
service_namespace = "sagemaker"
}
resource "aws_appautoscaling_policy" "sagemaker_policy" {
name = "${var.service_name}-${var.site}-${var.environment}-target-tracking"
policy_type = "TargetTrackingScaling"
resource_id = aws_appautoscaling_target.sagemaker_target.resource_id
scalable_dimension = aws_appautoscaling_target.sagemaker_target.scalable_dimension
service_namespace = aws_appautoscaling_target.sagemaker_target.service_namespace
target_tracking_scaling_policy_configuration {
predefined_metric_specification {
predefined_metric_type = "SageMakerVariantInvocationsPerInstance"
}
target_value = var.target_invocations
scale_in_cooldown = var.target_scale_in_cooldown
scale_out_cooldown = var.target_scale_out_cooldown
}
}
Taking a reference from the other answer, below is what my code looks like. Pasting it here for someone else's reference.
Note: I had to remove the role_arn key from the aws_appautoscaling_target resource so that it uses the default service IAM role, and I also had to use the string SageMakerEndpointInvocationScalingPolicy as the policy name in the aws_appautoscaling_policy resource to get the same behavior as when the autoscaling policy for a SageMaker endpoint is created from the AWS console.
PS: Without the above adjustments, TargetValue was not rendered when viewed from the AWS console, and I was not able to manually change the target value of the Terraform-created autoscaling policy of the SageMaker endpoint (while updating TargetValue from the console, I was getting a validation exception like Only one Target Tracking Scaling policy for a given metric specification is allowed.).
resource "aws_appautoscaling_target" "register_myendpoint_target" {
max_capacity = 2
min_capacity = 1
resource_id = "endpoint/${aws_sagemaker_endpoint.my_model-endpoint.name}/variant/${variant_name}"
scalable_dimension = "sagemaker:variant:DesiredInstanceCount"
service_namespace = "sagemaker"
}
resource "aws_appautoscaling_policy" "autoscale_policy_my_endpoint" {
name = "SageMakerEndpointInvocationScalingPolicy" # Had to use this name ditto so that on console it doesn't show some custom policy is configured etc etc
policy_type = "TargetTrackingScaling"
resource_id = aws_appautoscaling_target.register_myendpoint_target.resource_id
scalable_dimension = aws_appautoscaling_target.register_myendpoint_target.scalable_dimension
service_namespace = aws_appautoscaling_target.register_myendpoint_target.service_namespace
target_tracking_scaling_policy_configuration {
target_value = 100.0
predefined_metric_specification {
predefined_metric_type = "SageMakerVariantInvocationsPerInstance"
}
scale_in_cooldown = 300
scale_out_cooldown = 300
}
}
I'm currently having an issue with my aws_s3_bucket_notification resource creation. Whenever I attempt to deploy this resource, I receive this error:
Error putting S3 notification configuration: InvalidArgument: Unable to validate the following destination configurations
I've tried setting depends_on parameters and adjusting permissions. One interesting thing: in my main.tf file I'm creating two Lambda functions, and both are extremely similar (they vary only by code). My "controller" configuration deploys with no issue, but my "chunker" function seems to have an issue creating the s3_bucket_notification. I have included both configs for comparison.
#S3
resource "aws_s3_bucket" "ancb" {
  for_each = toset(var.ancb_bucket)
  bucket   = format("ancb-%s-%s-%s", var.env, var.product_name, each.value)
  acl      = "private"
  versioning {
    enabled = true
  }
  tags = {
    Environment = var.env
    Terraform   = true
  }
}
#Chunker
resource "aws_lambda_function" "ancb_chunker" {
  function_name = format("ancb-chunker-%s-%s", var.env, var.product_name)
  s3_bucket     = aws_s3_bucket.ancb["config"].id
  s3_key        = var.lambda_zip_chunker
  handler       = "handler.chunk"
  runtime       = "nodejs8.10"
  role          = aws_iam_role.lambda_exec.arn
  environment {
    variables = {
      ORIGINAL_BUCKET   = aws_s3_bucket.ancb["original"].id
      TO_PROCESS_BUCKET = aws_s3_bucket.ancb["to-process"].id
      ENVIRONMENT       = var.env
      CHUNK_SIZE        = 5000
    }
  }
  tags = {
    Environment = var.env
    Terraform   = true
  }
  depends_on = [
    aws_s3_bucket_object.ancb["chunker.zip"],
    aws_s3_bucket.ancb["chunker"]
  ]
}
resource "aws_lambda_permission" "ancb_chunker_s3" {
  statement_id  = "AllowExecutionFromS3Bucket-Chunker"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ancb_controller.arn
  principal     = "s3.amazonaws.com"
  source_arn    = aws_s3_bucket.ancb["original"].arn
}
resource "aws_s3_bucket_notification" "chunker" {
  bucket = aws_s3_bucket.ancb["original"].id
  lambda_function {
    lambda_function_arn = aws_lambda_function.ancb_chunker.arn
    events              = ["s3:ObjectCreated:*"]
  }
  depends_on = [
    aws_lambda_permission.ancb_chunker_s3,
    aws_lambda_function.ancb_chunker,
    aws_s3_bucket.ancb["original"]
  ]
}
#Controller
resource "aws_lambda_function" "ancb_controller" {
  function_name = format("ancb-controller-%s-%s", var.env, var.product_name)
  s3_bucket     = aws_s3_bucket.ancb["config"].id
  s3_key        = var.lambda_zip_controller
  handler       = "handler.controller"
  runtime       = "nodejs8.10"
  role          = aws_iam_role.lambda_exec.arn
  environment {
    variables = {
      DESTINATION_BUCKET = aws_s3_bucket.ancb["destination"].id
      ENVIRONMENT        = var.env
      ERROR_BUCKET       = aws_s3_bucket.ancb["error"].id
      GEOCODIO_APIKEY    = <insert>
      GEOCODIO_ENDPOINT  = <insert>
      GEOCODIO_VERSION   = <insert>
      ORIGINAL_BUCKET    = aws_s3_bucket.ancb["original"].id
      SOURCE_BUCKET      = aws_s3_bucket.ancb["source"].id
      TO_PROCESS_BUCKET  = aws_s3_bucket.ancb["to-process"].id
      WORKING_BUCKET     = aws_s3_bucket.ancb["working"].id
    }
  }
  tags = {
    Environment = var.env
    Terraform   = true
  }
  depends_on = [
    aws_s3_bucket_object.ancb["controller.zip"]
  ]
}
resource "aws_lambda_permission" "ancb_controller_s3" {
  statement_id  = "AllowExecutionFromS3Bucket-Controller"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ancb_controller.arn
  principal     = "s3.amazonaws.com"
  source_arn    = aws_s3_bucket.ancb["source"].arn
}
resource "aws_s3_bucket_notification" "controller" {
  bucket = aws_s3_bucket.ancb["source"].id
  lambda_function {
    lambda_function_arn = aws_lambda_function.ancb_controller.arn
    events              = ["s3:ObjectCreated:*"]
  }
  depends_on = [
    aws_lambda_permission.ancb_controller_s3,
    aws_s3_bucket.ancb["source"]
  ]
}
UPDATE: If I manually create the trigger and run terraform apply again, Terraform is able to move forward with no problem.
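One detail that stands out in the config above: the chunker's aws_lambda_permission sets function_name to aws_lambda_function.ancb_controller.arn, so the chunker function itself never receives invoke permission from S3, which would explain why S3 fails to validate it as a notification destination (and why creating the trigger by hand fixes it). A sketch of the corrected permission, where only function_name changes:

resource "aws_lambda_permission" "ancb_chunker_s3" {
  statement_id  = "AllowExecutionFromS3Bucket-Chunker"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ancb_chunker.arn # was ancb_controller
  principal     = "s3.amazonaws.com"
  source_arn    = aws_s3_bucket.ancb["original"].arn
}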