I am trying to build the Terraform configuration for a SageMaker private workforce with a private Cognito user pool.
Following : https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sagemaker_workforce
It is working fine.
main.tf
# SageMaker private workforce; on creation, SageMaker itself appends the
# labeling-portal callback/logout URLs to the Cognito app client.
resource "aws_sagemaker_workforce" "workforce" {
workforce_name = "workforce"
cognito_config {
# App client workers use to sign in to the labeling portal.
client_id = aws_cognito_user_pool_client.congnito_client.id
# Pool id read via the domain resource so the hosted-UI domain exists first.
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
}
}
# Cognito user pool that backs the private workforce.
resource "aws_cognito_user_pool" "user_pool" {
name = "sagemaker-cognito-userpool"
}
# App client for the workforce. NOTE(review): "congnito" is a typo, kept
# because every other resource in this file references it by this name.
resource "aws_cognito_user_pool_client" "congnito_client" {
name = "congnito-client"
# SageMaker workforces require an app client with a secret.
generate_secret = true
user_pool_id = aws_cognito_user_pool.user_pool.id
}
# Group that labeling workers belong to; referenced by the workteam below.
resource "aws_cognito_user_group" "user_group" {
name = "user-group"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
# Hosted-UI domain for the pool's sign-in pages.
resource "aws_cognito_user_pool_domain" "domain" {
domain = "sagemaker-user-pool-ocr-domain"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
# Work team inside the private workforce; membership is defined by the
# Cognito user group above.
resource "aws_sagemaker_workteam" "workteam" {
workteam_name = "worker-team"
workforce_name = aws_sagemaker_workforce.workforce.id
description = "worker-team"
member_definition {
cognito_member_definition {
client_id = aws_cognito_user_pool_client.congnito_client.id
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
user_group = aws_cognito_user_group.user_group.id
}
}
}
# Worker UI template rendered for each human review task.
resource "aws_sagemaker_human_task_ui" "template" {
human_task_ui_name = "human-task-ui-template"
ui_template {
# HTML template file shipped alongside this module.
content = file("${path.module}/sagemaker-human-task-ui-template.html")
}
}
# Flow definition that routes documents to the work team for human review.
resource "aws_sagemaker_flow_definition" "definition" {
flow_definition_name = "flow-definition"
# IAM role assumed by SageMaker to run the human loop.
role_arn = var.aws_iam_role
human_loop_config {
human_task_ui_arn = aws_sagemaker_human_task_ui.template.arn
# NOTE(review): 1 second task availability is extremely short — confirm
# this value is intentional and not a placeholder.
task_availability_lifetime_in_seconds = 1
task_count = 1
task_description = "Task description"
task_title = "Please review the Key Value Pairs in this document"
workteam_arn = aws_sagemaker_workteam.workteam.arn
}
output_config {
# Review results are written under this S3 prefix.
s3_output_path = "s3://${var.s3_output_path}"
}
}
It creates the Cognito user pool with callback URLs. These callback URLs come from aws_sagemaker_workforce.workforce.subdomain and are set in Cognito automatically, which is what I want.
But i also want to set config in cognito userpool like
allowed_oauth_flows = ["code", "implicit"]
allowed_oauth_scopes = ["email", "openid", "profile"]
Now, when I add the above two lines, Terraform requires callback_urls to be set as well, which I don't want.
i tried
allowed_oauth_flows = ["code", "implicit"]
allowed_oauth_scopes = ["email", "openid", "profile"]
callback_urls = [aws_sagemaker_workforce.workforce.subdomain]
which is giving error :
Cycle: module.sagemaker.aws_cognito_user_pool_client.congnito_client, module.sagemaker.aws_sagemaker_workforce.workforce
As both resources are dependent on each other, I want to add those two lines, but doing so forces me to add the callback URL as well.
Here is the final main.tf, which is failing with those three lines:
# --- Failing variant: demonstrates the reported dependency cycle ---
resource "aws_sagemaker_workforce" "workforce" {
workforce_name = "workforce"
cognito_config {
# workforce depends on the client here ...
client_id = aws_cognito_user_pool_client.congnito_client.id
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
}
}
resource "aws_cognito_user_pool" "user_pool" {
name = "sagemaker-cognito-userpool"
}
resource "aws_cognito_user_pool_client" "congnito_client" {
name = "congnito-client"
generate_secret = true
user_pool_id = aws_cognito_user_pool.user_pool.id
explicit_auth_flows = ["ALLOW_REFRESH_TOKEN_AUTH", "ALLOW_USER_PASSWORD_AUTH", "ALLOW_CUSTOM_AUTH", "ALLOW_USER_SRP_AUTH"]
allowed_oauth_flows_user_pool_client = true
supported_identity_providers = ["COGNITO"]
allowed_oauth_flows = ["code", "implicit"]
allowed_oauth_scopes = ["email", "openid", "profile"]
# BUG: ... and the client depends on the workforce here, closing the
# cycle reported by Terraform (workforce -> client -> workforce).
callback_urls = [aws_sagemaker_workforce.workforce.subdomain]
}
# The remaining resources are unchanged from the working configuration.
resource "aws_cognito_user_group" "user_group" {
name = "user-group"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
resource "aws_cognito_user_pool_domain" "domain" {
domain = "sagemaker-user-pool-ocr-domain"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
resource "aws_sagemaker_workteam" "workteam" {
workteam_name = "worker-team"
workforce_name = aws_sagemaker_workforce.workforce.id
description = "worker-team"
member_definition {
cognito_member_definition {
client_id = aws_cognito_user_pool_client.congnito_client.id
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
user_group = aws_cognito_user_group.user_group.id
}
}
}
resource "aws_sagemaker_human_task_ui" "template" {
human_task_ui_name = "human-task-ui-template"
ui_template {
content = file("${path.module}/sagemaker-human-task-ui-template.html")
}
}
resource "aws_sagemaker_flow_definition" "definition" {
flow_definition_name = "flow-definition"
role_arn = var.aws_iam_role
human_loop_config {
human_task_ui_arn = aws_sagemaker_human_task_ui.template.arn
task_availability_lifetime_in_seconds = 1
task_count = 1
task_description = "Task description"
task_title = "Please review the Key Value Pairs in this document"
workteam_arn = aws_sagemaker_workteam.workteam.arn
}
output_config {
s3_output_path = "s3://${var.s3_output_path}"
}
}
You do not need to specify the callback URL for the workforce. It is sufficient to specify the following in order to create the aws_cognito_user_pool_client resource:
# Bootstrap callback URL derived from the pool domain's CloudFront
# distribution; SageMaker appends its own URLs afterwards.
# FIX: removed a stray ">" that had crept into the resource reference.
callback_urls = [
"https://${aws_cognito_user_pool_domain.domain.cloudfront_distribution_arn}",
]
Then you reference the user pool client in your workforce definition:
# Skeleton showing how the workforce references the client; "..." and the
# <angle-bracket> placeholders stand in for your actual resource names.
resource "aws_sagemaker_workforce" "..." {
workforce_name = "..."
cognito_config {
client_id = aws_cognito_user_pool_client.<client_name>.id
user_pool = aws_cognito_user_pool_domain.<domain_name>.user_pool_id
}
}
Existence of the callback URLs can be proven after applying the terraform configuration by running aws cognito-idp describe-user-pool-client --user-pool-id <pool_id> --client-id <client_id>:
"UserPoolClient": {
...
"CallbackURLs": [
"https://____.cloudfront.net",
"https://____.labeling.eu-central-1.sagemaker.aws/oauth2/idpresponse"
],
"LogoutURLs": [
"https://____.labeling.eu-central-1.sagemaker.aws/logout"
],
It seems that Terraform itself does not do anything special on workforce creation (see https://github.com/hashicorp/terraform-provider-aws/blob/main/internal/service/sagemaker/workforce.go), so the callback URLs appear to be added by AWS SageMaker itself.
This means that you have to instruct terraform to ignore changes on those attributes in the aws_cognito_user_pool_client configuration:
# SageMaker appends its own callback/logout URLs after creation, so tell
# Terraform not to treat those server-side additions as drift.
lifecycle {
ignore_changes = [
callback_urls, logout_urls
]
}
Related
Using terraform version 1.0.10 and AWS provider 4, My code deploy an AWS s3 bucket using a module that has aws_s3_bucket_lifecycle_configuration as required and works fine:
main.tf code:
# FIX(review): the module header line was missing from the pasted snippet;
# restored so the block is valid HCL.
module "bucket" {
source = "./modules/aws_plain_resource/s3/"
bucket_name = var.bucket_name_cloud_events
sse_algorithm = var.sse_algorithm
logging_target_bucket = var.logging_target_bucket
lifecycle_status = "Enabled"
# Transition to STANDARD_IA after 30 days, then GLACIER.
first_period = 30
fp_storage_class = "STANDARD_IA"
second_period = 0
sp_storage_class = "GLACIER"
object_expiration = 1060
common_tags = {
data_classification = "confidential"
risk_classification = "high"
tyro_team = "operations_team"
}
}
Module code:
bucket = var.bucket_name
tags = merge(var.common_tags, {
"Name" = var.bucket_name
})
}
# Keep the bucket private.
resource "aws_s3_bucket_acl" "this" {
bucket = aws_s3_bucket.this.id
acl = "private"
}
# Default server-side encryption for all objects.
resource "aws_s3_bucket_server_side_encryption_configuration" "this" {
bucket = aws_s3_bucket.this.id
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = var.kms_master_key_arn
sse_algorithm = var.sse_algorithm
}
}
}
# Lifecycle rule: two storage-class transitions followed by expiration.
resource "aws_s3_bucket_lifecycle_configuration" "this" {
bucket = aws_s3_bucket.this.id
rule {
id = "iac_lifecycle_rule"
status = var.lifecycle_status
transition {
days = var.first_period
storage_class = var.fp_storage_class
}
transition {
days = var.second_period
storage_class = var.sp_storage_class
}
expiration {
days = var.object_expiration
}
}
}
# Expose the bucket id to callers of this module.
output "bucket-id" {
value = aws_s3_bucket.this.id
}
However, I would like the rule argument in the main.tf file to be configurable. Is there a way to pass the rule configuration as a variable to the Terraform S3 module?
Rule is:
# The rule block the question wants to make configurable:
rule {
id = "iac_lifecycle_rule"
status = var.lifecycle_status
transition {
days = var.first_period
storage_class = var.fp_storage_class
}
transition {
days = var.second_period
storage_class = var.sp_storage_class
}
expiration {
days = var.object_expiration
}
}
You have to define a rule map variable in your module, and then pass it in:
# FIX(review): the original snippet was missing the opening "{" after
# module "bucket" and never closed the module block. It also repeated the
# key "transition" inside one object, which is invalid HCL (an object
# cannot contain the same key twice) — the transitions are a list now.
module "bucket" {
source = "./modules/aws_plain_resource/s3/"
rule = {
id = "iac_lifecycle_rule"
status = var.lifecycle_status
transitions = [
{
days = var.first_period
storage_class = var.fp_storage_class
},
{
days = var.second_period
storage_class = var.sp_storage_class
},
]
expiration = {
days = var.object_expiration
}
}
}
and then you have to explicitly set all the values in aws_s3_bucket_lifecycle_configuration:
resource "aws_s3_bucket_lifecycle_configuration" "this" {
bucket = aws_s3_bucket.this.id
rule {
# FIX: module inputs must be referenced through the var. prefix; the
# original used bare rule["id"], which is not a valid reference.
id = var.rule["id"]
status = var.rule["status"]
# One transition block per entry in the transitions list; a dynamic
# block is needed because HCL cannot repeat a key inside one object.
dynamic "transition" {
for_each = var.rule["transitions"]
content {
days = transition.value["days"]
storage_class = transition.value["storage_class"]
}
}
expiration {
days = var.rule["expiration"]["days"]
}
}
}
Below is my terraform code to create AWS Cognito User Pool:
# User pool exported from an existing Cognito pool (see surrounding text);
# triggers "schema.1.name cannot be longer than 20 character" on plan.
resource "aws_cognito_user_pool" "CognitoUserPool" {
name = "cgup-aws-try-cogn-createcgup-001"
password_policy {
minimum_length = 8
require_lowercase = true
require_numbers = true
require_symbols = true
require_uppercase = true
temporary_password_validity_days = 7
}
lambda_config {
}
# Re-declares the built-in "sub" attribute; the export included it as an
# explicit schema block.
schema {
attribute_data_type = "String"
developer_only_attribute = false
mutable = false
name = "sub"
string_attribute_constraints {
max_length = "2048"
min_length = "1"
}
required = true
}
}
the code consists of several schemas, but I think that may be enough.
It was exported from aws from an existing cognito user pool, but when I try a terraform plan I get the following error:
Error: "schema.1.name" cannot be longer than 20 character
with aws_cognito_user_pool.CognitoUserPool,
on main.tf line 216, in resource "aws_cognito_user_pool" "CognitoUserPool":
216: resource "aws_cognito_user_pool" "CognitoUserPool" {
No matter how much I reduce the length of the name, I get the same error.
I tried to deploy
# Identical configuration apart from the shorter pool name; this variant
# deployed successfully per the answer text below.
resource "aws_cognito_user_pool" "CognitoUserPool" {
name = "cgup-aws-try"
password_policy {
minimum_length = 8
require_lowercase = true
require_numbers = true
require_symbols = true
require_uppercase = true
temporary_password_validity_days = 7
}
lambda_config {
}
schema {
attribute_data_type = "String"
developer_only_attribute = false
mutable = false
name = "sub"
string_attribute_constraints {
max_length = "2048"
min_length = "1"
}
required = true
}
}
and it was successful.
Maybe try to start fresh in a new workspace.
I am trying to build a terraform template that creates an AWS S3 Bucket, Cloudfront Distribution and a Lambda function that should be associated with the Cloudfront Distribution.
As soon as I add "lambda_function_association" to the Cloudfront ressource I experience following error.
Error: error updating CloudFront Distribution (XXXXXXXXXXXXXXX): InvalidLambdaFunctionAssociation: The function cannot have environment variables. Function: arn:aws:lambda:us-east-1:XXXXXXXXXXXXX:function:testtools:4
status code: 400, request id: 3ce25af1-8341-41c0-8d35-4c3c91c2c001
with aws_cloudfront_distribution.testtools,
on main.tf line 42, in resource "aws_cloudfront_distribution" "testtools":
42: resource "aws_cloudfront_distribution" "testtools" {
# The association that triggers the InvalidLambdaFunctionAssociation error:
lambda_function_association {
event_type = "origin-response"
# qualified_arn points at a published, numbered version of the function.
lambda_arn = "${aws_lambda_function.testtools.qualified_arn}"
include_body = false
}
I think it is related to the lambda_arn that is used inside the function association.
# CloudFront distribution serving the S3 bucket, with a Lambda@Edge
# function attached on origin-response.
resource "aws_cloudfront_distribution" "testtools" {
depends_on = [aws_s3_bucket.testtools, aws_lambda_function.testtools]
origin {
domain_name = aws_s3_bucket.testtools.bucket_regional_domain_name
origin_id = var.s3_origin_id
s3_origin_config {
origin_access_identity = aws_cloudfront_origin_access_identity.testtools.cloudfront_access_identity_path
}
}
enabled = true
is_ipv6_enabled = true
comment = "testtools"
default_root_object = "index.html"
provider = aws
logging_config {
include_cookies = false
bucket = "testtools.s3.amazonaws.com"
prefix = "testtools"
}
aliases = ["testtools.int.test.net"]
default_cache_behavior {
allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
cached_methods = ["GET", "HEAD"]
target_origin_id = var.s3_origin_id
forwarded_values {
query_string = false
cookies {
forward = "none"
}
}
viewer_protocol_policy = "allow-all"
min_ttl = 0
default_ttl = 3600
max_ttl = 86400
# Attaching this association is what surfaces the error, because the
# referenced function defines environment variables (see below).
lambda_function_association {
event_type = "origin-response"
lambda_arn = "${aws_lambda_function.testtools.qualified_arn}"
include_body = false
}
}
price_class = "PriceClass_200"
restrictions {
geo_restriction {
restriction_type = "whitelist"
locations = ["DE", "AU", "CH", "BG"]
}
}
tags = {
Environment = "production"
}
viewer_certificate {
acm_certificate_arn = var.ssl_cert_arn
ssl_support_method = "sni-only"
minimum_protocol_version = "TLSv1"
}
}
resource "aws_lambda_function" "testtools" {
filename = "lambda_function_payload.zip"
function_name = "testtools"
role = aws_iam_role.testtools.arn
handler = "index.test"
# Lambda@Edge requires a published, numbered version (qualified_arn).
publish = true
# Lambda@Edge functions must be created in us-east-1.
provider = aws.useast1
source_code_hash = filebase64sha256("lambda_function_payload.zip")
runtime = "nodejs12.x"
# FIX: removed the environment block. Lambda@Edge functions cannot define
# environment variables — that is exactly the InvalidLambdaFunctionAssociation
# error quoted above. Bake configuration into the deployment package instead.
}
When using a Lambda#edge, your lambda has a lot more restrictions that it has to adhere to. Some restrictions also depend on whether you're linking the lambda to origin req/res or viewer req/res.
One of these restrictions is that you can't use environment variables. You can find more info on this page: Lambda#Edge function restrictions
I am trying to pass the S3 bucket names and create_user values into the locals block in main.tf so that both are collected into lists; list_of_buckets is then passed to module s3 to create the buckets, and users_to_create is looped over in module s3_user to create a user whenever the boolean is set to true. All of these values flow through variable.tf into main.tf.
dev.tfvars
# FIX: "s3" holds named entries (sthree, sfour, sfive), so it must be a
# map (braces), not a list — `[ sthree = {...} ]` is invalid HCL.
wea-nonprod = {
services = {
s3 = {
sthree = {
create_user = true,
}
sfour = {
create_user = true,
}
sfive = {
create_user = true,
}
}
}
}
variable.tf
# FIX: `list(map)` is not a valid type constraint — map() requires an
# element type. `map(any)` accepts the per-bucket objects from dev.tfvars.
# NOTE(review): this variable is not referenced by the modules below;
# confirm it is still needed.
variable "s3_buckets" {
type = map(any)
}
main.tf
# Placeholder values the question wants computed from var.wea-nonprod.
locals {
users_to_create = ""
list_of_buckets = ""
}
module "s3" {
source = "../../s3"
name = join("-", [var.name_prefix, "s3"])
tags = merge(var.tags, {Name = join("-", [var.name_prefix, "s3"])})
# FIX: locals must be referenced with the local. prefix; a bare
# `list_of_buckets` is an unknown identifier.
buckets = local.list_of_buckets
sse_algorithm = "AES256"
access_log_bucket_name = var.access_log_bucket_name
}
module "s3_user" {
# NOTE(review): for_each needs a map or a set of strings; if
# users_to_create ends up a list, wrap it in toset().
for_each = local.users_to_create
source = "./service-s3-bucket-user"
name = join("-", [var.name_prefix, each.key])
tags = var.tags
bucket_arn = module.s3.bucket_arns[each.key]
depends_on = [module.s3]
}
Just iterate over your wea-nonprod map:
locals {
# FIX: a `for` expression must have a result clause after ":"; the
# original `[ for name in ... if ... ]` forms are invalid HCL.
# Iterate the s3 map as name => config pairs.
users_to_create = [for name, cfg in var.wea-nonprod.services.s3 : name if cfg.create_user]
list_of_buckets = [for name, cfg in var.wea-nonprod.services.s3 : name]
}
And a few changes to your module blocks:
module "s3" {
source = "../../s3"
name = "${var.name_prefix}-s3"
tags = merge(var.tags, { Name = "${var.name_prefix}-s3" })
# Bucket names computed in the locals block above.
buckets = local.list_of_buckets
sse_algorithm = "AES256"
access_log_bucket_name = var.access_log_bucket_name
}
# count + index addressing replaces for_each over the computed list.
module "s3_user" {
count = length(local.users_to_create)
source = "./service-s3-bucket-user"
name = "${var.name_prefix}${local.users_to_create[count.index]}"
tags = var.tags
bucket_arn = module.s3.bucket_arns[local.users_to_create[count.index]]
depends_on = [module.s3]
}
I'm using Terraform to create a Cognito User pool. I'd like to use a lambda function for sending a custom message when a user signs up. When I run attempt to sign up on the client, I get an error saying that "CustomMessage invocation failed due to error AccessDeniedException." I've used Lambda Permissions before, but I can't find any examples of this configuration. How do I give the lambda function permission? The following is my current configuration.
# User pool with Lambda triggers for custom messages and post-confirmation.
resource "aws_cognito_user_pool" "main" {
name = "${var.user_pool_name}_${var.stage}"
username_attributes = [ "email" ]
schema {
attribute_data_type = "String"
mutable = true
name = "name"
required = true
}
schema {
attribute_data_type = "String"
mutable = true
name = "email"
required = true
}
password_policy {
minimum_length = "8"
require_lowercase = true
require_numbers = true
require_symbols = true
require_uppercase = true
}
mfa_configuration = "OFF"
# Cognito invokes these functions; each needs an aws_lambda_permission
# granting cognito-idp.amazonaws.com the right to call it.
lambda_config {
custom_message = aws_lambda_function.custom_message.arn
post_confirmation = aws_lambda_function.post_confirmation.arn
}
}
...
resource "aws_lambda_permission" "get_blog" {
statement_id = "AllowExecutionFromCognito"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.custom_message.function_name
principal = "cognito-idp.amazonaws.com"
# FIX: Cognito invokes triggers with the bare user-pool ARN as the
# source; the "/*/*" suffix (API Gateway style) does not match and
# causes the AccessDeniedException (see the resolution note below).
source_arn = aws_cognito_user_pool.main.arn
# Redundant with the function_name reference above, but harmless.
depends_on = [ aws_lambda_function.custom_message ]
}
...
# Lambda that renders custom Cognito messages on sign-up.
resource "aws_lambda_function" "custom_message" {
filename = "${var.custom_message_path}/${var.custom_message_file_name}.zip"
function_name = var.custom_message_file_name
role = aws_iam_role.custom_message.arn
handler = "${var.custom_message_file_name}.handler"
# Redeploy only when the packaged code actually changes.
source_code_hash = filebase64sha256("${var.custom_message_path}/${var.custom_message_file_name}.zip")
runtime = "nodejs12.x"
timeout = 10
layers = [ var.node_layer_arn ]
environment {
variables = {
TABLE_NAME = var.table_name
RESOURCENAME = "blogAuthCustomMessage"
REGION = "us-west-2"
}
}
tags = {
Name = var.developer
}
depends_on = [
data.archive_file.custom_message,
]
}
Based on OP's feedback in the comment section, changing source_arn property in the aws_lambda_permission.get_blog to aws_cognito_user_pool.main.arn works.