Terraform trying to replace existing resource when new user is added - amazon-web-services

I have this YAML file:
team1:
  test#gmail,com: ${live_user_role}
  some#gmail.com: ${live_user_role}
  tem#gmail.com: ${live_admin_role}
Terraform code:
locals {
  render_membership = templatefile("${path.module}/teammembers.yaml",
    {
      live_user_role  = var.liveteam_user_role_id
      live_admin_role = var.liveteam_admin_role_id
    }
  )
  membership_nested = yamldecode(local.render_membership)
  team_names        = keys(local.membership_nested)
  membership_flat = flatten(
    [
      for team_name in local.team_names : [
        for user_email, roles in local.membership_nested[team_name] : {
          team_name = team_name
          user_name = user_email
          roles     = [roles]
        }
      ]
    ]
  )
}
resource "squadcast_team_member" "membership" {
  for_each = { for i, v in local.membership_flat : i => v }
  team_id  = data.squadcast_team.teams[each.key].id
  user_id  = data.squadcast_user.users[each.key].id
  role_ids = each.value.roles
  lifecycle {
    create_before_destroy = true
  }
}
data "squadcast_team" "teams" {
  for_each = { for i, v in local.membership_flat : i => v }
  name     = each.value.team_name
}
data "squadcast_user" "users" {
  for_each = { for i, v in local.membership_flat : i => v }
  email    = each.value.user_name
}
Now when I add a new member to the list, let's say like this:
team1:
  test#gmail,com: ${live_user_role}
  some#gmail.com: ${live_user_role}
  tem#gmail.com: ${live_admin_role}
  roy#gmail.com: ${live_admin_role}
Terraform is deleting the previous users and recreating all the users again:
  # squadcast_team_member.membership["1"] must be replaced
+/- resource "squadcast_team_member" "membership" {
      ~ id       = "62ed115ab4b4017fa2a4b786" -> (known after apply)
      ~ role_ids = [
          - "61b08676e4466d68c4866db0",
          + "61b08676e4466d68c4866db1",
        ]
      ~ user_id  = "62ed115ab4b4017fa2a4b786" -> "61b7d915a14c2569ea9edde6" # forces replacement
        # (1 unchanged attribute hidden)
    }
How can I modify the code so that it only creates the new member and does not touch the existing members?

This happens because membership_flat is a list of maps and your for_each keys are list indices. In a list, order matters: inserting a new entry shifts the indices of the entries after it, so Terraform sees a different user at each index and forces replacement. It's better to flatten your data into a map with stable keys instead:
membership_flat = merge([
  for team_name in local.team_names : {
    for user_email, roles in local.membership_nested[team_name] :
    "${team_name}-${user_email}" => {
      team_name = team_name
      user_name = user_email
      roles     = [roles]
    }
  }
]...) # The dots are important. Do not remove them.
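The three dots are Terraform's expansion symbol: merge() expects one map per argument, so the trailing ... spreads the list of per-team maps produced by the outer for expression into individual arguments. A minimal illustration (made-up values, not from the question):
merge([{ a = 1 }, { b = 2 }]...) # same as merge({ a = 1 }, { b = 2 }), i.e. { a = 1, b = 2 }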
Then:
data "squadcast_team" "teams" {
  for_each = local.membership_flat
  name     = each.value.team_name
}
data "squadcast_user" "users" {
  for_each = local.membership_flat
  email    = each.value.user_name
}
resource "squadcast_team_member" "membership" {
  for_each = local.membership_flat
  team_id  = data.squadcast_team.teams[each.key].id
  user_id  = data.squadcast_user.users[each.key].id
  role_ids = each.value.roles
  lifecycle {
    create_before_destroy = true
  }
}
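With this change the for_each keys are stable strings such as "team1-some#gmail.com" instead of list positions, so adding roy#gmail.com only creates one new squadcast_team_member instance and leaves the existing ones untouched. Roughly, the flattened map now looks like this (role IDs elided):
membership_flat = {
  "team1-test#gmail,com" = { team_name = "team1", user_name = "test#gmail,com", roles = [...] }
  "team1-some#gmail.com" = { team_name = "team1", user_name = "some#gmail.com", roles = [...] }
  # ...one entry per team/user pair
}
Note that instances already tracked in state under the old numeric keys ("0", "1", ...) will be re-keyed, so you may need a one-time terraform state mv (or moved blocks) to avoid replacing the existing members once when switching.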

Related

How to enable a dynamic block conditionally for creating GCS buckets

I'm trying to add a retention policy, but I want to enable it conditionally, as you can see from the code below.
buckets.tf
locals {
  team_buckets = {
    arc = { app_id = "20390", num_buckets = 2, retention_period = null }
    ana = { app_id = "25402", num_buckets = 2, retention_period = 631139040 }
    cha = { app_id = "20391", num_buckets = 2, retention_period = 631139040 } #20 year
  }
}
module "team_bucket" {
  source = "../../../../modules/gcs_bucket"
  for_each = {
    for bucket in flatten([
      for product_name, bucket_info in local.team_buckets : [
        for i in range(bucket_info.num_buckets) : {
          name             = format("%s-%02d", product_name, i + 1)
          team             = "ei_${product_name}"
          app_id           = bucket_info.app_id
          retention_period = bucket_info.retention_period
        }
      ]
    ]) : bucket.name => bucket
  }
  project_id       = var.project
  name             = "teambucket-${each.value.name}"
  app_id           = each.value.app_id
  team             = each.value.team
  retention_period = each.value.retention_period
}
The root module is defined as follows:
main.tf
resource "google_storage_bucket" "bucket" {
project = var.project_id
name = "${var.project_id}-${var.name}"
location = var.location
labels = {
app_id = var.app_id
ei_team = var.team
cost_center = var.cost_center
}
uniform_bucket_level_access = var.uniform_bucket_level_access
dynamic "retention_policy" {
for_each = var.retention_policy == null ? [] : [var.retention_period]
content {
retention_period = var.retention_period
}
}
}
but I can't seem to make the code pick up the value. For example, as you can see below, the retention policy doesn't get applied:
~ resource "google_storage_bucket" "bucket" {
      id   = "teambucket-cha-02"
      name = "teambucket-cha-02"
      # (11 unchanged attributes hidden)
      - retention_policy {
          - is_locked        = false -> null
          - retention_period = 3155760000 -> null
        }
  }
variables.tf for the retention policy is as follows:
variable "retention_policy" {
  description = "Configuration of the bucket's data retention policy for how long objects in the bucket should be retained"
  type        = any
  default     = null
}
variable "retention_period" {
  default = null
}
Your var.retention_policy is always null, its default value; you never set it anywhere. You probably wanted the following:
for_each = var.retention_period == null ? [] : [var.retention_period]
instead of
for_each = var.retention_policy == null ? [] : [var.retention_period]
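For reference, a sketch of the corrected dynamic block (same variables as in the question; using the iterator value inside content is equivalent to referencing var.retention_period directly):
dynamic "retention_policy" {
  # Iterate over a one-element list only when a retention period is actually set.
  for_each = var.retention_period == null ? [] : [var.retention_period]
  content {
    retention_period = retention_policy.value
  }
}
With that in place, the otherwise unused var.retention_policy variable can probably be dropped.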

for_each loop with dynamic block and values from tfvars

I'm trying to create certain BigQuery tables with time_partitioning using the dynamic block, and I want to use the values from tfvars at runtime, as follows:
./tables/tables.tf:
resource "google_bigquery_table" "tables" {
for_each = var.tables == [] ? [] : toset(var.tables)
dataset_id = var.db_id
deletion_protection = false
table_id = each.key
dynamic "time_partitioning" {
for_each = var.partitioned_tables
content {
type = "DAY"
field = time_partitioning.value.field
}
}
labels = {
environment = var.environment
application = var.application
}
schema = fileexists("${path.module}/${var.db_id}/${each.key}.json") ? file("${path.module}/${var.db_id}/${each.key}.json") : null
}
main.tf:
resource "google_bigquery_dataset" "database" {
count = length(var.dbs)
dataset_id = var.dbs[count.index].db_id
friendly_name = var.dbs[count.index].db_name
description = "TF"
location = "US"
delete_contents_on_destroy = var.delete_contents_on_destroy
labels = {
environment = var.environment
application = var.dbs[count.index].app_name
}
}
module "tables" {
source = "./tables"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
environment = var.environment
application = var.dbs[count.index].app_name
tables = var.dbs[count.index].tables
partitioned_tables = var.dbs[count.index].partitioned_tables
}
module "iam" {
source = "./iam"
count = length(var.dbs)
db_id = google_bigquery_dataset.database[count.index].dataset_id
iam_members = var.dbs[count.index].iam_members
}
dev.tfvars:
region     = "us-central1"
project_id = "some-project"
dbs = [
  {
    db_id    = "dataset1"
    db_name  = "dataset1"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com",
      }
    ]
    tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
    partitioned_tables = [
      {
        table = "daily_sales"
        field = "sales_timestamp"
      },
      {
        table = "daily_inventory"
        field = "inventory_timestamp"
      }
    ]
  },
  {
    db_id    = "dataset2"
    db_name  = "dataset2"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
      }
    ]
    tables = []
  }
]
environment                = "development"
delete_contents_on_destroy = true
var.dbs is declared as type = list(any). I'm getting:
The given value is not suitable for var.dbs declared at
variables.tf:9,1-15: all list elements must have the same type.
Thanks in advance!
list(any) does not mean that you can have elements of any type in your list. All elements must have the same type, and you can't mix types as you do now (i.e., the second element is missing partitioned_tables). any only means that Terraform will infer the single type for the elements, but all elements must still be of that single type. So you have three choices:
remove type = list(any)
Fully define your type with optional attributes instead of using any (see the sketch after the example below)
Add partitioned_tables to the second element:
[
  {
    db_id    = "dataset1"
    db_name  = "dataset1"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com",
      }
    ]
    tables = ["daily_inventory", "dc_inventory", "products", "daily_sales", "planned_inventory", "stores", "stores_in_program"]
    partitioned_tables = [
      {
        table = "daily_sales"
        field = "sales_timestamp"
      },
      {
        table = "daily_inventory"
        field = "inventory_timestamp"
      }
    ]
  },
  {
    db_id    = "dataset2"
    db_name  = "dataset2"
    app_name = "hello"
    iam_members = [
      {
        role   = "roles/bigquery.dataEditor"
        member = "serviceAccount:ser-sa#some-project.iam.gserviceaccount.com"
      }
    ]
    partitioned_tables = []
    tables             = []
  }
]
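For the second option, a rough sketch of a fully specified type with optional attributes (requires Terraform 1.3+; attribute names taken from the tfvars above):
variable "dbs" {
  type = list(object({
    db_id    = string
    db_name  = string
    app_name = string
    iam_members = list(object({
      role   = string
      member = string
    }))
    tables = list(string)
    # Defaults to an empty list when omitted, so elements no longer have to match exactly.
    partitioned_tables = optional(list(object({
      table = string
      field = string
    })), [])
  }))
}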

Nested for_each in terraform, attempting to self-merge map

I'm trying to account for an annoying case when creating my own private hosted zones, where the kinesis-streams endpoint has several non-standard DNS records. I am aiming to create a custom map of objects for each endpoint with the information that I need (below):
locals {
  endpoint_service_names = {
    ec2 = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ec2"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["ec2.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    "ecr.dkr" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.dkr"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["dkr.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = true
    }
    "ecr.api" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.api"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["api.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    kinesis-streams = {
      name        = "com.amazonaws.${data.aws_region.current.name}.kinesis-streams"
      type        = "Interface"
      private_dns = false
      phz_names = [
        "kinesis.${data.aws_region.current.name}.amazonaws.com",
        "data-kinesis.${data.aws_region.current.name}.amazonaws.com",
        "control-kinesis.${data.aws_region.current.name}.amazonaws.com"
      ]
      phz_wildcard = true
    }
  }
}
In order to use this in my implementation, however, I would need to perform a nested for_each, which Terraform does not directly allow, so I need to merge my map info into a new map with the service and DNS names in one object. My problem is similar to: Looping in for_each nested resources with terraform,
except I do not have a list in each map element, only in the phz_names. I can't figure out how to get the syntax right to produce something like this:
endpoint_service_dns_list = {
  kinesis-streams = [
    "kinesis.${data.aws_region.current.name}.amazonaws.com",
    "data-kinesis.${data.aws_region.current.name}.amazonaws.com",
    "control-kinesis.${data.aws_region.current.name}.amazonaws.com"
  ]
  [...]
}
My attempt:
endpoint_service_dns_list = merge([
  for servicename, service in local.endpoint_service_names : {
    for phz_name in service[0].phz_names :
    "${servicename}-${phz_name}" => {
      service_name = servicename
      phz_name     = phz.phz_names
    }
  }
])
but the syntax highlighting/attempt at indexing obviously fails because I do not have a list for each service, and I am not sure what to replace it with.
Maybe I have missed the point, but based on your comments and your expected result in the question, you could have something like this. I have added a format that maps the service name to all its URLs as a list. Or, in case you wanted a mapping of each URL to its service name, I added an additional local variable.
locals {
  endpoint_service_names = {
    ec2 = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ec2"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["ec2.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    "ecr.dkr" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.dkr"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["dkr.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = true
    }
    "ecr.api" = {
      name         = "com.amazonaws.${data.aws_region.current.name}.ecr.api"
      type         = "Interface"
      private_dns  = false
      phz_names    = ["api.ecr.${data.aws_region.current.name}.amazonaws.com"]
      phz_wildcard = false
    }
    kinesis-streams = {
      name        = "com.amazonaws.${data.aws_region.current.name}.kinesis-streams"
      type        = "Interface"
      private_dns = false
      phz_names = [
        "kinesis.${data.aws_region.current.name}.amazonaws.com",
        "data-kinesis.${data.aws_region.current.name}.amazonaws.com",
        "control-kinesis.${data.aws_region.current.name}.amazonaws.com"
      ]
      phz_wildcard = true
    }
  }
  endpoint_service_dns_list = { for service, attrs in local.endpoint_service_names : service => attrs.phz_names }
  endpoint_dns_service_list = merge([for service, attrs in local.endpoint_service_names : { for url in attrs["phz_names"] : url => service }]...)
}
data "aws_region" "current" {}
output "endpoint_service_dns_list" {
  value = local.endpoint_service_dns_list
}
output "endpoint_dns_service_list" {
  value = local.endpoint_dns_service_list
}
This maps each service to its phz_names, as seen in the output below, or maps each URL to its service:
endpoint_dns_service_list = {
  "api.ecr.eu-west-2.amazonaws.com"         = "ecr.api"
  "control-kinesis.eu-west-2.amazonaws.com" = "kinesis-streams"
  "data-kinesis.eu-west-2.amazonaws.com"    = "kinesis-streams"
  "dkr.ecr.eu-west-2.amazonaws.com"         = "ecr.dkr"
  "ec2.eu-west-2.amazonaws.com"             = "ec2"
  "kinesis.eu-west-2.amazonaws.com"         = "kinesis-streams"
}
endpoint_service_dns_list = {
  "ec2" = [
    "ec2.eu-west-2.amazonaws.com",
  ]
  "ecr.api" = [
    "api.ecr.eu-west-2.amazonaws.com",
  ]
  "ecr.dkr" = [
    "dkr.ecr.eu-west-2.amazonaws.com",
  ]
  "kinesis-streams" = [
    "kinesis.eu-west-2.amazonaws.com",
    "data-kinesis.eu-west-2.amazonaws.com",
    "control-kinesis.eu-west-2.amazonaws.com",
  ]
}
Hopefully one of these is the kind of thing you were looking to do.
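For completeness, a hedged sketch of how the flattened map could then drive a resource-level for_each, for example one private hosted zone per endpoint DNS name (var.vpc_id is an assumed variable, not from the question):
resource "aws_route53_zone" "endpoint_phz" {
  # One private hosted zone per endpoint DNS name; the map value is the owning service.
  for_each = local.endpoint_dns_service_list

  name    = each.key
  comment = "PHZ for the ${each.value} VPC endpoint"

  vpc {
    vpc_id = var.vpc_id # assumed
  }
}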

Terraform does not keep state if resources are generated dynamically

I have a variables.tf file that has the following contents:
variable "thing_configuration_set" {
default = {
"name" = "customer1"
"projects" = [
{
"name" = "project1"
"things" = [
{
"name" = "device1"
"fw_version" = "1.0"
"fw_type" = "generic_device"
"thing_type" = "default_device"
}
]
}
]
}
}
variable "iot_policy" {
type = string
sensitive = true
}
locals {
customer_list = distinct(flatten([for idx, customer in var.thing_configuration_set :
{
"customer" : customer.name
}
]))
project_list = distinct(flatten([for idx, customer in var.thing_configuration_set :
flatten([for project_idx, project in customer.projects :
{
"customer" = customer.name
"project" = project.name
}
])
]))
thing_list = flatten([for idx, customer in var.thing_configuration_set :
flatten([for project_idx, project in customer.projects :
flatten([for thing in project.things :
{
"customer" = customer.name
"project" = project.name
"thing" = thing
}
])
])
])
thing_types = distinct(flatten([for idx, record in local.thing_list :
{
"thing_type" = record.thing.thing_type
}]))
iot_policy_json = base64decode(var.iot_policy)
}
And then another .tf file that defines all the resources needed to set up an IoT thing in AWS:
resource "aws_iot_thing_group" "customer" {
for_each = { for idx, record in local.customer_list : idx => record }
name = each.value.customer
}
resource "aws_iot_thing_group" "project" {
for_each = { for idx, record in local.project_list : idx => record }
name = each.value.project
parent_group_name = each.value.customer
}
resource "aws_iot_thing" "thing" {
for_each = { for idx, record in local.thing_list : idx => record }
name = "${each.value.customer}_${each.value.project}_${each.value.thing.name}"
attributes = {
bms_fw_version = each.value.thing.bms_fw_version
bms_type = each.value.thing.bms_fw_type
}
thing_type_name = each.value.thing.thing_type
}
resource "aws_iot_thing_group_membership" "thing_group_membership" {
for_each = { for idx, record in local.thing_list : idx => record }
thing_name = "${each.value.customer}_${each.value.project}_${each.value.thing.name}"
thing_group_name = each.value.project
}
resource "aws_iot_thing_type" "thing_type" {
for_each = { for idx, record in local.thing_types : idx => record }
name = "${each.value.thing_type}"
}
resource "aws_iot_certificate" "things_cert" {
active = true
}
resource "aws_iot_thing_principal_attachment" "cert_attachment" {
for_each = { for idx, record in local.thing_list : idx => record }
principal = aws_iot_certificate.things_cert.arn
thing = aws_iot_thing.thing[each.key].name
}
resource "aws_iot_policy" "policy" {
name = "connect_subscribe_publish_any"
policy = local.iot_policy_json
}
resource "aws_iot_policy_attachment" "thing_policy_attachment" {
policy = aws_iot_policy.tf_policy.name
target = aws_iot_certificate.things_cert.arn
}
Since we have quite a few resources in AWS already, I tried importing them. But when I do terraform plan it still wants to create these 'successfully' imported resources.
For example:
terraform import aws_iot_thing_group.customer Customer1
Would return:
Import successful!
The resources that were imported are shown above. These resources are now in
your Terraform state and will henceforth be managed by Terraform.
If I then run terraform plan it will still list that it will create this customer:
  # aws_iot_thing_group.customer["0"] will be created
  + resource "aws_iot_thing_group" "customer" {
      + arn      = (known after apply)
      + id       = (known after apply)
      + metadata = (known after apply)
      + name     = "Customer1"
      + tags_all = (known after apply)
      + version  = (known after apply)
    }
What am I doing wrong? Is this a bug in Terraform?
From what I've seen (I'm very new to Terraform), the state only works when you define the resource directly, without any generated stuff (like for_each, etc.).
As per #luk2302's comment (h/t) and the documentation [1], the correct import command (since it is being run in PowerShell) is:
terraform import 'aws_iot_thing_group.customer[\"0\"]' Customer1
[1] https://developer.hashicorp.com/terraform/cli/commands/import#example-import-into-resource-configured-with-for_each
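For what it's worth, on a POSIX shell (bash/zsh) the single quotes already protect the inner double quotes, so no escaping should be needed:
terraform import 'aws_iot_thing_group.customer["0"]' Customer1
Keying the for_each map by a meaningful string (for example the customer name) instead of the list index would also make these instance addresses more readable and stable, similar to the first answer above.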

SageMaker workforce with Cognito

I am trying to build the Terraform for a SageMaker private workforce with private Cognito, following https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sagemaker_workforce, and it is working fine.
main.tf
resource "aws_sagemaker_workforce" "workforce" {
workforce_name = "workforce"
cognito_config {
client_id = aws_cognito_user_pool_client.congnito_client.id
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
}
}
resource "aws_cognito_user_pool" "user_pool" {
name = "sagemaker-cognito-userpool"
}
resource "aws_cognito_user_pool_client" "congnito_client" {
name = "congnito-client"
generate_secret = true
user_pool_id = aws_cognito_user_pool.user_pool.id
}
resource "aws_cognito_user_group" "user_group" {
name = "user-group"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
resource "aws_cognito_user_pool_domain" "domain" {
domain = "sagemaker-user-pool-ocr-domain"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
resource "aws_sagemaker_workteam" "workteam" {
workteam_name = "worker-team"
workforce_name = aws_sagemaker_workforce.workforce.id
description = "worker-team"
member_definition {
cognito_member_definition {
client_id = aws_cognito_user_pool_client.congnito_client.id
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
user_group = aws_cognito_user_group.user_group.id
}
}
}
resource "aws_sagemaker_human_task_ui" "template" {
human_task_ui_name = "human-task-ui-template"
ui_template {
content = file("${path.module}/sagemaker-human-task-ui-template.html")
}
}
resource "aws_sagemaker_flow_definition" "definition" {
flow_definition_name = "flow-definition"
role_arn = var.aws_iam_role
human_loop_config {
human_task_ui_arn = aws_sagemaker_human_task_ui.template.arn
task_availability_lifetime_in_seconds = 1
task_count = 1
task_description = "Task description"
task_title = "Please review the Key Value Pairs in this document"
workteam_arn = aws_sagemaker_workteam.workteam.arn
}
output_config {
s3_output_path = "s3://${var.s3_output_path}"
}
}
It creates the Cognito user pool with callback URLs. These callback URLs come from aws_sagemaker_workforce.workforce.subdomain and get set in Cognito automatically, which is what I want.
But I also want to set config in the Cognito user pool client like
allowed_oauth_flows = ["code", "implicit"]
allowed_oauth_scopes = ["email", "openid", "profile"]
Now when I add the above two lines, I also need to add a callback URL, which I don't want.
I tried:
allowed_oauth_flows = ["code", "implicit"]
allowed_oauth_scopes = ["email", "openid", "profile"]
callback_urls = [aws_sagemaker_workforce.workforce.subdomain]
which gives the error:
Cycle: module.sagemaker.aws_cognito_user_pool_client.congnito_client, module.sagemaker.aws_sagemaker_workforce.workforce
Both resources are dependent on each other; I want to pass just those two lines, but it forces me to add the callback URL as well.
Here is the final main.tf, which is failing with those three lines:
resource "aws_sagemaker_workforce" "workforce" {
workforce_name = "workforce"
cognito_config {
client_id = aws_cognito_user_pool_client.congnito_client.id
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
}
}
resource "aws_cognito_user_pool" "user_pool" {
name = "sagemaker-cognito-userpool"
}
resource "aws_cognito_user_pool_client" "congnito_client" {
name = "congnito-client"
generate_secret = true
user_pool_id = aws_cognito_user_pool.user_pool.id
explicit_auth_flows = ["ALLOW_REFRESH_TOKEN_AUTH", "ALLOW_USER_PASSWORD_AUTH", "ALLOW_CUSTOM_AUTH", "ALLOW_USER_SRP_AUTH"]
allowed_oauth_flows_user_pool_client = true
supported_identity_providers = ["COGNITO"]
allowed_oauth_flows = ["code", "implicit"]
allowed_oauth_scopes = ["email", "openid", "profile"]
callback_urls = [aws_sagemaker_workforce.workforce.subdomain]
}
resource "aws_cognito_user_group" "user_group" {
name = "user-group"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
resource "aws_cognito_user_pool_domain" "domain" {
domain = "sagemaker-user-pool-ocr-domain"
user_pool_id = aws_cognito_user_pool.user_pool.id
}
resource "aws_sagemaker_workteam" "workteam" {
workteam_name = "worker-team"
workforce_name = aws_sagemaker_workforce.workforce.id
description = "worker-team"
member_definition {
cognito_member_definition {
client_id = aws_cognito_user_pool_client.congnito_client.id
user_pool = aws_cognito_user_pool_domain.domain.user_pool_id
user_group = aws_cognito_user_group.user_group.id
}
}
}
resource "aws_sagemaker_human_task_ui" "template" {
human_task_ui_name = "human-task-ui-template"
ui_template {
content = file("${path.module}/sagemaker-human-task-ui-template.html")
}
}
resource "aws_sagemaker_flow_definition" "definition" {
flow_definition_name = "flow-definition"
role_arn = var.aws_iam_role
human_loop_config {
human_task_ui_arn = aws_sagemaker_human_task_ui.template.arn
task_availability_lifetime_in_seconds = 1
task_count = 1
task_description = "Task description"
task_title = "Please review the Key Value Pairs in this document"
workteam_arn = aws_sagemaker_workteam.workteam.arn
}
output_config {
s3_output_path = "s3://${var.s3_output_path}"
}
}
You do not need to specify the callback URL for the workforce. It is sufficient to specify the following in order to create the aws_cognito_user_pool_client resource:
callback_urls = [
  "https://${aws_cognito_user_pool_domain.<domain_name>.cloudfront_distribution_arn}",
]
Then you reference the user pool client in your workforce definition:
resource "aws_sagemaker_workforce" "..." {
workforce_name = "..."
cognito_config {
client_id = aws_cognito_user_pool_client.<client_name>.id
user_pool = aws_cognito_user_pool_domain.<domain_name>.user_pool_id
}
}
The existence of the callback URLs can be verified after applying the Terraform configuration by running aws cognito-idp describe-user-pool-client --user-pool-id <pool_id> --client-id <client_id>:
"UserPoolClient": {
...
"CallbackURLs": [
"https://____.cloudfront.net",
"https://____.labeling.eu-central-1.sagemaker.aws/oauth2/idpresponse"
],
"LogoutURLs": [
"https://____.labeling.eu-central-1.sagemaker.aws/logout"
],
It seems that Terraform itself does not do anything special on workforce creation (see https://github.com/hashicorp/terraform-provider-aws/blob/main/internal/service/sagemaker/workforce.go), so the callback URLs appear to be added by AWS SageMaker itself.
This means that you have to instruct Terraform to ignore changes to those attributes in the aws_cognito_user_pool_client configuration:
lifecycle {
  ignore_changes = [
    callback_urls, logout_urls
  ]
}
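Putting both pieces together, a sketch of the user pool client that avoids the cycle (resource names taken from the question; the cloudfront_distribution_arn reference and the ignored attributes are as described above):
resource "aws_cognito_user_pool_client" "congnito_client" {
  name            = "congnito-client"
  generate_secret = true
  user_pool_id    = aws_cognito_user_pool.user_pool.id

  explicit_auth_flows                  = ["ALLOW_REFRESH_TOKEN_AUTH", "ALLOW_USER_PASSWORD_AUTH", "ALLOW_CUSTOM_AUTH", "ALLOW_USER_SRP_AUTH"]
  allowed_oauth_flows_user_pool_client = true
  supported_identity_providers         = ["COGNITO"]
  allowed_oauth_flows                  = ["code", "implicit"]
  allowed_oauth_scopes                 = ["email", "openid", "profile"]

  # Reference the user pool domain instead of the workforce subdomain to break the cycle.
  callback_urls = [
    "https://${aws_cognito_user_pool_domain.domain.cloudfront_distribution_arn}",
  ]

  lifecycle {
    # SageMaker appends its own labeling-portal URLs after the workforce is created.
    ignore_changes = [callback_urls, logout_urls]
  }
}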