Terraform for loop - amazon-web-services

I've been learning Terraform and have been playing with dashboards.
I have the following file, which generates a dashboard.
resource "aws_cloudwatch_dashboard" "main" {
dashboard_name = "sample_dashboard"
dashboard_body = <<EOF
{
"widgets": [
${templatefile("${path.module}/cpu.tmpl", { ids = aws_instance.web[*].id })},
${templatefile("${path.module}/network.tmpl", { ids = aws_instance.web[*].id })}
]
}
EOF
}
Here is the cpu template file.
{
  "type": "metric",
  "x": 0,
  "y": 0,
  "width": 12,
  "height": 6,
  "properties": {
    "metrics": ${jsonencode([for id in ids : ["AWS/EC2", "CPUUtilization", "InstanceId", "${id}"]])},
    "period": 300,
    "stat": "Average",
    "region": "us-east-1",
    "title": "EC2 Instance CPU"
  }
}
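For illustration, with two instances (the ids below are hypothetical), the jsonencode(...) expression above renders the metrics property roughly like this (pretty-printed here for readability; jsonencode itself emits compact JSON):

"metrics": [
  ["AWS/EC2", "CPUUtilization", "InstanceId", "i-0123456789abcdef0"],
  ["AWS/EC2", "CPUUtilization", "InstanceId", "i-0fedcba9876543210"]
],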
Here is the network template file.
{
  "type": "metric",
  "x": 12,
  "y": 0,
  "width": 12,
  "height": 6,
  "properties": {
    "metrics": ${jsonencode([for id in ids :
      ["AWS/EC2", "NetworkIn", "InstanceId", "${id}"]
    ])},
    "period": 300,
    "stat": "Average",
    "region": "us-east-1",
    "title": "EC2 Instance Network"
  }
}
Everything works as expected, and I get the following dashboard.
The problem I'm having is that when I try to add another metric in the for expression, I get an error.
{
  "type": "metric",
  "x": 12,
  "y": 0,
  "width": 12,
  "height": 6,
  "properties": {
    "metrics": ${jsonencode([for id in ids :
      ["AWS/EC2", "NetworkIn", "InstanceId", "${id}"],
      ["AWS/EC2", "NetworkOut", "InstanceId", "${id}"]
    ])},
    "period": 300,
    "stat": "Average",
    "region": "us-east-1",
    "title": "EC2 Instance Network"
  }
}
I get the following error.
Call to function "templatefile" failed: ./network.tmpl:9,70-71:
Invalid 'for' expression; Extra characters after the end of the 'for'
expression..
As always, thanks in advance for your help.

A for expression produces exactly one result per source element, so you can't emit two tuples from a single for; the comma after the first tuple is what Terraform reports as "Extra characters after the end of the 'for' expression". One way to overcome the issue would be to concat two for expressions, one per metric:
{
  "type": "metric",
  "x": 12,
  "y": 0,
  "width": 12,
  "height": 6,
  "properties": {
    "metrics": ${jsonencode(concat(
      [for id in ids : ["AWS/EC2", "NetworkIn", "InstanceId", "${id}"]],
      [for id in ids : ["AWS/EC2", "NetworkOut", "InstanceId", "${id}"]]
    ))},
    "period": 300,
    "stat": "Average",
    "region": "us-east-1",
    "title": "EC2 Instance Network"
  }
}
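Another option (a sketch not taken from the original answer, so treat it as untested) is to build the instance/metric combinations with Terraform's setproduct function and keep a single for expression in the template:

"metrics": ${jsonencode([
  for pair in setproduct(ids, ["NetworkIn", "NetworkOut"]) :
  ["AWS/EC2", pair[1], "InstanceId", pair[0]]
])},

setproduct(ids, ["NetworkIn", "NetworkOut"]) yields one [id, metric] pair per combination, with the metric name varying fastest, so each instance contributes a NetworkIn row and a NetworkOut row. Note that flatten is not a drop-in alternative here: Terraform's flatten recurses into nested lists, so it would also merge the inner four-element metric tuples into one flat list of strings.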

Related

Cannot add widget to AWS Cloudwatch Dashboard

I am trying to configure an existing AWS dashboard by adding one new widget.
In Amazon Kinesis / Analytics application / Streaming application, I click "View in metrics" on the graph I would like to add to my dashboard.
On the next screen I click Actions / Add to dashboard.
After selecting my dashboard I click Add, and then I can see my dashboard with the chart.
However, if I click on "Save" I get the following error:
There was an error while trying to save your dashboard:
The dashboard body is invalid, there are 6 validation errors: [
{ "dataPath": "/widgets/5/properties/metrics/0", "message": "Should NOT have more than 4 items" },
{ "dataPath": "/widgets/5/properties/metrics/1", "message": "Should NOT have more than 4 items" },
{ "dataPath": "/widgets/5/properties/yAxis/left", "message": "Should be null" },
{ "dataPath": "/widgets/5/properties/yAxis/left", "message": "Should match some schema in anyOf" },
{ "dataPath": "/widgets/5/properties/yAxis/right", "message": "Should be null" },
{ "dataPath": "/widgets/5/properties/yAxis/right", "message": "Should match some schema in anyOf" } ]
I am totally clueless, as I did not enter anything manually; all I did was click on the menu items. What is the problem here? I don't even understand the error messages. I already have 4 logs and 1 chart on the screen; this would be the 6th item, if that is important.
Update: adding the source code of the template (I censored some sensitive information with "......."):
{
"widgets": [
{
"height": 6,
"width": 24,
"y": 12,
"x": 0,
"type": "log",
"properties": {
"query": "SOURCE '/aws/kinesis-analytics/.......' | fields #timestamp, message | filter applicationARN like /arn:aws:kinesisanalytics:eu-west-1:......./| filter messageType = \"ERROR\"| sort #timestamp desc",
"region": "eu-west-1",
"title": "Error log (last 1000 records)",
"view": "table"
}
},
{
"height": 6,
"width": 24,
"y": 6,
"x": 0,
"type": "log",
"properties": {
"query": "SOURCE '/aws/kinesis-analytics/.......' | fields #timestamp, message | filter applicationARN like /arn:aws:kinesisanalytics:eu-west-1:......./| sort #timestamp desc",
"region": "eu-west-1",
"title": "Full log (last 1000 records)",
"view": "table"
}
},
{
"height": 6,
"width": 24,
"y": 18,
"x": 0,
"type": "log",
"properties": {
"query": "SOURCE '/aws/kinesis-analytics/.......' | fields #timestamp, message | filter applicationARN like /arn:aws:kinesisanalytics:eu-west-1:......./| filter message like / OEE Data Streaming app v / | sort #timestamp desc",
"region": "eu-west-1",
"title": "Version - works only right after deployment, othervise look at the name of the jar file :) ",
"view": "table"
}
},
{
"height": 6,
"width": 24,
"y": 0,
"x": 0,
"type": "log",
"properties": {
"query": "SOURCE '/aws/kinesis-analytics/.......' | fields #timestamp, message | filter applicationARN like /arn:aws:kinesisanalytics:eu-west-1:338785721659:.......") | sort #timestamp desc",
"region": "eu-west-1",
"stacked": false,
"title": "OEE app inside logs",
"view": "table"
}
},
{
"height": 6,
"width": 6,
"y": 24,
"x": 0,
"type": "metric",
"properties": {
"region": "eu-west-1",
"yAxis": {
"left": {
"min": 0
}
},
"metrics": [
[ "AWS/Kinesis", "GetRecords.Records", "StreamName", ".......", { "id": "m3", "visible": true } ]
],
"stat": "Sum",
"title": "GetRecords - .......",
"start": "-PT3H",
"end": "P0D",
"view": "timeSeries",
"stacked": false
}
}
]
}
And if I try to add the uptime widget, its code is this:
{
"type": "metric",
"x": 6,
"y": 24,
"width": 6,
"height": 6,
"properties": {
"region": "eu-west-1",
"yAxis": {
"left": {
"min": 0,
"stat": "Maximum",
"showUnits": false
},
"right": {
"min": 0,
"stat": "Maximum",
"showUnits": false
}
},
"metrics": [
[ "AWS/KinesisAnalytics", "uptime", "Application", "...", { "yAxis": "left", "label": "uptime", "stat": "Maximum", "showUnits": false } ],
[ ".", "fullRestarts", ".", ".", { "yAxis": "right", "label": "fullRestarts", "stat": "Maximum", "showUnits": false } ]
],
"stat": "Maximum",
"title": "Uptime (Milliseconds) - Maximum",
"start": "-PT3H",
"end": "P0D",
"view": "timeSeries",
"stacked": false
}
}
But I cannot save it now; I get the error message I described earlier.
It looks like the properties on the axis definitions and the metric definitions are mixed up.
The axis definition should not have a stat property: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html#CloudWatch-Dashboard-Properties-YAxis-Properties-Format
The metric definition should not have a showUnits property: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html#CloudWatch-Dashboard-Properties-Rendering-Object-Format
Try removing the stat property from both the left and right axis definitions. Also remove the showUnits property from the metric definitions (it should only be on the axis definitions).
If this was generated automatically, then it looks like a bug in the console.
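Applied to the uptime widget above, the suggested changes would give roughly the following body (a sketch of the answer's advice, not validated against the console; only the placement of stat and showUnits changes):

{
  "type": "metric",
  "x": 6,
  "y": 24,
  "width": 6,
  "height": 6,
  "properties": {
    "region": "eu-west-1",
    "yAxis": {
      "left": { "min": 0, "showUnits": false },
      "right": { "min": 0, "showUnits": false }
    },
    "metrics": [
      [ "AWS/KinesisAnalytics", "uptime", "Application", "...", { "yAxis": "left", "label": "uptime", "stat": "Maximum" } ],
      [ ".", "fullRestarts", ".", ".", { "yAxis": "right", "label": "fullRestarts", "stat": "Maximum" } ]
    ],
    "stat": "Maximum",
    "title": "Uptime (Milliseconds) - Maximum",
    "start": "-PT3H",
    "end": "P0D",
    "view": "timeSeries",
    "stacked": false
  }
}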

Cloudformation sub function for cloudwatch dashboard body

I'm running into issues using the !Sub intrinsic CloudFormation function with the AWS::Region pseudo parameter within the body of my CloudWatch dashboard (to ensure my stack is region agnostic). The CloudFormation I am using is given below.
OrderDashboard:
Type: AWS::CloudWatch::Dashboard
Properties:
DashboardBody: !Sub '{ "widgets": [ { "type": "metric", "x": 6, "y": 0, "width": 6, "height": 6, "properties": { "metrics": [ [ "address", "validateAddressApiLatency" ] ], "view": "timeSeries", "stacked": false, "region": "${AWS::Region}", "title": "ValidateAddressApiSuccessLatencyP99", "period": 300, "stat": "p99" } }, { "type": "metric", "x": 12, "y": 0, "width": 8, "height": 6, "properties": { "metrics": [ [ "address", "validateAddressApiErrorLatency" ] ], "view": "timeSeries", "stacked": false, "region": "${AWS::Region}", "title": "ValidateAddressApiErrorLatencyP99", "period": 300, "stat": "p99" } }, { "type": "text", "x": 0, "y": 0, "width": 6, "height": 6, "properties": { "markdown": "# Heading \nThis dashboard exists to show that our success latency metric and error latency metric are published successfully using a single annotation and aspectj.\n\nThe first row shows the 99th percentile latencies, and the bottom column shows the count of the number of calls" } }, { "type": "metric", "x": 6, "y": 6, "width": 6, "height": 6, "properties": { "metrics": [ [ { "expression": "SELECT COUNT(validateAddressApiLatency) FROM SCHEMA(address)", "label": "NumberOfSuccessfulCalls", "id": "q1", "region": "${AWS::Region}" } ] ], "view": "timeSeries", "stacked": false, "region": "${AWS::Region}", "stat": "Average", "period": 300, "title": "NumberOfSuccessfulValidateCalls" } }, { "type": "metric", "x": 12, "y": 6, "width": 6, "height": 6, "properties": { "metrics": [ [ { "expression": "SELECT COUNT(validateAddressApiErrorLatency) FROM SCHEMA(address)", "label": "NumberOfErroredCalls", "id": "q1", "region": "${AWS::Region}" } ] ], "view": "timeSeries", "stacked": false, "region": "${AWS::Region}", "stat": "Average", "period": 300, "title": "NumberOfErrorValidateCalls" } } ]}'
DashboardName: order-dashboard
When I deploy the dashboard, the region is not substituted.
The interesting thing is that when I use !Sub with the region parameter in other places in the template, it works:
Outputs:
  OrderApiUrl:
    Description: "The endpoint you can use to place orders. Make sure to append the order id to the end"
    Value: !Sub "https://${OrderApi}.execute-api.${AWS::Region}.amazonaws.com/v1/orders/"
Any idea on what I can do to get the value substituted? Thanks
I agree with #ErikAsplund, something like:
OrderDashboard:
  Type: AWS::CloudWatch::Dashboard
  Properties:
    DashboardName: order-dashboard
    DashboardBody: !Sub |
      {
        "widgets": [
          {
            "properties": {
              "metrics": [
                "AWS/Lambda",
                "Duration",
                "FunctionName",
                "${MyReference}"
              ]
            }
          }
        ]
      }
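For completeness, here is a minimal sketch (untested) of the asker's first widget rewritten in the same !Sub block-scalar style, keeping the original metric names and ${AWS::Region} reference:

OrderDashboard:
  Type: AWS::CloudWatch::Dashboard
  Properties:
    DashboardName: order-dashboard
    DashboardBody: !Sub |
      {
        "widgets": [
          {
            "type": "metric",
            "x": 6,
            "y": 0,
            "width": 6,
            "height": 6,
            "properties": {
              "metrics": [ [ "address", "validateAddressApiLatency" ] ],
              "view": "timeSeries",
              "stacked": false,
              "region": "${AWS::Region}",
              "title": "ValidateAddressApiSuccessLatencyP99",
              "period": 300,
              "stat": "p99"
            }
          }
        ]
      }

With !Sub, ${AWS::Region} is substituted at deploy time the same way as in the Outputs example above.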
The code that you provided works perfectly fine, so the issue you have must be related to factors other than the CloudFormation code in the question.
Maybe you are using some other code, not the one in the question.

How can I include all the widgets for a particular resource (e.g. EC2, EBS) in a dashboard in AWS CloudWatch using Terraform

I created a Terraform file to create a dashboard in AWS CloudWatch.
Here is my sample file to create the dashboard:
// provider module
provider "aws"{
access_key = var.access_key
secret_key = var.secret_key
region = var.region
}
// cloudwatch dashboard module
resource "aws_cloudwatch_dashboard" "main" {
dashboard_name = var.dashboard_name
dashboard_body = <<EOF
{
"widgets": [
{
"type": "metric",
"x": 0,
"y": 0,
"width": 12,
"height": 6,
"properties": {
"metrics": [
[
"AWS/EBS",
"VolumeReadOps",
"VolumeId",
"vol-04b26d88efe8ecd54"
]
],
"period": 300,
"stat": "Average",
"region": "ap-south-1",
"title": "VolumeRead"
}
},
{
"type": "metric",
"x": 0,
"y": 0,
"width": 12,
"height": 6,
"properties": {
"metrics": [
[
"AWS/EBS",
"VolumeQueueLength",
"VolumeId",
"vol-04b26d88efe8ecd54"
]
],
"period": 300,
"stat": "Average",
"region": "ap-south-1",
"title": "VolumeQueueLength"
}
},
{
"type": "metric",
"x": 14,
"y": 13,
"width": 12,
"height": 6,
"properties": {
"metrics": [
[
"AWS/EBS",
"BurstBalance",
"VolumeId",
"vol-04b26d88efe8ecd54"
]
],
"period": 300,
"stat": "Average",
"region": "ap-south-1",
"title": "BurstBalance"
}
}
]
}
EOF
}
Is there any way to add all the metrics available for a particular resource (e.g. EC2, EBS, RDS), either in the same widget or in a separate widget per metric, without listing every metric individually in the Terraform file?
In the AWS console, just ticking the checkbox above all the metrics inside the resource will include all of them in the same widget, but I couldn't find the proper way to do this when provisioning with Terraform.

Amazon Rekognition for Video - getFaceSearch: index number

I'm new to using Amazon Rekognition to analyze faces in a video.
I’m using startFaceSearch to start my analysis. After the job is completed successfully, I’m using the JobId generated to call getFaceSearch.
On the first video I analyzed, the results were as expected. But when I analyze the second example, some strange behavior occurs and I can't understand why.
Viewing the JSON generated as results for my second video, completely different faces are identified with the same index number.
Please see the results below.
{
"Timestamp": 35960,
"Person": {
"Index": 11,
"BoundingBox": {
"Width": 0.09375,
"Height": 0.24583333730698,
"Left": 0.1875,
"Top": 0.375
},
"Face": {
"BoundingBox": {
"Width": 0.06993006914854,
"Height": 0.10256410390139,
"Left": 0.24475525319576,
"Top": 0.375
},
"Landmarks": [
{
"Type": "eyeLeft",
"X": 0.26899611949921,
"Y": 0.40649232268333
},
{
"Type": "eyeRight",
"X": 0.28330621123314,
"Y": 0.41610333323479
},
{
"Type": "nose",
"X": 0.27063181996346,
"Y": 0.43293061852455
},
{
"Type": "mouthLeft",
"X": 0.25983560085297,
"Y": 0.44362303614616
},
{
"Type": "mouthRight",
"X": 0.27296212315559,
"Y": 0.44758656620979
}
],
"Pose": {
"Roll": 22.106262207031,
"Yaw": 6.3516845703125,
"Pitch": -6.2676968574524
},
"Quality": {
"Brightness": 41.875026702881,
"Sharpness": 65.948883056641
},
"Confidence": 90.114051818848
}
}
}
{
"Timestamp": 46520,
"Person": {
"Index": 11,
"BoundingBox": {
"Width": 0.19034090638161,
"Height": 0.42083331942558,
"Left": 0.30681818723679,
"Top": 0.17916665971279
},
"Face": {
"BoundingBox": {
"Width": 0.076486013829708,
"Height": 0.11217948794365,
"Left": 0.38680067658424,
"Top": 0.26923078298569
},
"Landmarks": [
{
"Type": "eyeLeft",
"X": 0.40642243623734,
"Y": 0.32347011566162
},
{
"Type": "eyeRight",
"X": 0.43237379193306,
"Y": 0.32369664311409
},
{
"Type": "nose",
"X": 0.42121160030365,
"Y": 0.34618207812309
},
{
"Type": "mouthLeft",
"X": 0.41044121980667,
"Y": 0.36520344018936
},
{
"Type": "mouthRight",
"X": 0.43202903866768,
"Y": 0.36483728885651
}
],
"Pose": {
"Roll": 0.3165397644043,
"Yaw": 2.038902759552,
"Pitch": -1.9931464195251
},
"Quality": {
"Brightness": 54.697460174561,
"Sharpness": 53.806159973145
},
"Confidence": 95.216400146484
}
}
}
In fact, in this video, all faces have the same index number, regardless of whether they are different people. Any suggestions?
The PersonDetail object is the result of the API. "Index" is the identifier for the person detected in the video, so the index doesn't span across videos; it is just an internal reference.
The link below details Index:
https://docs.aws.amazon.com/rekognition/latest/dg/API_PersonDetail.html

Error !!! CloudFormation template validation

I'm designing a Redis cluster using a CloudFormation template, and during validation of the template I'm getting this error: "Template contains errors.: Template format error: JSON not well-formed. (line 151, column 2)"
Below is the CloudFormation script.
{
"AWSTemplateFormatVersion": "2010-09-09",
"Metadata": {
"AWS::CloudFormation::Designer": {
"f60e2d2e-b46b-48b1-88c8-eecce45d2166": {
"size": {
"width": 60,
"height": 60
},
"position": {
"x": 320,
"y": 70
},
"z": 2,
"parent": "71508a33-8207-4580-8721-c3688c4a0353",
"embeds": [],
"ismemberof": [
"a63aacbd-1c6e-4118-8bbe-08a5bc63052a",
"55eb37aa-e764-49ac-b8fe-3eddb2ea77ad"
]
},
"a63aacbd-1c6e-4118-8bbe-08a5bc63052a": {
"size": {
"width": 60,
"height": 60
},
"position": {
"x": 320,
"y": 160
},
"z": 2,
"parent": "71508a33-8207-4580-8721-c3688c4a0353",
"embeds": []
},
"0291abc8-9c50-491b-8400-e1f7f8b22118": {
"source": {
"id": "f60e2d2e-b46b-48b1-88c8-eecce45d2166"
},
"target": {
"id": "a63aacbd-1c6e-4118-8bbe-08a5bc63052a"
},
"z": 1
},
"55eb37aa-e764-49ac-b8fe-3eddb2ea77ad": {
"size": {
"width": 60,
"height": 60
},
"position": {
"x": 440,
"y": 70
},
"z": 2,
"parent": "71508a33-8207-4580-8721-c3688c4a0353",
"embeds": []
},
"7aa270dd-1131-4dc4-8913-dfaf44a3815d": {
"source": {
"id": "f60e2d2e-b46b-48b1-88c8-eecce45d2166"
},
"target": {
"id": "55eb37aa-e764-49ac-b8fe-3eddb2ea77ad"
},
"z": 2
},
"71508a33-8207-4580-8721-c3688c4a0353": {
"size": {
"width": 610,
"height": 600
},
"position": {
"x": 20,
"y": 10
},
"z": 1,
"embeds": [
"55eb37aa-e764-49ac-b8fe-3eddb2ea77ad",
"a63aacbd-1c6e-4118-8bbe-08a5bc63052a",
"f60e2d2e-b46b-48b1-88c8-eecce45d2166"
]
}
}
},
"Parameters" : {
"CacheNodeType" : {
"Description" : "The compute and memory capacity of the nodes in the Cache Cluster",
"Type" : "String",
"Default" : "cache.m3.medium",
"AllowedValues" : ["cache.t2.micro", "cache.t2.small", "cache.t2.medium",
"cache.m3.medium", "cache.m3.large", "cache.m3.xlarge", "cache.m3.2xlarge",
"cache.t1.micro", "cache.m1.small", "cache.m1.medium", "cache.m1.large",
"cache.m1.xlarge", "cache.c1.xlarge", "cache.r3.large", "cache.r3.xlarge",
"cache.r3.2xlarge", "cache.r3.4xlarge","cache.r3.8xlarge", "cache.m2.xlarge",
"cache.m2.2xlarge", "cache.m2.4xlarge"],
"ConstraintDescription" : "must select a valid Cache Node type."
}
},
"Resources": {
"RedisClusterReplicationGroup": {
"Type": "AWS::ElastiCache::ReplicationGroup",
"Properties": {
"CacheParameterGroupName": {
"Ref": "RedisClusterParameterGroup"
},
"CacheSubnetGroupName": {
"Ref": "RedisClusterSubnetGroup"
},
"CacheNodeType" : { "Ref" : "CacheNodeType" },
"Engine" : "redis",
"EngineVersion" : "2.8.24",
"NumCacheClusters" : 4,
"Port" : 6879,
"PreferredCacheClusterAZs" : ["us-east-1c","us-east-1d","us-east-1e"],
"ReplicationGroupDescription" : "RedisClusterReplicationGroup",
"SecurityGroupIds" : "sg-7ea72e07",
"SnapshotRetentionLimit" : 0,
"AutomaticFailoverEnabled" : true,
"Metadata": {
"AWS::CloudFormation::Designer": {
"id": "f60e2d2e-b46b-48b1-88c8-eecce45d2166"
}
}
},
"RedisClusterParameterGroup": {
"Type": "AWS::ElastiCache::ParameterGroup",
"Properties": {
"CacheParameterGroupFamily" : "redis2.8",
"CacheParameterGroupName" : "RedisClusterParameterGroup",
"Description" :"RedisClusterParameterGroup"
},
"Metadata": {
"AWS::CloudFormation::Designer": {
"id": "a63aacbd-1c6e-4118-8bbe-08a5bc63052a"
}
}
},
"RedisClusterSubnetGroup": {
"Type": "AWS::ElastiCache::SubnetGroup",
"Properties": {
"Description" : "RedisClusterSubnetGroups",
"SubnetIds" : ["subnet-7854ab20", "subnet-eaa7039c", "subnet-988a00a5"]
},
"Metadata": {
"AWS::CloudFormation::Designer": {
"id": "71508a33-8207-4580-8721-c3688c4a0353"
}
}
}
},
}
One way to avoid this whole set of JSON errors is to switch to YAML syntax, which is supported by CloudFormation. You can convert your JSON document to YAML at
https://www.json2yaml.com/
and then just use that. I find YAML much easier to maintain without the quotes, braces, and commas.
{
"AWSTemplateFormatVersion": "2010-09-09",
"Metadata": {
"AWS::CloudFormation::Designer": {
"f60e2d2e-b46b-48b1-88c8-eecce45d2166": {
"size": {
"width": 60,
"height": 60
},
"position": {
"x": 320,
"y": 70
},
"z": 2,
"parent": "71508a33-8207-4580-8721-c3688c4a0353",
"embeds": [],
"ismemberof": [
"a63aacbd-1c6e-4118-8bbe-08a5bc63052a",
"55eb37aa-e764-49ac-b8fe-3eddb2ea77ad"
]
},
"a63aacbd-1c6e-4118-8bbe-08a5bc63052a": {
"size": {
"width": 60,
"height": 60
},
"position": {
"x": 320,
"y": 160
},
"z": 2,
"parent": "71508a33-8207-4580-8721-c3688c4a0353",
"embeds": []
},
"0291abc8-9c50-491b-8400-e1f7f8b22118": {
"source": {
"id": "f60e2d2e-b46b-48b1-88c8-eecce45d2166"
},
"target": {
"id": "a63aacbd-1c6e-4118-8bbe-08a5bc63052a"
},
"z": 1
},
"55eb37aa-e764-49ac-b8fe-3eddb2ea77ad": {
"size": {
"width": 60,
"height": 60
},
"position": {
"x": 440,
"y": 70
},
"z": 2,
"parent": "71508a33-8207-4580-8721-c3688c4a0353",
"embeds": []
},
"7aa270dd-1131-4dc4-8913-dfaf44a3815d": {
"source": {
"id": "f60e2d2e-b46b-48b1-88c8-eecce45d2166"
},
"target": {
"id": "55eb37aa-e764-49ac-b8fe-3eddb2ea77ad"
},
"z": 2
},
"71508a33-8207-4580-8721-c3688c4a0353": {
"size": {
"width": 610,
"height": 600
},
"position": {
"x": 20,
"y": 10
},
"z": 1,
"embeds": [
"55eb37aa-e764-49ac-b8fe-3eddb2ea77ad",
"a63aacbd-1c6e-4118-8bbe-08a5bc63052a",
"f60e2d2e-b46b-48b1-88c8-eecce45d2166"
]
}
}
},
"Parameters": {
"CacheNodeType": {
"Description": "The compute and memory capacity of the nodes in the Cache Cluster",
"Type": "String",
"Default": "cache.m3.medium",
"AllowedValues": [
"cache.t2.micro",
"cache.t2.small",
"cache.t2.medium",
"cache.m3.medium",
"cache.m3.large",
"cache.m3.xlarge",
"cache.m3.2xlarge",
"cache.t1.micro",
"cache.m1.small",
"cache.m1.medium",
"cache.m1.large",
"cache.m1.xlarge",
"cache.c1.xlarge",
"cache.r3.large",
"cache.r3.xlarge",
"cache.r3.2xlarge",
"cache.r3.4xlarge",
"cache.r3.8xlarge",
"cache.m2.xlarge",
"cache.m2.2xlarge",
"cache.m2.4xlarge"
],
"ConstraintDescription": "must select a valid Cache Node type."
}
},
"Resources": {
"RedisClusterReplicationGroup": {
"Type": "AWS::ElastiCache::ReplicationGroup",
"Properties": {
"CacheParameterGroupName": {
"Ref": "RedisClusterParameterGroup"
},
"CacheSubnetGroupName": {
"Ref": "RedisClusterSubnetGroup"
},
"CacheNodeType": {
"Ref": "CacheNodeType"
},
"Engine": "redis",
"EngineVersion": "2.8.24",
"NumCacheClusters": 4,
"Port": 6879,
"PreferredCacheClusterAZs": [
"us-east-1c",
"us-east-1d",
"us-east-1e"
],
"ReplicationGroupDescription": "RedisClusterReplicationGroup",
"SecurityGroupIds": "sg-7ea72e07",
"SnapshotRetentionLimit": 0,
"AutomaticFailoverEnabled": true,
"Metadata": {
"AWS::CloudFormation::Designer": {
"id": "f60e2d2e-b46b-48b1-88c8-eecce45d2166"
}
}
}
},
"RedisClusterParameterGroup": {
"Type": "AWS::ElastiCache::ParameterGroup",
"Properties": {
"CacheParameterGroupFamily": "redis2.8",
"CacheParameterGroupName": "RedisClusterParameterGroup",
"Description": "RedisClusterParameterGroup"
},
"Metadata": {
"AWS::CloudFormation::Designer": {
"id": "a63aacbd-1c6e-4118-8bbe-08a5bc63052a"
}
}
},
"RedisClusterSubnetGroup": {
"Type": "AWS::ElastiCache::SubnetGroup",
"Properties": {
"Description": "RedisClusterSubnetGroups",
"SubnetIds": [
"subnet-7854ab20",
"subnet-eaa7039c",
"subnet-988a00a5"
]
},
"Metadata": {
"AWS::CloudFormation::Designer": {
"id": "71508a33-8207-4580-8721-c3688c4a0353"
}
}
}
}
}
Any JSON parser will tell you what the issue is: the last element must not have a trailing ',', and the JSON needed one more '}' to be valid. I haven't checked whether the script passes CloudFormation validation, but it passes JSON parsing.