AWS Pricing API not yielding prices for the given search criteria - python-2.7

I am using the AWS boto3 Pricing API to get instance prices.
However, I am not getting any results for the combination (us-west-2, r3.2xlarge, Linux, no pre-installed software, tenancy = Shared).
Here is my code:
import json
import boto3

pricing = boto3.client('pricing', region_name='us-east-1')
hourlyTermCode = 'JRTCKXETXF'
rateCode = '6YS6EN2CT7'
token = ''
while True:
    paginator = pricing.get_paginator('get_products')
    pages = paginator.paginate(
        ServiceCode='AmazonEC2',
        Filters=[
            {'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
            {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': 'US West (Oregon)'}
        ],
        PaginationConfig={
            'StartingToken': token
        }
    )
    for response in pages:
        for price in response['PriceList']:
            resp = json.loads(price)
            product = resp['product']  # ['attributes']['']
            sku = product['sku']
            if product['productFamily'] == 'Compute Instance':
                if str(product['attributes']['instanceType']) == str(amazon_instance_type):
                    if str(product['attributes']['operatingSystem']) == 'Linux':
                        if str(product['attributes']['preInstalledSw']) == 'NA':
                            if str(product['attributes']['tenancy']) == 'Shared':
                                sku_key = resp['terms']['OnDemand'].get(sku)
                                if sku_key:
                                    price = sku_key[sku + '.' + hourlyTermCode + '.' + rateCode]['pricePerUnit']['USD']
                                    print 'here 7'
                                    print price
    try:
        token = response['NextToken']
    except KeyError:
        pass

This works:
import json
import boto3

client = boto3.client('pricing', region_name='us-east-1')
response = client.get_products(
    ServiceCode='AmazonEC2',
    Filters=[
        {'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
        {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': 'US West (Oregon)'},
        {'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': 'r3.2xlarge'},
        {'Type': 'TERM_MATCH', 'Field': 'tenancy', 'Value': 'Shared'},
        {'Type': 'TERM_MATCH', 'Field': 'preInstalledSw', 'Value': 'NA'}
    ]
)
for pricelist_json in response['PriceList']:
    pricelist = json.loads(pricelist_json)
    product = pricelist['product']
    if product['productFamily'] == 'Compute Instance':
        print pricelist['terms']['OnDemand'].values()[0]['priceDimensions'].values()[0][u'pricePerUnit']['USD']
It is based on the output of:
{u'FormatVersion': u'aws_v1', u'PriceList': [u'{
"product": {
"productFamily": "Compute Instance",
"attributes": {
"enhancedNetworkingSupported": "Yes",
"memory": "61 GiB",
"vcpu": "8",
"capacitystatus": "Used",
"locationType": "AWS Region",
"storage": "1 x 160 SSD",
"instanceFamily": "Memory optimized",
"operatingSystem": "Linux",
"physicalProcessor": "Intel Xeon E5-2670 v2 (Ivy Bridge)",
"clockSpeed": "2.5 GHz",
"ecu": "26",
"networkPerformance": "High",
"servicename": "Amazon Elastic Compute Cloud",
"instanceType": "r3.2xlarge",
"tenancy": "Shared",
"usagetype": "USW2-BoxUsage:r3.2xlarge",
"normalizationSizeFactor": "16",
"processorFeatures": "Intel AVX; Intel Turbo",
"servicecode": "AmazonEC2",
"licenseModel": "No License required",
"currentGeneration": "No",
"preInstalledSw": "NA",
"location": "US West (Oregon)",
"processorArchitecture": "64-bit",
"operation": "RunInstances"
},
"sku": "GMTWE5CTY4FEUYDN"
},
"serviceCode": "AmazonEC2",
"terms": {
"OnDemand": {
"GMTWE5CTY4FEUYDN.JRTCKXETXF": {
"priceDimensions": {
"GMTWE5CTY4FEUYDN.JRTCKXETXF.6YS6EN2CT7": {
"unit": "Hrs",
"endRange": "Inf",
"description": "$0.665 per On Demand Linux r3.2xlarge Instance Hour",
"appliesTo": [],
"rateCode": "GMTWE5CTY4FEUYDN.JRTCKXETXF.6YS6EN2CT7",
"beginRange": "0",
"pricePerUnit": {
"USD": "0.6650000000"
}
}
},
"sku": "GMTWE5CTY4FEUYDN",
"effectiveDate": "2018-07-01T00:00:00Z",
"offerTermCode": "JRTCKXETXF",
"termAttributes": {}
}
},
...
},
"version": "20180726190848",
"publicationDate": "2018-07-26T19:08:48Z"
}'
]
}
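If you prefer to keep the paginator from the original code, a minimal sketch that pushes the same five TERM_MATCH filters into the paginated request (so the Pricing API narrows the results server-side) could look like the following; it loops over the priceDimensions instead of hard-coding the JRTCKXETXF/6YS6EN2CT7 codes, since they are already part of each rateCode:

import json
import boto3

pricing = boto3.client('pricing', region_name='us-east-1')

# The paginator handles NextToken for us; the filters do the narrowing.
paginator = pricing.get_paginator('get_products')
pages = paginator.paginate(
    ServiceCode='AmazonEC2',
    Filters=[
        {'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
        {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': 'US West (Oregon)'},
        {'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': 'r3.2xlarge'},
        {'Type': 'TERM_MATCH', 'Field': 'tenancy', 'Value': 'Shared'},
        {'Type': 'TERM_MATCH', 'Field': 'preInstalledSw', 'Value': 'NA'}
    ]
)

for page in pages:
    for price_json in page['PriceList']:
        price_item = json.loads(price_json)
        # Walk every on-demand term and price dimension of the matching product.
        for term in price_item['terms']['OnDemand'].values():
            for dimension in term['priceDimensions'].values():
                print(dimension['pricePerUnit']['USD'])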

Related

Boto3 create glue triggers with different types in one workflow

Can anyone please guide me through the steps to create multiple trigger types, one conditional and the other scheduled, in a single workflow?
So far I have used the create_trigger function, but I am not sure how to address the above requirement.
I have tried the syntax below, but it didn't work:
response = client.create_trigger(
    Name='two_triggers',
    WorkflowName='wf_With_two_tirggers',
    Type='SCHEDULED',
    Schedule='cron(0 12 * * ? *)',
    Actions=[
        {
            'JobName': 'abc_dev',
            'Arguments': {
                'string': 'string'
            },
            'Timeout': 123,
            'SecurityConfiguration': 'string',
            'NotificationProperty': {
                'NotifyDelayAfter': 123
            },
            'Trigger': 'string'
        },
    ],
    Type='CONDITIONAL',
    Predicate={
        'Logical': 'ANY',
        'Conditions': [
            {
                'LogicalOperator': 'EQUALS',
                'JobName': 'def_dev',
                'State': 'SUCCEEDED'
            },
        ]
    },
    Actions=[
        {
            'JobName': 'ghi_dev',
            'Arguments': {
                'string': 'string'
            },
            'Timeout': 123,
            'SecurityConfiguration': 'string',
            'NotificationProperty': {
                'NotifyDelayAfter': 123
            },
            'CrawlerName': 'string'
        },
    ],
    Description='string',
    StartOnCreation=True,
    Tags={
        'string': 'string'
    }
)
Below is the workflow design I am struggling to write code for. I tried the code above for this design using boto3, but it didn't work.
I figured out an answer. Below is the code for the design given in the question:
import boto3
import os
import logging
glue = boto3.client(service_name="glue", region_name='us-east-1')
response = glue.create_workflow(
Name="dual_trigger_wf")
response1 = glue.create_trigger(
Name="trigger_one_to_many",
WorkflowName="dual_trigger_wf",
Type="SCHEDULED",
Schedule="cron(0 8 * * ? *)",
Actions=[
{
"JobName": "abc",
"Arguments": {"string": "string"},
"Timeout": 123,
"SecurityConfiguration": "string",
"NotificationProperty": {"NotifyDelayAfter": 123},
},
{
"JobName": "def",
"Arguments": {"string": "string"},
"Timeout": 123,
"SecurityConfiguration": "string",
"NotificationProperty": {"NotifyDelayAfter": 123},
},
],
Description="string",
StartOnCreation=False,
)
response2 = glue.create_trigger(
Name="trigger_many_to_one",
WorkflowName="dual_trigger_wf",
Type="CONDITIONAL",
Predicate={
"Logical": "AND",
"Conditions": [
{
"LogicalOperator": "EQUALS",
"JobName": "abc",
"State": "SUCCEEDED",
},
{
"LogicalOperator": "EQUALS",
"JobName": "def",
"State": "SUCCEEDED",
},
],
},
Actions=[
{
"JobName": "ghi",
"Arguments": {"string": "string"},
"Timeout": 123,
"SecurityConfiguration": "string",
"NotificationProperty": {"NotifyDelayAfter": 123},
}
],
Description="string",
StartOnCreation=False,
)
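One follow-up worth noting: since both triggers are created with StartOnCreation=False, they stay deactivated until they are started. A small sketch, assuming the client and trigger names defined above:

import boto3

glue = boto3.client("glue", region_name="us-east-1")

# StartOnCreation=False leaves both triggers deactivated, so activate them
# explicitly; otherwise the cron schedule never fires and the conditional
# trigger never evaluates its predicate.
glue.start_trigger(Name="trigger_one_to_many")
glue.start_trigger(Name="trigger_many_to_one")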

How to get ip address of spot instance created with boto3

I am able to create a spot instance with boto3 using the following code:
import boto3
import datetime, random, string

client = boto3.client('ec2')
response = client.request_spot_instances(
    DryRun=False,
    ClientToken=''.join(random.choices(string.ascii_uppercase + string.digits, k=10)),
    InstanceCount=1,
    Type='one-time',
    LaunchSpecification={
        'ImageId': 'ami-053ebee0bc662f9b3',
        'SecurityGroups': ['default'],
        'InstanceType': 't3.medium',
        'Placement': {
            'AvailabilityZone': 'us-east-1a',
        },
        'BlockDeviceMappings': [
            {
            },
        ],
        'EbsOptimized': True,
        'Monitoring': {
            'Enabled': True
        },
        'SecurityGroupIds': [
            'sg-03432e1f',
        ]
    }
)
print(response)
print(response)
The response I got is below. How can I get the IP addresses of all the machines created? I know the request is asynchronous, but if I want to poll for the details of the created instances, what is the way to do it?
{
"SpotInstanceRequests": [
{
"CreateTime": "2022-05-22 05:21:54+00:00",
"LaunchSpecification": {
"SecurityGroups": [
{
"GroupName": "default",
"GroupId": "sg-03432e1f"
}
],
"EbsOptimized": true,
"ImageId": "ami-053ebee0bc662f9b3",
"InstanceType": "t3.medium",
"Placement": {
"AvailabilityZone": "us-east-1a"
},
"SubnetId": "subnet-b9f4aadf",
"Monitoring": {
"Enabled": true
}
},
"ProductDescription": "Linux/UNIX",
"SpotInstanceRequestId": "sir-88tef9gh",
"SpotPrice": "0.041600",
"State": "open",
"Status": {
"Code": "pending-evaluation",
"Message": "Your Spot request has been submitted for review, and is pending evaluation.",
"UpdateTime": "2022-05-22 05:21:55+00:00"
},
"Type": "one-time",
"InstanceInterruptionBehavior": "terminate"
}
],
"ResponseMetadata": {
"RequestId": "ac39fdad-cb4d-4e26-82a5-1f55c1f0d5af",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "ac39fdad-cb4d-4e26-82a5-1f55c1f0d5af",
"cache-control": "no-cache, no-store",
"strict-transport-security": "max-age=31536000; includeSubDomains",
"content-type": "text/xml;charset=UTF-8",
"content-length": "1696",
"date": "Sun, 22 May 2022 05:21:54 GMT",
"server": "AmazonEC2"
},
"RetryAttempts": 0
}
}
It is the same as with regular instances. You have to get the NetworkInterfaceId of the spot instances, which you can get using describe_instances; in its output, spot instances have InstanceLifecycle equal to spot.
Once you have the NetworkInterfaceId of your spot instances, you can use describe_network_interfaces to get the PublicIp associated with the interface, as sketched below.
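A minimal sketch of that flow with boto3 (using the SpotInstanceRequestId from the example response above; the waiter and describe calls are standard EC2 client operations):

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')
request_id = 'sir-88tef9gh'  # the SpotInstanceRequestId from the response above

# Wait until the spot request is fulfilled, then look up the instance it launched.
ec2.get_waiter('spot_instance_request_fulfilled').wait(SpotInstanceRequestIds=[request_id])
fulfilled = ec2.describe_spot_instance_requests(SpotInstanceRequestIds=[request_id])
instance_ids = [r['InstanceId'] for r in fulfilled['SpotInstanceRequests'] if 'InstanceId' in r]

# describe_instances reports InstanceLifecycle == 'spot' for these instances and
# exposes both the public IP and the attached network interfaces.
for reservation in ec2.describe_instances(InstanceIds=instance_ids)['Reservations']:
    for instance in reservation['Instances']:
        print(instance['InstanceId'], instance.get('PublicIpAddress'))
        for eni in instance['NetworkInterfaces']:
            detail = ec2.describe_network_interfaces(
                NetworkInterfaceIds=[eni['NetworkInterfaceId']]
            )['NetworkInterfaces'][0]
            print(detail.get('Association', {}).get('PublicIp'))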

EMR Cluster: AutoScaling Policy For Instance Group Could Not Attach And Failed

I am trying to automate the EMR cluster creation through boto3. Unfortunately, I'm getting the following warning:
The Auto Scaling policy for instance group ig-MI0ANZ0C3WNN in Amazon EMR cluster j-BS3Y2OAO65R6 (qidv2_historical_3.0.1) could not attach and failed at 2021-09-20 17:41 UTC.
I cannot figure out what the issue is. This was adapted from an AWS CLI command that didn't raise any warnings or issues, but after transitioning to boto3 I started getting this auto scaling policy warning.
cluster_id = self.boto_client().run_job_flow(
Name=self.cluster_name,
LogUri='s3n://aws-logs',
JobFlowRole='EMR_EC2_DefaultRole',
ReleaseLabel=self.release_label,
Applications=[{'Name': 'Spark'},{'Name': 'Hive'},{'Name': 'Hadoop'},{'Name': 'Pig'},{'Name': 'Hue'},
{'Name': 'Zeppelin'},{'Name': 'Livy'},{'Name': 'JupyterHub'},{'Name': 'Tensorflow'}
],
AutoScalingRole='EMR_AutoScaling_DefaultRole',
BootstrapActions=[
{
'Name': 'Custom action',
'ScriptBootstrapAction': {
'Path': 's3://ml-data/emr-bootstrap_spk3.0.1.sh'
}
}
],
ServiceRole='EMR_DefaultRole',
ScaleDownBehavior='TERMINATE_AT_TASK_COMPLETION',
EbsRootVolumeSize=25,
Steps=[
{
'Name': 'Setup Debugging',
'ActionOnFailure': 'TERMINATE_CLUSTER',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['state-pusher-script']
}
},
{
'Name': 'Setup - Sync with S3',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['aws', 's3', 'sync',
's3://ch-ml-data/',
'/mnt/src/']
}
},
{
'Name': 'Spark Application',
'ActionOnFailure': 'CANCEL_AND_WAIT',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': ['cd /mnt/src; bash spark_jobs/qid_pipeline_historical_run.sh']
}
}
],
Configurations=[
{
'Classification': 'zeppelin-env',
'Properties': {},
'Configurations': [
{
'Classification': 'export',
'Properties': {
'ZEPPELIN_PORT': '8890',
'HADOOP_CONF_DIR': '/etc/hadoop/conf',
'ZEPPELIN_LOG_DIR': '/var/log/zeppelin',
'ZEPPELIN_PID': '$ZEPPELIN_PID_DIR/zeppelin.pid',
'MASTER': 'yarn-client',
'SPARK_SUBMIT_OPTIONS': "$SPARK_SUBMIT_OPTIONS --conf '\''spark.executorEnv.PYTHONPATH=/usr/lib/spark/python/lib/py4j-src.zip:/usr/lib/spark/python/:<CPS>{{PWD}}/pyspark.zip<CPS>{{PWD}}/py4j-src.zip'\'' --conf spark.yarn.isPython=true",
'PYSPARK_DRIVER_PYTHON': '/mnt/anaconda3/envs/question-identification-v2/bin/python',
'ZEPPELIN_NOTEBOOK_USER': 'user',
'CLASSPATH': ':/usr/lib/hadoop-lzo/lib/*:/usr/lib/hadoop/hadoop-aws.jar:/usr/share/java/Hive-JSON-Serde/hive-openx-serde.jar:/usr/share/aws/aws-java-sdk/*:/usr/share/aws/emr/emrfs/conf:/usr/share/aws/emr/emrfs/lib/*:/usr/share/aws/emr/emrfs/auxlib/*:/usr/share/aws/hmclient/lib/aws-glue-datacatalog-spark-client.jar:/usr/share/aws/sagemaker-spark-sdk/lib/sagemaker-spark-sdk.jar',
'ZEPPELIN_PID_DIR': '/var/run/zeppelin',
'PYSPARK_PYTHON': '/mnt/anaconda3/envs/question-identification-v2/bin/python',
'SPARK_HOME': '/usr/lib/spark',
'ZEPPELIN_NOTEBOOK_S3_BUCKET': 'ch-ml-data',
'ZEPPELIN_WAR_TEMPDIR': '/var/run/zeppelin/webapps',
'ZEPPELIN_CONF_DIR': '/etc/zeppelin/conf',
'ZEPPELIN_NOTEBOOK_STORAGE': 'org.apache.zeppelin.notebook.repo.S3NotebookRepo',
'ZEPPELIN_NOTEBOOK_DIR': '/var/lib/zeppelin/notebook',
'ZEPPELIN_ADDR': '0.0.0.0'
}
}
]
},
{
'Classification': 'hive-site',
'Properties': {
'hive.metastore.client.factory.class': 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'
}
},
{
'Classification': 'spark-hive-site',
'Properties': {
'hive.metastore.client.factory.class': 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'
}
}
],
Instances={
'Ec2KeyName': 'emr-temporary',
'KeepJobFlowAliveWhenNoSteps': False,
'TerminationProtected': False,
'Ec2SubnetId': 'subnet-063735e4fa63e3bac',
'AdditionalSlaveSecurityGroups': ["sg-012970517d0a88bae", "sg-01813cf2115b55874", "sg-04563fc7e8ed9e1ec", "sg-07ab30655981361ad"],
'ServiceAccessSecurityGroup': 'sg-00dd6e63d7004176d',
'EmrManagedSlaveSecurityGroup': 'sg-048b83d1a20550b43',
'EmrManagedMasterSecurityGroup': 'sg-017402b74e879aaa5',
'AdditionalMasterSecurityGroups': ["sg-012970517d0a88bae", "sg-01813cf2115b55874", "sg-04563fc7e8ed9e1ec", "sg-07ab30655981361ad"],
'InstanceGroups': [
{
'Name': 'Task',
'InstanceRole': 'TASK',
'InstanceType': 'i3.2xlarge',
'InstanceCount': 1
},
{
'Name': 'Master - 1',
'InstanceRole': 'MASTER',
'InstanceType': 'i3.2xlarge',
'InstanceCount': 1,
},
{
'Name': 'Core - 2',
'InstanceRole': 'CORE',
'InstanceType': 'i3.2xlarge',
'InstanceCount': 1,
'Market': 'SPOT',
'AutoScalingPolicy': {
'Constraints': {
'MinCapacity': 3,
'MaxCapacity': 100
},
'Rules': [
{
'Name': 'memory',
'Description': '',
'Action': {
'SimpleScalingPolicyConfiguration': {
'ScalingAdjustment': 10,
'CoolDown': 300,
'AdjustmentType': 'CHANGE_IN_CAPACITY'
}
},
'Trigger': {
'CloudWatchAlarmDefinition': {
'MetricName': 'YARNMemoryAvailablePercentage',
'ComparisonOperator': 'LESS_THAN',
'Statistic': 'AVERAGE',
'Period': 300,
'EvaluationPeriods': 2,
'Unit': 'PERCENT',
'Namespace': 'AWS/ElasticMapReduce',
'Threshold': 25,
'Dimensions': [
{
'Value': '${emr.clusterId}',
'Key': 'JobFlowId'
}
]
}
}
},
{
'Name': 'mem',
'Description': '',
'Action': {
'SimpleScalingPolicyConfiguration': {
'ScalingAdjustment': -5,
'CoolDown': 300,
'AdjustmentType': 'CHANGE_IN_CAPACITY'
}
},
'Trigger': {
'CloudWatchAlarmDefinition': {
'MetricName': 'YARNMemoryAvailablePercentage',
'ComparisonOperator': 'GREATER_THAN_OR_EQUAL',
'Statistic': 'AVERAGE',
'Period': 300,
'EvaluationPeriods': 18,
'Unit': 'PERCENT',
'Namespace': 'AWS/ElasticMapReduce',
'Threshold': 50,
'Dimensions': [
{
'Value': '${emr.clusterId}',
'Key': 'JobFlowId'
}
],
}
}
}
]
}
}
]
}
)

CloudFormation does not support creating VPC links in API Gateway

In AWS API Gateway there is a section called VPC Link, and I can set that up manually.
The problem is that I cannot find any section in the CloudFormation documentation on how to create a VPC link for API Gateway via CloudFormation.
Is it something that CloudFormation does not support, or am I missing it?
You can use swagger to define an API Gateway using VPC Link. This is a complete CloudFormation template you can deploy to test it out...
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Test backend access via API Gateway. This template provisions a Regional API Gateway proxing requests to a backend via VPC Link and Direct Connect to on-premises resources using private ip addresses.",
"Parameters": {
"VPCId": {
"Description": "VPC Id for API Gateway VPC Link",
"Type": "AWS::EC2::VPC::Id"
},
"NLBSubnetList": {
"Type": "List<AWS::EC2::Subnet::Id>",
"Description": "Subnet Ids for provisioning load balancer supporting the VPC Link"
},
"BackendBaseEndpoint": {
"Description": "The backend service base url including protocol. e.g.: https://<url>",
"Type": "String",
"Default": "https://mybackend.dev.mycompany.com"
},
"TargetIpAddresses": {
"Type": "CommaDelimitedList",
"Description": "Comma separated list of NLB target ip addresses. Specify two entries.",
"Default": "10.78.80.1, 10.79.80.1"
}
},
"Resources": {
"API": {
"Type": "AWS::ApiGateway::RestApi",
"Properties": {
"Name": "Test Api",
"Description": "Test Api using VPC_LINK and AWS_IAM authorisation",
"Body": {
"swagger": "2.0",
"info": {
"title": "Test Api"
},
"schemes": [
"https"
],
"paths": {
"/{proxy+}": {
"x-amazon-apigateway-any-method": {
"parameters": [
{
"name": "proxy",
"in": "path",
"required": true,
"type": "string"
}
],
"responses": {},
"security": [
{
"sigv4": []
}
],
"x-amazon-apigateway-integration": {
"responses": {
"default": {
"statusCode": "200"
}
},
"requestParameters": {
"integration.request.path.proxy": "method.request.path.proxy"
},
"uri": {
"Fn::Join": [
"",
[
{
"Ref": "BackendBaseEndpoint"
},
"/{proxy}"
]
]
},
"passthroughBehavior": "when_no_match",
"connectionType": "VPC_LINK",
"connectionId": "${stageVariables.vpcLinkId}",
"httpMethod": "GET",
"type": "http_proxy"
}
}
}
},
"securityDefinitions": {
"sigv4": {
"type": "apiKey",
"name": "Authorization",
"in": "header",
"x-amazon-apigateway-authtype": "awsSigv4"
}
}
},
"EndpointConfiguration": {
"Types": [
"REGIONAL"
]
}
},
"DependsOn": "VPCLink"
},
"APIStage": {
"Type": "AWS::ApiGateway::Stage",
"Properties": {
"StageName": "dev",
"Description": "dev Stage",
"RestApiId": {
"Ref": "API"
},
"DeploymentId": {
"Ref": "APIDeployment"
},
"MethodSettings": [
{
"ResourcePath": "/*",
"HttpMethod": "GET",
"MetricsEnabled": "true",
"DataTraceEnabled": "true",
"LoggingLevel": "ERROR"
}
],
"Variables": {
"vpcLinkId": {
"Ref": "VPCLink"
}
}
}
},
"APIDeployment": {
"Type": "AWS::ApiGateway::Deployment",
"Properties": {
"RestApiId": {
"Ref": "API"
},
"Description": "Test Deployment"
}
},
"VPCLink": {
"Type": "AWS::ApiGateway::VpcLink",
"Properties": {
"Description": "Vpc link to GIS platform",
"Name": "VPCLink",
"TargetArns": [
{
"Ref": "NLB"
}
]
}
},
"NLBTargetGroup": {
"Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
"Properties": {
"Name": "NLBTargetGroup",
"Port": 443,
"Protocol": "TCP",
"TargetGroupAttributes": [
{
"Key": "deregistration_delay.timeout_seconds",
"Value": "20"
}
],
"TargetType": "ip",
"Targets": [
{
"Id": { "Fn::Select" : [ "0", {"Ref": "TargetIpAddresses"} ] },
"Port": 443,
"AvailabilityZone": "all"
},
{
"Id": { "Fn::Select" : [ "1", {"Ref": "TargetIpAddresses"} ] },
"Port": 443,
"AvailabilityZone": "all"
}
],
"VpcId": {
"Ref": "VPCId"
},
"Tags": [
{
"Key": "Project",
"Value": "API and VPC Link Test"
}
]
}
},
"NLB": {
"Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
"Properties": {
"Type": "network",
"Scheme": "internal",
"Subnets": {
"Ref": "NLBSubnetList"
}
}
},
"NLBListener": {
"Type": "AWS::ElasticLoadBalancingV2::Listener",
"Properties": {
"DefaultActions": [
{
"Type": "forward",
"TargetGroupArn": {
"Ref": "NLBTargetGroup"
}
}
],
"LoadBalancerArn": {
"Ref": "NLB"
},
"Port": "443",
"Protocol": "TCP"
}
}
},
"Outputs": {
"NetworkLoadBalancerArn": {
"Value": {
"Ref": "NLB"
},
"Description": "The network elastic load balancer Amazon resource name"
}
}
}
Unfortunately, CloudFormation does not support API Gateway's VPC Links at this time.
You can create a Lambda-backed custom resource to manage the VPC Link using CloudFormation.
Here is a Lambda function (using python3.6) for a CloudFormation custom resource I use to manage VPC links:
import copy
import json
import re
import time
import boto3
from botocore.vendored import requests
SUCCESS = "SUCCESS"
FAILED = "FAILED"
FAILED_PHYSICAL_RESOURCE_ID = "FAILED_PHYSICAL_RESOURCE_ID"
class AddOrUpdateTargetArnsError(Exception):
def __init__(self):
self.message = 'Target arns are not allowed to be changed/added.'
super().__init__(self.message)
class FailedVpcLinkError(Exception):
def __init__(self, status_message):
self.message = f'statusMessages: {status_message}'
super().__init__(self.message)
def lambda_handler(event, context):
try:
_lambda_handler(event, context)
except Exception as e:
send(
event,
context,
response_status=FAILED,
# Do not fail on delete to avoid rollback failure
response_data=None,
physical_resource_id=event.get('PhysicalResourceId', FAILED_PHYSICAL_RESOURCE_ID),
reason=e
)
# Must raise, otherwise the Lambda will be marked as successful, and the exception
# will not be logged to CloudWatch logs.
raise
def _lambda_handler(event, context):
print("Received event: ")
print(event)
resource_type = event['ResourceType']
if resource_type != "Custom::ApiGatewayVpcLink":
raise ValueError(f'Unexpected resource_type: {resource_type}')
request_type = event['RequestType']
wait_for = event.get('WaitFor', None)
resource_properties = event['ResourceProperties']
physical_resource_id = event.get('PhysicalResourceId', None)
apigateway = boto3.client('apigateway')
if wait_for:
handle_self_invocation(
wait_for=wait_for,
physical_resource_id=physical_resource_id,
event=event,
context=context,
)
else:
if request_type == 'Create':
kwargs = dict(
name=resource_properties['Name'],
targetArns=resource_properties['TargetArns'],
description=resource_properties.get('Description', None)
)
response = apigateway.create_vpc_link(**kwargs)
event_copy = copy.deepcopy(event)
event_copy['WaitFor'] = 'CreateComplete'
event_copy['PhysicalResourceId'] = response['id']
print('Reinvoking function because VPC link creation is asynchronous')
relaunch_lambda(event=event_copy, context=context)
return
elif request_type == 'Update':
old_resource_properties = event['OldResourceProperties']
current_target_arns = apigateway.get_vpc_link(
vpcLinkId=physical_resource_id,
)['targetArns']
# must compare current_target_arns to resource_properties['TargetArns'], to protect against
# UPDATE created by UPDATE_FAILED. In that particular case, current_target_arns will be the same as
# resource_properties['TargetArns'] but different than old_resource_properties['TargetArns']
if set(current_target_arns) != set(resource_properties['TargetArns']) and \
set(resource_properties['TargetArns']) != set(old_resource_properties['TargetArns']):
raise AddOrUpdateTargetArnsError()
patch_operations = []
if resource_properties['Name'] != old_resource_properties['Name']:
patch_operations.append(dict(
op='replace',
path='/name',
value=resource_properties['Name'],
))
if 'Description' in resource_properties and 'Description' in old_resource_properties:
if resource_properties['Description'] != old_resource_properties['Description']:
patch_operations.append(dict(
op='replace',
path='/description',
value=resource_properties['Description'],
))
elif 'Description' in resource_properties and 'Description' not in old_resource_properties:
patch_operations.append(dict(
op='replace',
path='/description',
value=resource_properties['Description'],
))
elif 'Description' not in resource_properties and 'Description' in old_resource_properties:
patch_operations.append(dict(
op='replace',
path='/description',
value=None,
))
apigateway.update_vpc_link(
vpcLinkId=physical_resource_id,
patchOperations=patch_operations,
)
elif request_type == 'Delete':
delete = True
if physical_resource_id == FAILED_PHYSICAL_RESOURCE_ID:
delete = False
print('Custom resource was never properly created, skipping deletion.')
stack_name = re.match("arn:aws:cloudformation:.+:stack/(?P<stack_name>.+)/.+", event['StackId']).group('stack_name')
if stack_name in physical_resource_id:
delete = False
print(f'Skipping deletion, because VPC link was not created properly. Heuristic: stack name ({stack_name}) found in physical resource ID ({physical_resource_id})')
logical_resource_id = event['LogicalResourceId']
if logical_resource_id in physical_resource_id:
delete = False
print(f'Skipping deletion, because VPC link was not created properly. Heuristic: logical resource ID ({logical_resource_id}) found in physical resource ID ({physical_resource_id})')
if delete:
apigateway.delete_vpc_link(
vpcLinkId=physical_resource_id
)
event_copy = copy.deepcopy(event)
event_copy['WaitFor'] = 'DeleteComplete'
print('Reinvoking function because VPC link deletion is asynchronous')
relaunch_lambda(event=event_copy, context=context)
return
else:
print(f'Request type is {request_type}, doing nothing.')
send(
event,
context,
response_status=SUCCESS,
response_data=None,
physical_resource_id=physical_resource_id,
)
def handle_self_invocation(wait_for, physical_resource_id, event, context):
apigateway = boto3.client('apigateway')
if wait_for == 'CreateComplete':
print('Waiting for creation of VPC link: {vpc_link_id}'.format(vpc_link_id=physical_resource_id))
response = apigateway.get_vpc_link(
vpcLinkId=physical_resource_id,
)
status = response['status']
print('Status of VPC link {vpc_link_id} is {status}'.format(vpc_link_id=physical_resource_id, status=status))
if status == 'AVAILABLE':
send(
event,
context,
response_status=SUCCESS,
response_data=None,
physical_resource_id=physical_resource_id,
)
elif status == 'FAILED':
raise FailedVpcLinkError(status_message=response['statusMessage'])
elif status == 'PENDING':
# Sleeping here to avoid polluting CloudWatch Logs by reinvoking the Lambda too quickly
time.sleep(30)
relaunch_lambda(event, context)
else:
print('Unexpected status, doing nothing')
elif wait_for == 'DeleteComplete':
print('Waiting for deletion of VPC link: {vpc_link_id}'.format(vpc_link_id=physical_resource_id))
try:
response = apigateway.get_vpc_link(
vpcLinkId=physical_resource_id,
)
except apigateway.exceptions.NotFoundException:
print('VPC link {vpc_link_id} deleted successfully'.format(vpc_link_id=physical_resource_id))
send(
event,
context,
response_status=SUCCESS,
response_data=None,
physical_resource_id=physical_resource_id,
)
else:
status = response['status']
assert status == 'DELETING', f'status is {status}'
# Sleeping here to avoid polluting CloudWatch Logs by reinvoking the Lambda too quickly
time.sleep(10)
relaunch_lambda(event, context)
else:
raise ValueError(f'Unexpected WaitFor: {wait_for}')
def relaunch_lambda(event, context):
boto3.client("lambda").invoke(
FunctionName=context.function_name,
InvocationType='Event',
Payload=json.dumps(event),
)
def send(event, context, response_status, response_data, physical_resource_id, reason=None):
response_url = event['ResponseURL']
response_body = {
'Status': response_status,
'Reason': str(reason) if reason else 'See the details in CloudWatch Log Stream: ' + context.log_stream_name,
'PhysicalResourceId': physical_resource_id,
'StackId': event['StackId'],
'RequestId': event['RequestId'],
'LogicalResourceId': event['LogicalResourceId'],
'Data': response_data,
}
json_response_body = json.dumps(response_body)
headers = {
'content-type': '',
'content-length': str(len(json_response_body))
}
try:
requests.put(
response_url,
data=json_response_body,
headers=headers
)
except Exception as e:
print("send(..) failed executing requests.put(..): " + str(e))

Facebook Messenger Bot - Invalid URL button fields for List Template

I have been working on sending the user a list containing some data. I am following Facebook's documentation to set up my request payload. However, I am getting the following error:
{'error': {
'message': '(#100) Invalid URL button fields provided. Please check documentation for details.',
'type': 'OAuthException',
'code': 100,
'error_subcode': 2018125, 'fbtrace_id': 'GZFFcM+j5e/'}}
Here is my JSON Payload:
{'recipient': {'id': 'MY_MESSENGER_ID'},
'message':
{'attachment':
{'type': 'template',
'payload':
{'template_type': 'list',
'top_element_style': 'compact',
'elements':
[{'title': 'Hello 1', 'subtitle': 'Subtitle 1',
'buttons':
[{'title': 'View', 'type': 'web_url',
'url': 'https://www.medium.com/',
'messenger_extensions': 'false',
'webview_height_ratio': 'full',
'fallback_url': 'https://www.medium.com/'}],
'default_action':
{'title': 'View', 'type': 'web_url',
'url': 'https://www.medium.com/',
'messenger_extensions': 'false',
'webview_height_ratio': 'full',
'fallback_url': 'https://www.medium.com/'}},
{'title': 'Hello 2', 'subtitle': 'Subtitle 2',
'image_url': 'https://cdn-images-1.medium.com/1*Vkf6A8Mb0wBoL3Fw1u0paA.jpeg',
'buttons':
[{'title': 'View', 'type': 'web_url',
'url': 'https://www.medium.com/',
'messenger_extensions': 'false',
'webview_height_ratio': 'full',
'fallback_url': 'https://www.medium.com/'}],
'default_action':
{'title': 'View', 'type': 'web_url',
'url': 'https://www.medium.com/',
'messenger_extensions': 'false',
'webview_height_ratio': 'full',
'fallback_url': 'https://www.medium.com/'}}]}}}}
I have checked and re-checked it multiple times. I have also sent Facebook's example JSON from the documentation, but I got the same reply. Please take a look and let me know where I am stuck!
This is my endpoint URL:
"https://graph.facebook.com/v2.6/me/messages?access_token="
Thanks in advance!
Your request has two issues:
1. For default_action, you can set fallback_url only if messenger_extensions is true.
2. default_action cannot have a title property.
Try this:
{
"recipient": {
"id": "{{PSID}}"
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "list",
"top_element_style": "compact",
"elements": [{
"title": "Hello 1",
"subtitle": "Subtitle 1",
"buttons": [{
"title": "View",
"type": "web_url",
"url": "https://www.medium.com/",
"messenger_extensions": "false",
"webview_height_ratio": "full"
}],
"default_action": {
"type": "web_url",
"url": "https://www.medium.com/",
"messenger_extensions": "false",
"webview_height_ratio": "full"
}
},
{
"title": "Hello 2",
"subtitle": "Subtitle 2",
"image_url": "https://cdn-images-1.medium.com/1*Vkf6A8Mb0wBoL3Fw1u0paA.jpeg",
"buttons": [{
"title": "View",
"type": "web_url",
"url": "https://www.medium.com/",
"messenger_extensions": "false",
"webview_height_ratio": "full"
}],
"default_action": {
"type": "web_url",
"url": "https://www.medium.com/",
"messenger_extensions": "false",
"webview_height_ratio": "full"
}
}
]
}
}
}
}
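For reference, a minimal sketch of posting that corrected payload from Python to the Send API endpoint mentioned in the question (PAGE_ACCESS_TOKEN is a placeholder for your own page token, and the payload argument is the dict shown above):

import requests

PAGE_ACCESS_TOKEN = 'YOUR_PAGE_ACCESS_TOKEN'  # placeholder: your page token
GRAPH_URL = 'https://graph.facebook.com/v2.6/me/messages'

def send_list_template(payload):
    # POST the message payload to the Send API; requests sets the JSON Content-Type.
    response = requests.post(
        GRAPH_URL,
        params={'access_token': PAGE_ACCESS_TOKEN},
        json=payload,
        timeout=10,
    )
    print(response.status_code, response.json())
    return response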