Issue getting AWS Rekognition to read images when placed in S3 bucket

I have gotten really stuck trying to get AWS Rekognition to label images I upload to S3. I am still learning how to get the roles and access right (I have added 'all' Rekognition services as inline policies to all the roles I have in IAM for this app, which I'm building to get some hands-on experience with AWS).
Below is all the code (apologies for the messy code - still learning).
Further below that is the output from the tests I'm running in Lambda.
Could someone please suggest what I am doing wrong, and what adjustments I could make so that Rekognition can scan the image and list out what is in it (e.g. person, tree, car, etc.)?
Thanks in advance!!!
'use strict';

let aws = require('aws-sdk');
let s3 = new aws.S3({ apiVersion: '2006-03-01' });
let rekognition = new aws.Rekognition();

s3.bucket = 'arn:aws:s3:::XXXXXXX/uploads';

exports.handler = function(event, context) {
    // Get the object from the event and show its content type
    const eventn = event.Records[0].eventName;
    const filesize = event.Records[0].s3.object.size;
    const bucket = event.Records[0].s3.bucket.name;
    const key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
    var eventText = JSON.stringify(event, null, 2);
    console.log('print this out -->' + eventText);
    console.log('bucket name --> ' + s3.bucket);

    var filesizemod = "-";
    if (typeof filesize == "number") {
        if (filesize >= 1000000000) { filesizemod = (filesize / 1000000000).toFixed(2) + ' GB'; }
        else if (filesize >= 1000000) { filesizemod = (filesize / 1000000).toFixed(2) + ' MB'; }
        else if (filesize >= 1000) { filesizemod = (filesize / 1000).toFixed(2) + ' KB'; }
        else { filesizemod = filesize + ' bytes'; }
    } else if (typeof filesize !== 'undefined' && filesize) {
        filesizemod = filesize;
    }

    var Rekparams = {
        Image: {
            S3Object: { Bucket: s3.bucket, Name: key }
        },
        MaxLabels: 10,
        MinConfidence: 0.0
    };
    console.log("s3object is = " + JSON.stringify(Rekparams));

    var request = rekognition.detectLabels(Rekparams, function(err, data) {
        if (err) {
            var errorMessage = 'Error in [rekognition-image-assessment].\r' +
                ' Function input [' + JSON.stringify(event, null, 2) + '].\r' +
                ' Error [' + err + '].';
            // Log error
            console.log(errorMessage, err.stack);
            return (errorMessage, null);
        }
        else {
            console.log("i get to here!!!! ****");
            console.log('Retrieved Labels [' + JSON.stringify(data) + ']');
            console.log("i have got all the labels i need!!");
            // Return labels as a JavaScript object that can be passed into the
            // subsequent lambda function.
            return (null, Object.assign(data, event));
        }
    });
    console.log("not in label getting function!!");

    // Call detectLabels
    //var request = rekognition.detectLabels(Rekparams);
    //var request1 = rekognition.detectLabels(bucket, key);
    //var labels = JSON.stringify(request1);
    //console.log('Retrieved Labels ['+JSON.stringify(data)+']');
    //DetectLabelsRequest request = new DetectLabelsRequest()
    //    .withImage(new Image().withS3Object(new S3Object().withName(key).withBucket(s3.bucket))).withMaxLabels(10).withMinConfidence(75F);

    var subjecttext = "Myfirstapp -> New image uploaded";
    var eventText2 = "\n\nFile: " + key + "\nSize: "
        + filesizemod
        + "\n\nPlease see my S3 bucket for images."
        + "\nThis is what is in the image:"
        + request;

    var sns = new aws.SNS();
    var params = {
        Message: eventText2,
        Subject: subjecttext,
        TopicArn: "arn:aws:sns:XXXXXX"
    };
    sns.publish(params, context.done);
};
Test output from Lambda. Also note my S3 bucket is in the same region as my Lambda function:
Response:
{
    "ResponseMetadata": {
        "RequestId": "a08afc8a-d2a4-5a8a-a435-af4503295913"
    },
    "MessageId": "5f1c516b-c52f-5aa1-8af3-02a414a2c938"
}
Request ID:
"1b17d85f-8e77-11e8-a89d-e723ca75e0cf"
Function Logs:
"1970-01-01T00:00:00.000Z",
"requestParameters": {
    "sourceIPAddress": "127.0.0.1"
},
"s3": {
    "configurationId": "testConfigRule",
    "object": {
        "eTag": "0123456789abcdef0123456789abcdef",
        "key": "HappyFace.jpg",
        "sequencer": "0A1B2C3D4E5F678901",
        "size": 1024
    },
    "bucket": {
        "ownerIdentity": {
            "principalId": "EXAMPLE"
        },
        "name": "sourcebucket",
        "arn": "arn:aws:s3:::mybucket"
    },
    "s3SchemaVersion": "1.0"
},
"responseElements": {
    "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH",
    "x-amz-request-id": "EXAMPLE123456789"
},
"awsRegion": "us-east-1",
"eventName": "ObjectCreated:Put",
"userIdentity": {
    "principalId": "EXAMPLE"
},
"eventSource": "aws:s3"
}
]
}
2018-07-23T12:51:24.864Z 1b17d85f-8e77-11e8-a89d-e723ca75e0cf bucket name --> arn:aws:s3:::XXXXXXXX/uploads
2018-07-23T12:51:24.865Z 1b17d85f-8e77-11e8-a89d-e723ca75e0cf s3object is = {"Image":{"S3Object":{"Bucket":"arn:aws:s3:::XXXXXXX/uploads","Name":"HappyFace.jpg"}},"MaxLabels":10,"MinConfidence":0}
2018-07-23T12:51:25.427Z 1b17d85f-8e77-11e8-a89d-e723ca75e0cf not in label getting function!!
2018-07-23T12:51:25.925Z 1b17d85f-8e77-11e8-a89d-e723ca75e0cf Error in [rekognition-image-assessment].
Function input [{
    "Records": [
        {
            "eventVersion": "2.0",
            "eventTime": "1970-01-01T00:00:00.000Z",
            "requestParameters": {
                "sourceIPAddress": "127.0.0.1"
            },
            "s3": {
                "configurationId": "testConfigRule",
                "object": {
                    "eTag": "0123456789abcdef0123456789abcdef",
                    "key": "HappyFace.jpg",
                    "sequencer": "0A1B2C3D4E5F678901",
                    "size": 1024
                },
                "bucket": {
                    "ownerIdentity": {
                        "principalId": "EXAMPLE"
                    },
                    "name": "sourcebucket",
                    "arn": "arn:aws:s3:::mybucket"
                },
                "s3SchemaVersion": "1.0"
            },
            "responseElements": {
                "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH",
                "x-amz-request-id": "EXAMPLE123456789"
            },
            "awsRegion": "us-east-1",
            "eventName": "ObjectCreated:Put",
            "userIdentity": {
                "principalId": "EXAMPLE"
            },
            "eventSource": "aws:s3"
        }
    ]
}].
Error [ValidationException: 1 validation error detected: Value 'arn:aws:s3:::XXXXXX/uploads' at 'image.s3Object.bucket' failed to satisfy constraint: Member must satisfy regular expression pattern: [0-9A-Za-z\.\-_]*]. ValidationException: 1 validation error detected: Value 'arn:aws:s3:::XXXXXXXX/uploads' at 'image.s3Object.bucket' failed to satisfy constraint: Member must satisfy regular expression pattern: [0-9A-Za-z\.\-_]*
at Request.extractError (/var/runtime/node_modules/aws-sdk/lib/protocol/json.js:48:27)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:77:10)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/request.js:683:14)
at Request.transition (/var/runtime/node_modules/aws-sdk/lib/request.js:22:10)
at AcceptorStateMachine.runTo (/var/runtime/node_modules/aws-sdk/lib/state_machine.js:14:12)
at /var/runtime/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:38:9)
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:685:12)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:115:18)
END RequestId: 1b17d85f-8e77-11e8-a89d-e723ca75e0cf
REPORT RequestId: 1b17d85f-8e77-11e8-a89d-e723ca75e0cf Duration: 1309.41 ms Billed Duration: 1400 ms Memory Size: 128 MB Max Memory Used: 36 MB

Bucket is not supposed to be an ARN, but the name of the bucket.
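In other words, Rekognition's S3Object.Bucket field expects just the bucket name, and the object key (including any prefix such as uploads/) goes in Name. A minimal corrected sketch, reusing the variables the question's handler already extracts from the event:

// Sketch: pass the bucket *name* from the event record, not an ARN or a
// "bucket/prefix" path. Any key prefix (e.g. "uploads/") is part of the key.
const bucket = event.Records[0].s3.bucket.name;
const key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));

const Rekparams = {
    Image: { S3Object: { Bucket: bucket, Name: key } },
    MaxLabels: 10,
    MinConfidence: 0.0
};

rekognition.detectLabels(Rekparams, function(err, data) {
    if (err) { console.log(err, err.stack); return; }
    // data.Labels is an array like [{ Name: "Person", Confidence: 98.9 }, ...]
    console.log('Retrieved Labels [' + JSON.stringify(data) + ']');
});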

Related

AWS Lambda Test S3 Event Notification is Null

I have the following Kotlin code:
override fun execute(input: APIGatewayProxyRequestEvent): APIGatewayProxyResponseEvent {
    val response = APIGatewayProxyResponseEvent()
    val body = input.body
    if (body != null) {
        val json = JSONObject(body)
        val s3 = json.optJSONArray("Records").getJSONObject(0).getJSONObject("s3")
        val bucketName = s3.getJSONObject("bucket").getString("name")
        try {
            val jsonResponse = objectMapper.writeValueAsString(mapOf("message" to bucketName))
            response.statusCode = 200
            response.body = jsonResponse
        } catch (e: JsonProcessingException) {
            response.statusCode = 500
        }
    }
    return response
}
I basically want the function to be triggered on a new S3 put and to just get the bucket name. When I try locally to pass an APIGatewayProxyRequestEvent with the following body:
{
    "Records": [
        {
            "eventVersion": "2.0",
            "eventSource": "aws:s3",
            "awsRegion": "us-east-1",
            "eventTime": "1970-01-01T00:00:00.000Z",
            "eventName": "ObjectCreated:Put",
            "userIdentity": {
                "principalId": "EXAMPLE"
            },
            "requestParameters": {
                "sourceIPAddress": "127.0.0.1"
            },
            "responseElements": {
                "x-amz-request-id": "EXAMPLE123456789",
                "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH"
            },
            "s3": {
                "s3SchemaVersion": "1.0",
                "configurationId": "testConfigRule",
                "bucket": {
                    "name": "example-bucket",
                    "ownerIdentity": {
                        "principalId": "EXAMPLE"
                    },
                    "arn": "arn:aws:s3:::example-bucket"
                },
                "object": {
                    "key": "test%2Fkey",
                    "size": 1024,
                    "eTag": "0123456789abcdef0123456789abcdef",
                    "sequencer": "0A1B2C3D4E5F678901"
                }
            }
        }
    ]
}
The Kotlin code works as expected. When I deploy this on AWS Lambda, and I either provide the exact same body in a test event or actually upload an object to S3 to trigger the function, input.body is null. I don't understand why.

AWS - How to get CloudFront metrics using JavaScript SDK

I am trying to get CloudFront metrics using the JS SDK for AWS, but I am not getting anything back.
I am not sure what I am doing wrong, but I have isolated the problem NOT to be with:
Permissions (gave it a full admin account for testing purposes)
Region: North Virginia, i.e. us-east-1 (as required for CloudFront)
Basic params like StartTime, EndTime, DistributionId
My code is as below (simplified):
var AWS = require('aws-sdk');
AWS.config.update({
    accessKeyId: "accessKeyId",
    secretAccessKey: "secretAccessKey",
    apiVersion: '2017-10-25',
    region: 'us-east-1'
});

var cloudwatchmetrics = new AWS.CloudWatch();

var cloudFrontParams = {
    "StartTime": 1518867432,
    "EndTime": 1518868032,
    "MetricDataQueries": [
        {
            "Id": "m1",
            "MetricStat": {
                "Metric": {
                    "Dimensions": [
                        {
                            "Name": "DistributionId",
                            "Value": "ValueOfDistribution"
                        },
                        {
                            "Name": "Region",
                            "Value": "Global"
                        }
                    ],
                    "MetricName": "Requests",
                    "Namespace": "AWS/CloudFront"
                },
                "Stat": "Sum",
                "Period": 3600
            },
            "ReturnData": true
        }
    ]
};

cloudwatchmetrics.getMetricData(cloudFrontParams, function (err, data) {
    if (err) {
        console.log(err);
    } else {
        console.log(JSON.stringify(data));
    }
});
This is what I get back (it's not erroring out):
{
    "ResponseMetadata": {
        "RequestId": "xxxxxxxxxxxxxxxxxxxxx"
    },
    "MetricDataResults": [
        {
            "Id": "m1",
            "Label": "Requests",
            "Timestamps": [],
            "Values": [],
            "StatusCode": "Complete",
            "Messages": []
        }
    ]
}
The issue was with the StartTime: it was too far back in time. The value in the post (1518867432) translates to Saturday, February 17, 2018.
Hopefully this helps someone someday.
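As an illustration, here is a minimal sketch of the same query with the window derived from the current time instead of hard-coded epoch seconds (getMetricData also accepts JavaScript Date objects for StartTime and EndTime; the distribution ID remains a placeholder from the question):

var AWS = require('aws-sdk');
AWS.config.update({ region: 'us-east-1' }); // CloudFront metrics live in us-east-1

var cloudwatchmetrics = new AWS.CloudWatch();

// Query the last 24 hours rather than a fixed (and possibly stale) timestamp.
var now = new Date();
var cloudFrontParams = {
    StartTime: new Date(now.getTime() - 24 * 60 * 60 * 1000),
    EndTime: now,
    MetricDataQueries: [{
        Id: "m1",
        MetricStat: {
            Metric: {
                Namespace: "AWS/CloudFront",
                MetricName: "Requests",
                Dimensions: [
                    { Name: "DistributionId", Value: "ValueOfDistribution" }, // placeholder
                    { Name: "Region", Value: "Global" }
                ]
            },
            Stat: "Sum",
            Period: 3600
        },
        ReturnData: true
    }]
};

cloudwatchmetrics.getMetricData(cloudFrontParams, function (err, data) {
    if (err) console.log(err);
    else console.log(JSON.stringify(data));
});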

Unable to send GET request with AWS Lambda & DynamoDB Rest API using serverless

I am creating an API to make GET and POST requests to a table in DynamoDB.
I deployed it using serverless and received the endpoints for each API type.
But when testing it out with Postman I get the following error:
Bad request. We can't connect to the server for this app or website at this time. There might be too much traffic or a configuration error. Try again later, or contact the app or website owner.
If you provide content to customers through CloudFront, you can find steps to troubleshoot and help prevent this error by reviewing the CloudFront documentation.
Code for creating the data in the table:
// Assumed imports for this snippet (not shown in the original post):
const AWS = require("aws-sdk");
const { v4: uuidv4 } = require("uuid");
const db = new AWS.DynamoDB.DocumentClient();

const postsTable = process.env.POSTS_TABLE;

// Create a response
function response(statusCode, message) {
    return {
        statusCode: statusCode,
        body: JSON.stringify(message)
    };
}

// Create a post
module.exports.createPost = (event, context, callback) => {
    const reqBody = JSON.parse(event.body);
    if (
        !reqBody.title ||
        reqBody.title.trim() === "" ||
        !reqBody.body ||
        reqBody.body.trim() === ""
    ) {
        return callback(
            null,
            response(400, {
                error: "Post must have a title and body and they must not be empty"
            })
        );
    }
    const post = {
        id: uuidv4(),
        createdAt: new Date().toISOString(),
        userId: 1,
        title: reqBody.title,
        body: reqBody.body
    };
    return db
        .put({
            TableName: postsTable,
            Item: post
        })
        .promise()
        .then(() => {
            callback(null, response(201, post));
        })
        .catch(err => callback(null, response(err.statusCode, err)));
};
I managed to do it, but did not use Serverless.
I set up Lambda functions to POST and GET the data via a URL.
I think the issue previously was to do with the policies. This time, when making the Lambda functions, I set them up as follows:
I clicked on "Create a new role from AWS policy templates" while creating an execution role for a new function, then selected "Simple microservice permissions" under Policy templates. This added the basic execution role policy and the DynamoDB permissions below to the role, for all the tables in the same region as the function:
"Action": [
"dynamodb:DeleteItem",
"dynamodb:GetItem",
"dynamodb:PutItem",
"dynamodb:Scan",
"dynamodb:UpdateItem"
]
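For reference, the full statement the template adds to the role's policy looks roughly like this sketch (the Resource ARN below is an assumption about how the template scopes it; check the generated role for the exact value):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "dynamodb:DeleteItem",
                "dynamodb:GetItem",
                "dynamodb:PutItem",
                "dynamodb:Scan",
                "dynamodb:UpdateItem"
            ],
            "Resource": "arn:aws:dynamodb:<region>:<account-id>:table/*"
        }
    ]
}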
Lambda function for POST request
// Assumed import for this snippet (not shown in the original post):
const AWS = require("aws-sdk");

exports.handler = async (event, context) => {
    const ddb = new AWS.DynamoDB({ apiVersion: "2012-10-08" });
    const documentClient = new AWS.DynamoDB.DocumentClient({
        region: "ap-southeast-1"
    });
    let responseBody = "";
    let statusCode = 0;
    const {
        deviceId,
        batteryLevel,
        eventId,
        id,
        location,
        tags,
        time
    } = JSON.parse(event.body);
    const params = {
        TableName: "dashboard",
        Item: {
            batteryLevel: batteryLevel,
            deviceId: deviceId,
            eventId: eventId,
            location: location,
            tags: tags,
            time: time
        }
    };
    try {
        const data = await documentClient.put(params).promise();
        responseBody = JSON.stringify(data);
        statusCode = 201;
    } catch (err) {
        responseBody = "Unable to POST data";
        statusCode = 403;
    }
    const response = {
        statusCode: statusCode,
        headers: {
            myHeader: "test"
        },
        body: responseBody
    };
    return response;
};
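Only the POST function is shown above; a companion GET handler might look like the following minimal sketch. It is an assumption-filled example: it reuses the same "dashboard" table and simply scans it, since the table's key schema isn't shown in the post:

const AWS = require("aws-sdk");

exports.handler = async (event) => {
    const documentClient = new AWS.DynamoDB.DocumentClient({
        region: "ap-southeast-1"
    });
    let responseBody = "";
    let statusCode = 0;
    try {
        // Scan is used only because the table's key schema isn't shown;
        // a real GET-by-id would use documentClient.get with the table key.
        const data = await documentClient.scan({ TableName: "dashboard" }).promise();
        responseBody = JSON.stringify(data.Items);
        statusCode = 200;
    } catch (err) {
        responseBody = "Unable to GET data";
        statusCode = 403;
    }
    return {
        statusCode: statusCode,
        headers: { myHeader: "test" },
        body: responseBody
    };
};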
There were other issues as well with the method execution of the API: I needed to set a custom model for the request body to match my data:
{
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "DashboardInputModel",
    "type": "object",
    "properties": {
        "batteryLevel": { "type": "string" },
        "deviceId": { "type": "string" },
        "eventId": { "type": "string" },
        "id": { "type": "number" },
        "location": {
            "type": "object",
            "properties": {
                "accuracy": { "type": "number" },
                "latitude": { "type": "number" },
                "longitude": { "type": "number" }
            }
        },
        "tags": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "accelX": { "type": "number" },
                    "accelY": { "type": "number" },
                    "accelZ": { "type": "number" },
                    "createDate": { "type": "string" },
                    "dataFormat": { "type": "number" },
                    "defaultBackground": { "type": "number" },
                    "favorite": { "type": "boolean" },
                    "humidity": { "type": "number" },
                    "id": { "type": "string" },
                    "measurementSequenceNumber": { "type": "number" },
                    "movementCounter": { "type": "number" },
                    "name": { "type": "string" },
                    "pressure": { "type": "number" },
                    "rssi": { "type": "number" },
                    "temperature": { "type": "number" },
                    "txPower": { "type": "number" },
                    "updateAt": { "type": "string" },
                    "voltage": { "type": "number" }
                }
            }
        },
        "time": { "type": "string" }
    }
}
For each action I also enabled CORS and replaced the existing CORS headers.
These two videos explain the entire process much better than the documentation, and I hope they help:
Part 1
Part 2
By bad request, do you mean status code 400? It could simply be that you are not calling your API correctly.
If you are getting a 403, then you need to show that you are authorised to access the resource you are trying to get. You can see how to do this through the AWS docs.
This page includes a link to an example.
List of error codes.
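As a purely hypothetical illustration of the 403 case: if the API stage requires an API key (an assumption, since the question doesn't say), the request must carry an x-api-key header, for example:

// Hypothetical sketch: the hostname, path, and key below are placeholders.
// A 403 "Forbidden" from API Gateway is often a missing or wrong x-api-key
// (or a resource policy / authorizer rejecting the call).
const https = require('https');

const req = https.request({
    hostname: 'abc123.execute-api.ap-southeast-1.amazonaws.com', // placeholder
    path: '/dev/posts',                                          // placeholder
    method: 'GET',
    headers: { 'x-api-key': 'YOUR_API_KEY' }                     // placeholder
}, (res) => {
    console.log('Status:', res.statusCode);
    res.on('data', (chunk) => process.stdout.write(chunk));
});
req.on('error', (e) => console.error(e));
req.end();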

S3 - Getting metadata from Post S3 Upload lambda function

I'm generating a presigned URL using s3.getSignedUrl('putObject', params), with the following params:
var params = {
    Bucket: bucketName,
    Key: photoId + "-" + photoNumber + "-of-" + numberImages + ".jpeg",
    Expires: signedUrlExpireSeconds,
    ContentType: contentType,
    Metadata: { testkey1: "hello" }
};
I'm trying to receive the metadata in my Lambda function that runs on successful S3 uploads, however it's not appearing. Does anyone know why? The upload is successful, and in my printed logs I'm receiving everything but the metadata tag in the event:
console.log(event);

"Records": [
    {
        "eventVersion": "2.1",
        "eventSource": "aws:s3",
        "awsRegion": "us-east-1",
        "eventTime": "2020-01-15T06:51:57.171Z",
        "eventName": "ObjectCreated:Put",
        "userIdentity": {
            "principalId":
        },
        "requestParameters": {
            "sourceIPAddress":
        },
        "responseElements": {
            "x-amz-request-id": "4C32689CE5B70A82",
            "x-amz-id-2": "AS0f97RHlLW2DF6tVfRwbTeoEpk2bEne/0LrWqHpLJRHY5GMBjy/NQOHqYAMhd2JjiiUcuw0nUTMJS8pDAch1Abc5xzzWVMv"
        },
        "s3": {
            "s3SchemaVersion": "1.0",
            "configurationId": "9a9a755e-e809-4dbf-abf8-3450aaa208ed",
            "bucket": {
                "name": ,
                "ownerIdentity": {
                    "principalId": "A3SZPXLS03IWBG"
                },
                "arn":
            },
            "object": {
                "key": "BcGMYe-1-of-1.jpeg",
                "size": 19371,
                "eTag": "45c719f2f6b5349cc360db9a13d0cee4",
                "sequencer": "005E1EB6921E08F7E4"
            }
        }
    }
]
This is the S3 event message structure, and that structure doesn't contain object metadata.
You need to fetch the metadata yourself inside the Lambda function.
You can get the metadata through an S3 HeadObject call, using the bucket name and object key from the event you received (see the sketch after the event structure below):
{
    "Records": [
        {
            "eventVersion": "2.2",
            "eventSource": "aws:s3",
            "awsRegion": "us-west-2",
            "eventTime": "The time, in ISO-8601 format, for example, 1970-01-01T00:00:00.000Z, when Amazon S3 finished processing the request",
            "eventName": "event-type",
            "userIdentity": {
                "principalId": "Amazon-customer-ID-of-the-user-who-caused-the-event"
            },
            "requestParameters": {
                "sourceIPAddress": "ip-address-where-request-came-from"
            },
            "responseElements": {
                "x-amz-request-id": "Amazon S3 generated request ID",
                "x-amz-id-2": "Amazon S3 host that processed the request"
            },
            "s3": {
                "s3SchemaVersion": "1.0",
                "configurationId": "ID found in the bucket notification configuration",
                "bucket": {
                    "name": "bucket-name",
                    "ownerIdentity": {
                        "principalId": "Amazon-customer-ID-of-the-bucket-owner"
                    },
                    "arn": "bucket-ARN"
                },
                "object": {
                    "key": "object-key",
                    "size": "object-size",
                    "eTag": "object eTag",
                    "versionId": "object version if bucket is versioning-enabled, otherwise null",
                    "sequencer": "a string representation of a hexadecimal value used to determine event sequence, only used with PUTs and DELETEs"
                }
            },
            "glacierEventData": {
                "restoreEventData": {
                    "lifecycleRestorationExpiryTime": "The time, in ISO-8601 format, for example, 1970-01-01T00:00:00.000Z, of Restore Expiry",
                    "lifecycleRestoreStorageClass": "Source storage class for restore"
                }
            }
        }
    ]
}
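As referenced above, here is a minimal Node.js sketch of that HeadObject call (it assumes the Metadata shown in the question was actually sent with the upload):

// Sketch: the S3 event never carries user metadata, so fetch it with
// HeadObject using the bucket and key from the event record.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = async (event) => {
    const bucket = event.Records[0].s3.bucket.name;
    const key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));

    const head = await s3.headObject({ Bucket: bucket, Key: key }).promise();
    // User metadata comes back with the x-amz-meta- prefix stripped, so
    // Metadata: { testkey1: "hello" } appears as head.Metadata.testkey1.
    console.log('Metadata:', head.Metadata);
};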

CloudFormation Custom Resource responseKey

I have a Lambda-backed custom resource in my CloudFormation stack, and I need to fetch the function's output and surface it in the AWS console. How can I handle this problem?
My stack is shown below:
"CreateExistingVPC": {
"Type": "Custom::CreateExistingVPC",
"Properties": {
"ServiceToken": { "Fn::If": ["LambdaAvailable",{ "Fn::GetAtt": [ "CustomLogic", "Outputs.LambdaAttachHostedZoneArn" ] }, { "Ref": "AWS::NoValue" } ] },
"Region": { "Ref": "AWS::Region" },
"HostedZoneId": { "Ref": "InternalHostedZone" },
"VpcId": { "Fn::GetAtt": [ "VPC", "Outputs.VPC" ] }
}
}
},
"Outputs": {
"Route53VPC": {
"Description": "ExistingRoute53VPCStatus",
"Value": { "Fn::GetAtt": [ "CreateExistingVPC", "{ ??????? }" ] }
}
}
Actually, I have found some answers, but the 'response key' did not work in my case. How can I find the response key?
AWS CloudFormation, Output value from Custom Resource
You need to use the variable you are using to return your response, e.g. (Node.js):
module.exports.createPoolList = (event, context, callback) => {
    if (event.RequestType == 'Create') {
        let array = event.ResourceProperties.OpsPoolArnList.split(",");
        array.push(event.ResourceProperties.UserPool);
        let response = {
            'list': array.join(),
        };
        // sendresponse (helper, not shown here) posts the payload below back
        // to CloudFormation via the pre-signed URL in event.ResponseURL.
        sendresponse(event, "SUCCESS", response, "");
    }
    if (event.RequestType == 'Delete') {
        sendresponse(event, "SUCCESS", null, "");
    }
    callback(null, "");
};
Here 'list' is the variable which contains my output and is returned in my response. The built payload looks like:
let payload = {
    'StackId': event.StackId,
    'Status': responsestatus,
    'Reason': reason,
    'RequestId': event.RequestId,
    'LogicalResourceId': event.LogicalResourceId,
    'PhysicalResourceId': event.LogicalResourceId + 'qwerty',
    'Data': response
};
And I refer to this in my script as
!GetAtt <ResourceName>.list
Hope it helps.
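For completeness, the sendresponse helper referenced above isn't shown in the answer. A minimal sketch of such a helper, following the CloudFormation custom-resource response protocol (an HTTPS PUT of the payload to the pre-signed URL in event.ResponseURL), might look like:

const https = require('https');
const url = require('url');

// Sketch of a cfn-response-style helper: it PUTs the JSON payload to the
// pre-signed S3 URL that CloudFormation supplies in event.ResponseURL.
function sendresponse(event, responsestatus, response, reason) {
    const payload = JSON.stringify({
        'StackId': event.StackId,
        'Status': responsestatus,
        'Reason': reason || 'See CloudWatch Logs',
        'RequestId': event.RequestId,
        'LogicalResourceId': event.LogicalResourceId,
        'PhysicalResourceId': event.LogicalResourceId + 'qwerty',
        'Data': response
    });
    const parsed = url.parse(event.ResponseURL);
    const request = https.request({
        hostname: parsed.hostname,
        path: parsed.path,
        method: 'PUT',
        headers: {
            'content-type': '',
            'content-length': Buffer.byteLength(payload)
        }
    }, () => {});
    request.on('error', (e) => console.log('sendresponse failed: ' + e));
    request.write(payload);
    request.end();
}

Whatever keys you put in Data (here, list) are then addressable from the template with Fn::GetAtt, as in !GetAtt <ResourceName>.list above.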