AWS Lambda DocumentClient.put error when trying to put data

I am facing this timeout error in the AWS editor (Node.js environment). I understand that there may be a timeout set to 3 seconds, but this small piece of code should easily run within 3 seconds. Also, I have used other methods to put data into my DynamoDB table, like batchWrite, and they work fine!
Response:
{
    "errorMessage": "2021-02-08T15:58:57.631Z 66890dfe-6f3a-42ae-b689-510bb8027a9d Task timed out after 3.03 seconds"
}
My code:
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({
    region: 'ap-south-1'
});

exports.handle = function(event, context, callback) {
    var params = {
        TableName: 'guestbook',
        Item: {
            "date": Date.now(),
            "message": "I love your website"
        }
    };
    docClient.put(params, function(err, data) {
        if (err) {
            callback(err, null);
        } else {
            callback(null, data);
        }
    });
};
I am trying to put data into my DynamoDB table.
My role policy is:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "VisualEditor0",
            "Effect": "Allow",
            "Action": "dynamodb:*",
            "Resource": "*"
        }
    ]
}
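For comparison, here is a minimal sketch of the batchWrite call style the question says works fine, assuming the same guestbook table (the item values are illustrative, not from the original post):

const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({ region: 'ap-south-1' });

// batchWrite wraps each item in a PutRequest, keyed by table name
const params = {
    RequestItems: {
        'guestbook': [
            {
                PutRequest: {
                    Item: {
                        "date": Date.now(),
                        "message": "I love your website"
                    }
                }
            }
        ]
    }
};

docClient.batchWrite(params, function(err, data) {
    if (err) console.error(err); // uses the same client, credentials, and network path as put()
    else console.log(data);
});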

Related

Synthetics Canary keeps failing with 403 error

I cannot get the canary to hit the API Gateway endpoint; I keep getting the error below. I can hit the API using Postman without an authentication key, so I'm not sure why I can't do the same using a Synthetics canary. The API is within the same VPC as the canary, if that helps. Can anyone help me with this? (ARNs are changed for privacy, so please ignore that.)
{"message":"User: anonymous is not authorized to perform: execute-api:Invoke on resource: arn:aws:execute-api:us-east-1:********2234:54354534534/test/GET/mezzanine with an explicit deny"}
The code I have for the canary is:
const synthetics = require('Synthetics');
const log = require('SyntheticsLogger');
const syntheticsConfiguration = synthetics.getConfiguration();

const apiCanaryBlueprint = async function () {
    syntheticsConfiguration.setConfig({
        restrictedHeaders: [], // Values of these headers will be redacted from logs and reports
        restrictedUrlParameters: [] // Values of these URL parameters will be redacted from logs and reports
    });

    // Handle validation for positive scenario
    const validateSuccessful = async function(res) {
        return new Promise((resolve, reject) => {
            if (res.statusCode < 200 || res.statusCode > 299) {
                throw new Error(res.statusCode + ' ' + res.statusMessage);
            }
            let responseBody = '';
            res.on('data', (d) => {
                responseBody += d;
            });
            res.on('end', () => {
                // Add validation on 'responseBody' here if required.
                resolve();
            });
        });
    };

    // Set request option for Verify https://453453453.execute-api.us-east-1.amazonaws.com
    let requestOptionsStep1 = {
        hostname: '4534535.execute-api.us-east-1.amazonaws.com',
        method: 'GET',
        path: '/test/mezzanine',
        port: '443',
        protocol: 'https:',
        body: "",
        headers: { "health": "true" }
    };
    requestOptionsStep1['headers']['User-Agent'] = [synthetics.getCanaryUserAgentString(), requestOptionsStep1['headers']['User-Agent']].join(' ');

    // Set step config option for Verify https://5345345345435.execute-api.us-east-1.amazonaws.com
    let stepConfig1 = {
        includeRequestHeaders: true,
        includeResponseHeaders: true,
        includeRequestBody: true,
        includeResponseBody: true,
        continueOnHttpStepFailure: true
    };

    await synthetics.executeHttpStep('Verify https://45345334535.execute-api.us-east-1.amazonaws.com', requestOptionsStep1, validateSuccessful, stepConfig1);
};

exports.handler = async () => { return await apiCanaryBlueprint(); };
For the API I have no auth enabled, and the resource policy is:
{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": "", "Action": "execute-api:Invoke", "Resource": "arn:aws:execute-api:us-east-1:34324234234234:343243242///" }, { "Effect": "Deny", "Principal": "", "Action": "execute-api:Invoke", "Resource": "arn:aws:execute-api:us-east-1:3432434234:434324234///", "Condition": { "NotIpAddress": { "aws:SourceIp": [ "23.23.23.23/32", "23.23.23.23/32", "23.23.23.23/32" ] } } } ] }
I have also tried leaving the resource policy blank, and also using the code below, but still cannot get this canary to work.
{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": "", "Action": "execute-api:Invoke", "Resource": "arn:aws:execute-api:us-east-1:4534534543:4543534543534///" } ] }
I just want to check the endpoint.

sts.assumeRole getting undefined credentials

I have a role called awsiotsdk that has full access to IoT. I'm trying to get temporary credentials for Connect, Subscribe, and Receive, but my credentials come back undefined. What could be the reason for this?
var AWS = require("aws-sdk");
var sts = new AWS.STS({ apiVersion: "2011-06-15" });
const iotpolicy = `{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualStudioCode",
"Effect": "Allow",
"Action": [
"iot:Connect",
"iot:Subscribe",
"iot:Receive"
],
"Resource": "*"
}
]
}`;
const role = {
RoleArn: "arn:aws:iam::258030452305:role/awsiotsdk",
Policy: iotpolicy,
RoleSessionName: "RoleSession1",
};
sts.assumeRole(role, (err, data) => {
console.log({
accessKeyId: data.Credentials.accessKeyId,
secretAcessKey: data.Credentials.secretAccessKey,
sessionToken: data.Credentials.sessionToken,
});
});
Output:
{
    accessKeyId: undefined,
    secretAcessKey: undefined,
    sessionToken: undefined
}
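For reference, here is a minimal sketch of reading that response defensively, assuming the AWS SDK for JavaScript v2: the fields on data.Credentials are PascalCase (AccessKeyId, SecretAccessKey, SessionToken), and err should be checked before touching data.

sts.assumeRole(role, (err, data) => {
    if (err) {
        // Surfaces the real failure (e.g. an AccessDenied on sts:AssumeRole)
        // instead of reading fields off an undefined response
        console.error(err);
        return;
    }
    console.log({
        accessKeyId: data.Credentials.AccessKeyId, // PascalCase in the SDK v2 response
        secretAccessKey: data.Credentials.SecretAccessKey,
        sessionToken: data.Credentials.SessionToken,
    });
});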

Access Denied 400 when copying an S3 object

My Lambda tries to copy an S3 object, but it does not work.
Code:
import { escapeUriPath } from '@aws-sdk/util-uri-escape';
import { S3Client, CopyObjectCommandInput, CopyObjectCommand } from '@aws-sdk/client-s3';

const s3Client = new S3Client({}); // client instantiation, omitted from the original snippet

const handler = () => {
    const path = escapeUriPath('All Files/documents/folder with space/test');
    const CopySource = escapeUriPath('my_bucket/All Files/documents/folder with space/test_rename');
    copyS3Object({
        Bucket: 'my_bucket',
        Key: path,
        CopySource
    });
};

export const copyS3Object = async (input: CopyObjectCommandInput) => {
    const command = new CopyObjectCommand(input);
    return await s3Client.send(command);
};
The error I see in CloudWatch is not particularly helpful:
"Code": "AccessDenied",
"name": "AccessDenied",
"$fault": "client",
"$metadata": {
"httpStatusCode": 400,
"attempts": 1,
"totalRetryDelay": 0
}
The interesting part is that:
- I have already set the S3 policy to allow the s3:* action on the bucket
- I am able to use the same Lambda to upload/delete an object, but not to copy one
What could cause the AccessDenied error?
I figured it out. It was totally my fault.
The policy resource should be "Resource": ["my_bucket", "my_bucket/*"] instead of "Resource": ["my_bucket"].
The full policy:
resource "aws_iam_policy" "create-lambda-policy" {
name = local.lambda_policy_name
path = "/"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:*"
],
"Resource": ["my_bucket", "my_bucket/*"],
"Effect": "Allow"
}
]
}
EOF
}
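As a side note, S3 policy resources are normally written as full ARNs rather than bare bucket names; a sketch of the same statement with ARNs (the bucket name is the question's placeholder):

{
  "Action": ["s3:*"],
  "Resource": [
    "arn:aws:s3:::my_bucket",
    "arn:aws:s3:::my_bucket/*"
  ],
  "Effect": "Allow"
}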

Creating a Lambda function in Terraform runs for a long duration or times out with an error

I am trying to create a Lambda function using the Terraform script below:
provider "aws" {
region = "us-east-1"
}
resource "aws_iam_role" "lambda_exec_role" {
name = "lambda_exec_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_lambda_function" "demo_lambda" {
function_name = "demo_lambda"
handler = "index.handler"
runtime = "nodejs4.3"
filename = "function.zip"
source_code_hash = "${base64sha256(file("function.zip"))}"
role = "${aws_iam_role.lambda_exec_role.arn}"
}
The zip file contains a single file, index.js:
exports.handler = function(event, context, callback) {
console.log('Event: ', JSON.stringify(event, null, '\t'));
console.log('Context: ', JSON.stringify(context, null, '\t'));
callback(null);
};
The 'apply' phase runs for a very long time, around 15 minutes, and is sometimes still running; sometimes it reports a timeout. Sometimes the function gets created, sometimes not, but in either case the process takes a lot of time.
Is this how Lambda function creation works, or am I missing something here?
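As an aside, on Terraform 0.12 and later the hash of a binary zip is usually computed with filebase64sha256 rather than base64sha256(file(...)), since file() expects UTF-8 text; a sketch of that one line:

source_code_hash = filebase64sha256("function.zip")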

AWS Lambda get s3 object data template not working

I am working on understanding AWS Lambda. This is only slightly modified from the S3 template, in order to view the content type of an uploaded file.
My received-event log and params log both show up in CloudWatch; however, it's as if s3.getObject() never gets executed, since neither the error log nor the data log ever appears in the logs, and all I get is a task timed out after the timeout period.
var aws = require('aws-sdk');
var s3 = new aws.S3({ apiVersion: 'latest' });

exports.handler = function(event, context) {
    console.log('Received event:', JSON.stringify(event, null, 2));
    // Get the object from the event and show its content type
    var bucket = event.Records[0].s3.bucket.name;
    var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
    var params = {
        Bucket: bucket,
        Key: key
    };
    console.log(params);
    s3.getObject(params, function(err, data) {
        if (err) console.log(err, err.stack); // an error occurred
        else console.log(data); // successful response
        /*
        if (err) {
            var message = "Error getting object " + key + " from bucket " + bucket +
                ". Make sure they exist and your bucket is in the same region as this function.";
            console.log(message);
            context.fail(message);
        } else {
            console.log('CONTENT TYPE:', data.ContentType);
            context.succeed(data.ContentType);
        }
        */
    });
};
Here is the current IAM role. Do I need to make some changes?
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ],
            "Resource": "arn:aws:logs:*:*:*"
        },
        {
            "Effect": "Allow",
            "Action": [
                "ec2:CreateNetworkInterface",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DetachNetworkInterface",
                "ec2:DeleteNetworkInterface"
            ],
            "Resource": "*"
        },
        {
            "Effect": "Allow",
            "Action": [
                "s3:ListAllMyBuckets",
                "s3:ListBucket",
                "s3:GetBucketLocation",
                "s3:GetObject",
                "s3:PutObject"
            ],
            "Resource": "arn:aws:s3:::*"
        },
        {
            "Action": "lambda:*",
            "Effect": "Allow",
            "Resource": "arn:aws:lambda:*"
        }
    ]
}
I've run into this issue before. It happens after the validate, build, and sign callbacks execute, but before the send callback occurs. Essentially, one of the sockets goes into zombie mode. I've reported this to AWS, but they have not yet produced a fix. To "fix" it, set the socket timeout when instantiating the S3 client; the socket will time out and the operation will automatically retry.
var s3 = new AWS.S3({httpOptions: { timeout: 2000 }});
By default, the timeout is two minutes, which is why it is problematic if this occurs in a Lambda where the timeout is significantly shorter.
Before ListBucket you need ListAllMyBuckets. I also advise you to restrict your Lambda's access:
{
"Statement":[
{
"Effect":"Allow",
"Action":[
"s3:ListAllMyBuckets"
],
"Resource":"arn:aws:s3:::*"
},
{
"Effect":"Allow",
"Action":[
"s3:ListBucket",
"s3:GetBucketLocation"
],
"Resource":"arn:aws:s3:::yourBucket"
},
{
"Effect":"Allow",
"Action":[
"s3:GetObject"
],
"Resource":"arn:aws:s3:::yourBucket/*"
}
]
}
You must include an explicit callback at the end of your Lambda, or it will stop running before your S3 callback function ever gets hit.
As an example:
module.exports.getS3Object = async (event, context, callback) => {
    const AWS = require('aws-sdk');
    const S3 = new AWS.S3();
    // Await the promise so the handler doesn't finish before the S3 call completes
    await S3.getObject({ Bucket: "exampleBucket", Key: "exampleKey" })
        .promise()
        .then(data => console.log("finished"))
        .catch(err => console.log("there was an error: " + err));
    callback(null, { statusCode: "200", body: "yay" }); // MUST INCLUDE
};
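Alternatively, since the handler is already declared async, you can drop the callback entirely and just return the response; the Lambda Node.js runtime resolves an async handler with its return value. A minimal sketch with the same assumed bucket and key:

module.exports.getS3Object = async (event) => {
    const AWS = require('aws-sdk');
    const S3 = new AWS.S3();
    // Awaiting keeps the handler alive until the S3 call completes
    const data = await S3.getObject({ Bucket: "exampleBucket", Key: "exampleKey" }).promise();
    console.log("finished:", data.ContentType);
    return { statusCode: "200", body: "yay" };
};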