AWS SES - Nodemailer - transporter.sendEmail is not a function

const nodemailer = require('nodemailer');
const aws = require('aws-sdk');
aws.config.update({ region: 'myRegion' });
const senderEmail = process.env.SENDER_EMAIL;
const destinationEmail = process.env.DESTINATION_EMAIL;
const transporter = nodemailer.createTransport({
  SES: new aws.SES({
    apiVersion: '2010-12-01'
  })
});
exports.handler = async (event) => {
  let request = event.body;
  try {
    await this.sendEmail(request);
    return {
      'statusCode': 201,
      'headers': {
        'Access-Control-Allow-Origin': '*'
      }
    };
  } catch (error) {
    return {
      'statusCode': 500
    };
  }
};
exports.sendEmail = async (request) => {
  try {
    await transporter.sendEmail({
      from: senderEmail,
      to: destinationEmail,
      subject: 'Hello',
      text: 'Hello using SES'
    });
  } catch (error) {
    console.log(error);
    throw error;
  }
};
I have the following code, which gives an error saying 'transporter.sendEmail is not a function'. I have the following dependencies specified in my package.json (some content is stripped, but it is valid JSON):
{
  "devDependencies": {
    "aws-sdk": "^2.267.1",
    "chai": "^4.1.2",
    "eslint": "^5.1.0",
    "eslint-config-standard": "^11.0.0",
    "eslint-plugin-import": "^2.13.0",
    "eslint-plugin-node": "^6.0.1",
    "eslint-plugin-promise": "^3.8.0",
    "eslint-plugin-standard": "^3.1.0",
    "hippie-swagger": "^3.2.0",
    "mocha": "^5.1.1",
    "mochawesome": "^3.0.3",
    "sinon": "^6.1.0",
    "swagger-parser": "^5.0.2"
  },
  "dependencies": {
    "axios": "^0.18.0",
    "nodemailer": "^4.6.8",
    "nodemailer-ses-transport": "^1.5.1"
  }
}
At first look it does not seem to be a permissions issue, so I am ruling that out. Any pointers to fix it would be of great help.
I am using Node.js 8.10 in Lambda.

Should be transporter.sendMail
http://nodemailer.com/usage/#sending-mail
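In other words, only the method name needs to change; the SES transport setup from the question stays the same. A minimal sketch of the corrected helper:
exports.sendEmail = async (request) => {
  // Nodemailer's API is transporter.sendMail; transporter.sendEmail does not exist
  await transporter.sendMail({
    from: senderEmail,
    to: destinationEmail,
    subject: 'Hello',
    text: 'Hello using SES'
  });
};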

Related

AWS SES send email lambda not sending every time

I want to send emails using SES from AWS Lambda. The problem is that the email is only sent sometimes, using the same code. We don't get errors.
Here's the code:
const AWS = require('aws-sdk');
var ses = new AWS.SES();
exports.handler = async (event, context, callback) => {
  context.callbackWaitsForEmptyEventLoop = false;
  await new Promise((resolve, reject) => {
    var params = {
      Destination: {
        ToAddresses: [myEmail]
      },
      Message: {
        Body: {
          Text: { Data: "Test" }
        },
        Subject: { Data: "Test Email" }
      },
      Source: "sourceMail"
    };
    ses.sendEmail(params, function (err, data) {
      if (err) {
        console.log(err);
        context.fail(err);
      } else {
        console.log(data);
        context.succeed(event);
      }
      callback(null, {err: err, data: data});
    });
  });
}
I would be careful with using callbackWaitsForEmptyEventLoop, as it can lead to unexpected results (if it is false, any outstanding events continue to run during the next invocation).
Can you try using this simplified version:
const AWS = require('aws-sdk');
var ses = new AWS.SES();
exports.handler = async (event, context, callback) => {
  const params = {
    Destination: {
      ToAddresses: [myEmail],
    },
    Message: {
      Body: {
        Text: { Data: 'Test' },
      },
      Subject: { Data: 'Test Email' },
    },
    Source: 'sourceMail',
  };
  await ses.sendEmail(params).promise();
  return event;
};

AWS Lambda - unable to convert SDK call to promise

I have a Lambda which looks like so:
module.exports.handler = (event, context, callback) => {
  AWS.config.setPromisesDependency(null);
  const uploadPromise = s3.upload(params).promise();
  uploadPromise.then((data) => {
    const response = {
      ...
    };
    return response;
  }).catch((error) => {
    console.log(error);
  });
};
Calling it from Postman results in a server error. CloudWatch logs have no further info.
Doing:
s3.upload(params, (error, data) => {
  if (error) {
    console.error("error occurred storing to s3: ", error);
    return;
  }
  const response = {
    ...
  };
  return response;
});
does not result in a server error.
I am trying to follow the information from AWS that can be found here:
https://aws.amazon.com/blogs/developer/support-for-promises-in-the-sdk/
Postman is able to upload to Lambda by doing the following with async/await and try/catch:
const AWS = require("aws-sdk");
const util = require("util");
exports.handler = async function(event, context) {
  const s3 = new AWS.S3();
  const encodedImage = util.inspect(event.body);
  const decodedImage = Buffer.from(encodedImage, "base64");
  const filePath = "test.png";
  const params = {
    Body: decodedImage,
    Bucket: "my bucket",
    Key: filePath,
    ACL: "public-read",
    ContentType: "mime/png"
  };
  try {
    const result = await s3.upload(params).promise();
    const response = {
      statusCode: 200,
      headers: {
        my_header: "my_value"
      },
      body: JSON.stringify(result),
      isBase64Encoded: false
    };
    return response;
  } catch (error) {
    console.log('error')
  }
};
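The underlying problem with the original handler is that the response object built inside .then() is never handed back to Lambda: the handler is not async, nothing is returned, and callback is never invoked, so API Gateway sees an empty (malformed) proxy response. If you prefer to keep the callback style instead of async/await, a minimal sketch (reusing the s3 client and params object from the question) would be:
module.exports.handler = (event, context, callback) => {
  s3.upload(params).promise()
    .then((data) => {
      // Hand the proxy-style response to Lambda via callback;
      // returning it from .then() alone is not enough
      callback(null, {
        statusCode: 200,
        body: JSON.stringify(data),
        isBase64Encoded: false
      });
    })
    .catch((error) => {
      console.log(error);
      callback(error);
    });
};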

Failed use apigwManagementApi.postToConnection in $connect route

I want to return the connectionId to a client after the client connects to the AWS WebSocket API.
I'm using apigwManagementApi.postToConnection to send a response to the client, but I always get a confusing error message.
I have already tried to debug and search Google, but I can't find a solution for this.
patch.js
require('aws-sdk/lib/node_loader');
var AWS = require('aws-sdk/lib/core');
var Service = AWS.Service;
var apiLoader = AWS.apiLoader;
apiLoader.services['apigatewaymanagementapi'] = {};
AWS.ApiGatewayManagementApi = Service.defineService('apigatewaymanagementapi', ['2018-11-29']);
Object.defineProperty(apiLoader.services['apigatewaymanagementapi'], '2018-11-29', {
  get: function get() {
    var model = {
      "metadata": {
        "apiVersion": "2018-11-29",
        "endpointPrefix": "execute-api",
        "signingName": "execute-api",
        "serviceFullName": "AmazonApiGatewayManagementApi",
        "serviceId": "ApiGatewayManagementApi",
        "protocol": "rest-json",
        "jsonVersion": "1.1",
        "uid": "apigatewaymanagementapi-2018-11-29",
        "signatureVersion": "v4"
      },
      "operations": {
        "PostToConnection": {
          "http": {
            "requestUri": "/@connections/{connectionId}",
            "responseCode": 200
          },
          "input": {
            "type": "structure",
            "members": {
              "Data": {
                "type": "blob"
              },
              "ConnectionId": {
                "location": "uri",
                "locationName": "connectionId"
              }
            },
            "required": [
              "ConnectionId",
              "Data"
            ],
            "payload": "Data"
          }
        }
      },
      "shapes": {}
    };
    model.paginators = {
      "pagination": {}
    };
    return model;
  },
  enumerable: true,
  configurable: true
});
module.exports = AWS.ApiGatewayManagementApi;
index.js
const AWS = require('aws-sdk');
require('./patch.js');
exports.handler = async (event) => {
  const connectionId = event.requestContext.connectionId;
  const apigwManagementApi = new AWS.ApiGatewayManagementApi({
    apiVersion: '2018-11-29',
    endpoint: event.requestContext.domainName + '/' + event.requestContext.stage
  });
  await apigwManagementApi.postToConnection({ ConnectionId: connectionId, Data: connectionId }).promise();
  return {};
};
client.js
const WebSocket = require('ws');
const ws = new WebSocket('wss://****');
ws.on('open', () => {
  console.log('connected ===================>');
  ws.on('message', data => console.warn(`From server: ${data}`));
});
Error in CloudWatch:
{
  "errorMessage": "410",
  "errorType": "UnknownError",
  "stackTrace": [
    "Object.extractError (/var/runtime/node_modules/aws-sdk/lib/protocol/json.js:48:27)",
    "Request.extractError (/var/runtime/node_modules/aws-sdk/lib/protocol/rest_json.js:52:8)",
    "Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)",
    "Request.emit (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:77:10)",
    "Request.emit (/var/runtime/node_modules/aws-sdk/lib/request.js:683:14)",
    "Request.transition (/var/runtime/node_modules/aws-sdk/lib/request.js:22:10)",
    "AcceptorStateMachine.runTo (/var/runtime/node_modules/aws-sdk/lib/state_machine.js:14:12)",
    "/var/runtime/node_modules/aws-sdk/lib/state_machine.js:26:10",
    "Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:38:9)",
    "Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:685:12)"
  ]
}
I don't know why, but if I try this in a custom route, the code works.
Does anyone know how to solve this?
I'd suggest looking into this example from AWS; there the $connect response is used for subprotocol confirmation, but I think any payload can be provided.
The most important bit is the route integration settings in the template, basically the following two lines in the route integration properties:
IntegrationMethod: POST
ConnectionType: INTERNET
Then the response will be sent to the connected client.
The only way I've found to make this work is to use a DynamoDB table to store connections, then set up a trigger from the table back to a Lambda function.
There are a few catches though. This Lambda function won't work like your index.js file above. You'll have to run npm install --save aws-sdk in a folder with your index.js file, zip it, and upload it to the Lambda function, so that the SDK is bundled locally.
You will also need to set up a user with proper access and put the credentials into your Lambda function.
Note: if you see a 410 error, that means the connection is no longer there, so you're going in the right direction at that point.
const AWS = require('aws-sdk');
require('./patch.js');
var log = console.log;
AWS.config.update({
  accessKeyId: "YOURDATAHERE",
  secretAccessKey: "YOURDATAHERE"
});
let send = undefined;
function init() {
  const apigwManagementApi = new AWS.ApiGatewayManagementApi({
    apiVersion: '2018-11-29',
    endpoint: "HARDCODEYOURENDPOINTHERE"
  });
  send = async (connectionId, data) => {
    await apigwManagementApi.postToConnection({ ConnectionId: connectionId, Data: `${data}` }).promise();
  }
}
exports.handler = async (event, context) => {
  init();
  console.log('Received event:', JSON.stringify(event, null, 2));
  for (const record of event.Records) {
    //console.log(record.eventID);
    console.log(record.eventName);
    console.log('DynamoDB Record: %j', record.dynamodb);
    if (record.eventName == "INSERT") {
      var connectionId = record.dynamodb.NewImage.connectionId.S;
      try {
        await send(connectionId, connectionId);
      } catch (err) {
        log("Error", err);
      }
      log("sent");
    }
  }
  return `Successfully processed ${event.Records.length} records.`;
};
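Building on the 410 note above, a common follow-up is to delete the stale connection from the table when postToConnection reports it is gone. A minimal sketch, assuming a hypothetical Connections table keyed on connectionId (the table name and the sendOrCleanUp helper are only illustrative):
const AWS = require('aws-sdk');
require('./patch.js');
const ddb = new AWS.DynamoDB.DocumentClient();
const apigwManagementApi = new AWS.ApiGatewayManagementApi({
  apiVersion: '2018-11-29',
  endpoint: "HARDCODEYOURENDPOINTHERE"
});
// Hypothetical helper: push data to one connection, removing it if the client is gone
async function sendOrCleanUp(connectionId, data) {
  try {
    await apigwManagementApi.postToConnection({ ConnectionId: connectionId, Data: `${data}` }).promise();
  } catch (err) {
    if (err.statusCode === 410) {
      // 410 means the connection no longer exists; drop the stale record
      await ddb.delete({ TableName: 'Connections', Key: { connectionId } }).promise();
    } else {
      throw err;
    }
  }
}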

Google Cloud IoT sendCommandToDevice from Cloud Functions showing Service Unavailable

I tried sending the command from Cloud Functions, and I am getting 'Error: The service is currently unavailable.'
package.json:
"dependencies": {
"firebase-admin": "~6.0.0",
"firebase-functions": "^2.0.3",
"googleapis": "34.0.0"
}
const parentName = `projects/${projectId}/locations/${cloudRegion}`;
const registryName = `${parentName}/registries/${reqData.registryId}`;
const binaryData = Buffer.from(JSON.stringify(reqData.message)).toString('base64');
const request = {
  name: `${registryName}/devices/${reqData.deviceId}`,
  binaryData: binaryData
};
google.auth.getClient().then((authClient) => {
  const discoveryUrl = `${DISCOVERY_API}?version=${API_VERSION}`;
  if (authClient.createScopedRequired && authClient.createScopedRequired()) {
    // Scopes can be specified either as an array or as a single,
    // space-delimited string.
    authClient = authClient.createScoped([
      'https://www.googleapis.com/auth/cloud-platform'
    ]);
  }
  google.options({
    auth: authClient
  });
  google.discoverAPI(discoveryUrl).then((client, err) => {
    if (err) {
      console.log('Error during API discovery', err);
      return undefined;
    }
    client.projects.locations.registries.devices.sendCommandToDevice(request,
      (err, data) => {
        if (err) {
          console.log('Could not send command:', request);
          console.log('Message: ', err);
        } else {
          console.log('Success :', data.statusText);
        }
      });
  });
});
Logs:
{ Error: The service is currently unavailable.
    at createError (/user_code/node_modules/googleapis/node_modules/axios/lib/core/createError.js:16:15)
    at settle (/user_code/node_modules/googleapis/node_modules/axios/lib/core/settle.js:18:12)
    at Unzip.handleStreamEnd (/user_code/node_modules/googleapis/node_modules/axios/lib/adapters/http.js:201:11)
    at emitNone (events.js:91:20)
    at Unzip.emit (events.js:185:7)
    at endReadableNT (_stream_readable.js:974:12)
    at _combinedTickCallback (internal/process/next_tick.js:80:11)
    at process._tickDomainCallback (internal/process/next_tick.js:128:9)
The problem is that subfolder MUST be specified, and MUST not be an empty string.
As I was using this in a Firebase function, I just use the 'firebase' subfolder for any commands being sent that do not have a specific subfolder:
const request = {
  name: `${registryName}/devices/${deviceId}`,
  binaryData: Buffer.from(JSON.stringify(commandMessage)).toString("base64"),
  subfolder: 'firebase'
}
Here are the function's dependencies:
"dependencies": {
  "firebase-admin": "^6.4.0",
  "firebase-functions": "^2.1.0",
  "fs-extra": "^7.0.0",
  "googleapis": "^36.0.0"
},
This is probably due to:
a bug in the Node library,
a bug in Google's endpoint, or
lack of testing on Google's part.
It seems that Google's IoT offering is still very young and needs a lot of work.
I'm not too familiar with Firebase Cloud Functions, but I didn't get the error using the inline editor for Cloud Functions (https://console.cloud.google.com/functions). Can you tell me when you started getting this error (and if you're still encountering it)?
For reference, here is the code that I used (basically what you had, but with more explicit definitions for projectId and cloudRegion):
const {google} = require('googleapis');
const API_VERSION = 'v1';
const DISCOVERY_API = 'https://cloudiot.googleapis.com/$discovery/rest';
exports.sendCommand = (req, res) => {
  let reqData = req.body;
  const projectId = reqData.projectId || process.env.GCLOUD_PROJECT;
  const cloudRegion = reqData.cloudRegion || process.env.GCLOUD_REGION;
  const parentName = `projects/${projectId}/locations/${cloudRegion}`;
  const registryName = `${parentName}/registries/${reqData.registryId}`;
  const binaryData = Buffer.from(JSON.stringify(reqData.message)).toString('base64');
  const request = {
    name: `${registryName}/devices/${reqData.deviceId}`,
    binaryData: binaryData
  };
  google.auth.getClient().then((authClient) => {
    const discoveryUrl = `${DISCOVERY_API}?version=${API_VERSION}`;
    if (authClient.createScopedRequired && authClient.createScopedRequired()) {
      // Scopes can be specified either as an array or as a single,
      // space-delimited string.
      authClient = authClient.createScoped([
        'https://www.googleapis.com/auth/cloud-platform'
      ]);
    }
    google.options({
      auth: authClient
    });
    google.discoverAPI(discoveryUrl).then((client, err) => {
      if (err) {
        console.log('Error during API discovery', err);
        return undefined;
      }
      client.projects.locations.registries.devices.sendCommandToDevice(request,
        (err, data) => {
          if (err) {
            console.log('Could not send command:', request);
            console.log('Message: ', err);
          } else {
            console.log('Success :', data.statusText);
          }
        });
    });
  });
  res.status(200).send(reqData.message);
};
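For reference, the request body this function expects looks roughly like the object below. The field names come from the reqData usage above; the values are hypothetical placeholders.
// Hypothetical example payload for the sendCommand function above
const reqData = {
  projectId: 'my-gcp-project',   // optional, falls back to process.env.GCLOUD_PROJECT
  cloudRegion: 'us-central1',    // optional, falls back to process.env.GCLOUD_REGION
  registryId: 'my-registry',
  deviceId: 'my-device',
  message: { led: 'on' }         // JSON-stringified and base64-encoded before sending
};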

Lambda function s3.getObject returns "Internal server error"

This code works just fine locally using Node.js. Images download from S3 and are written to file.
However, in Lambda (using Node.js 8.10) I'm getting "Internal Server Error" when testing the function, with this in the logs:
"Execution failed due to configuration error: Malformed Lambda proxy response"
I am using the Lambda proxy response in the callback, but clearly some AWS SDK error with S3 is not getting caught.
I do have a role set up with S3 full access that the Lambda has access to.
What am I missing with my first Lambda function? I've followed the docs and tutorials correctly and it is not working.
const async = require('async')
const aws = require('aws-sdk')
const fs = require('fs')
const exec = require('child_process').exec
const bucket = 'mybucket'
const s3Src = 'bucket_prefix'
const s3Dst = 'new_prefix'
const local = `${__dirname}/local/`
aws.config.region = 'us-west-2'
const s3 = new aws.S3()
exports.handler = async (event, context, callback) => {
  const outputImage = 'hello_world.png'
  const rack = JSON.parse(event.body)
  const images = my.images
  async.waterfall([
    function download(next) {
      let downloaded = 0
      let errors = false
      let errorMessages = []
      for (let i = 0; i < images.length; i++) {
        let key = `${s3Src}/${images[i].prefix}/${images[i].image}`,
          localImage = `${local}${images[i].image}`
        getBucketObject(bucket, key, localImage).then(() => {
          ++downloaded
          if (downloaded === images.length) { // js is non blocking, need to check if all images have been downloaded. If so, then go to next function
            if (errors) {
              next(errorMessages.join(' '))
            } else {
              next(null)
            }
          }
        }).catch(error => {
          errorMessages.push(`${error} - ${localImage}`)
          ++downloaded
          errors = true
        })
      }
      function getBucketObject(bucket, key, dest) {
        return new Promise((resolve, reject) => {
          let ws = fs.createWriteStream(dest)
          ws.once('error', (err) => {
            return reject(err)
          })
          ws.once('finish', () => {
            return resolve(dest)
          })
          let s3Stream = s3.getObject({
            Bucket: bucket,
            Key: key
          }).createReadStream()
          s3Stream.pause() // Under load this will prevent first few bytes from being lost
          s3Stream.on('error', (err) => {
            return reject(err)
          })
          s3Stream.pipe(ws)
          s3Stream.resume()
        })
      }
    }
  ], err => {
    if (err) {
      let response = {
        "statusCode": 400,
        "headers": {
          "my_header": "my_value"
        },
        "body": JSON.stringify(err),
        "isBase64Encoded": false
      }
      callback(null, response)
    } else {
      let response = {
        "statusCode": 200,
        "headers": {
          "my_header": "my_value"
        },
        "body": JSON.stringify(`<img src="${local}${outputImage}" />`),
        "isBase64Encoded": false
      }
      callback(null, response)
    }
  })
}
A response should always be sent to the callback function. Your code sends a response only on error; that's why the Lambda executor thinks your code failed.
BTW, shouldn't your functions in async.waterfall be separated with a comma, as two tasks? See the sketch below.
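To illustrate the comma point, a rough sketch of the intended two-task async.waterfall shape, inside the handler from the question (so async and callback come from there; the task and variable names are only illustrative and the bodies are omitted):
async.waterfall([
  function download(next) {
    // ... download the images here, then pass the result on to the next task
    const downloadedFiles = []
    next(null, downloadedFiles)
  },
  function processImages(downloadedFiles, next) {
    // ... the second task receives the previous task's result
    next(null)
  }
], err => {
  // always report back through callback, on both the success and the error path
  callback(null, { statusCode: err ? 400 : 200 })
})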
Locally I've been running Node.js 10.10, and Lambda is currently at 8.10; I'm sure that is a big part of it. In the end I had to drop the async handler and move the getBucketObject function out of the waterfall. Once I made those adjustments it started working. Another issue was that the downloaded images needed to go into the "/tmp" directory.
const aws = require('aws-sdk')
const async = require('async')
const fs = require('fs')
const bucket = 'mybucket'
const s3Src = 'mys3src'
const local = '/tmp/'
aws.config.region = 'us-west-2'
const s3 = new aws.S3()
exports.handler = (event, context, callback) => {
  const outputImage = 'hello_world.png'
  async.waterfall([
    function download(next) {
      let downloaded = 0,
        errorMessages = []
      for (let i = 0; i < event['images'].length; i++) {
        let key = `${s3Src}/${event['images'][i]['prefix']}/${event['images'][i]['image']}`,
          localImage = `${local}${event['images'][i]['image']}`
        getBucketObject(bucket, key, localImage).then(() => {
          downloaded++
          if (downloaded === event['images'].length) {
            if (errorMessages.length > 0) {
              next(errorMessages.join(' '))
            } else {
              console.log('All downloaded')
              next(null)
            }
          }
        }).catch(error => {
          downloaded++
          errorMessages.push(`${error} - ${localImage}`)
          if (downloaded === event['images'].length) {
            next(errorMessages.join(' '))
          }
        })
      }
    }
  ], err => {
    if (err) {
      console.error(err)
      callback(null, {
        "statusCode": 400,
        "body": JSON.stringify(err),
        "isBase64Encoded": false
      })
    } else {
      console.log('event image created!')
      callback(null, {
        "statusCode": 200,
        "body": JSON.stringify(`<img src="${local}${outputImage}" />`),
        "isBase64Encoded": false
      })
    }
  })
}
function getBucketObject(bucket, key, dest) {
  return new Promise((resolve, reject) => {
    let ws = fs.createWriteStream(dest)
    ws.once('error', (err) => {
      return reject(err)
    })
    ws.once('finish', () => {
      return resolve(dest)
    })
    let s3Stream = s3.getObject({
      Bucket: bucket,
      Key: key
    }).createReadStream()
    s3Stream.pause() // Under load this will prevent first few bytes from being lost
    s3Stream.on('error', (err) => {
      return reject(err)
    })
    s3Stream.pipe(ws)
    s3Stream.resume()
  })
}