I have been trying to launch a new EC2 instance and add a piece of string data to my SQS queue from a Lambda function, in response to an object-upload event in my S3 bucket.
I have been able to update my SQS queue successfully, but I have been unable to launch the new EC2 instance. Despite setting the Lambda function's timeout to the maximum of 5 minutes and increasing its memory allocation, an operation-timeout error keeps surfacing.
My code is below. Can anyone point out the potential causes of such an error? I have pasted my whole function here for reference, but the part concerning the instance launch is towards the end of the code.
Thank you so much!
console.log('Loading function');
var fs = require('fs');
var async = require('async');
var aws = require('aws-sdk');
var s3 = new aws.S3({ apiVersion: '2006-03-01' });
var sqs = new aws.SQS({apiVersion: '2012-11-05'});
var ecs = new aws.ECS({apiVersion: '2014-11-13'});
var ec2 = new aws.EC2({apiVersion: '2015-10-01'});
// Check if the given key suffix matches a suffix in the whitelist. Return true if it matches, false otherwise.
exports.checkS3SuffixWhitelist = function(key, whitelist) {
if(!whitelist){ return true; }
if(typeof whitelist == 'string'){ return key.match(whitelist + '$') }
if(Object.prototype.toString.call(whitelist) === '[object Array]') {
for(var i = 0; i < whitelist.length; i++) {
if(key.match(whitelist[i] + '$')) { return true; }
}
return false;
}
console.log(
'Unsupported whitelist type (' + Object.prototype.toString.call(whitelist) +
') for: ' + JSON.stringify(whitelist)
);
return false;
};
exports.handler = function(event, context) {
//console.log('Received event:', JSON.stringify(event, null, 2));
console.log('Received event:');
//Read in the configuration file
var config = JSON.parse(fs.readFileSync('config.json', 'utf8'));
if(!config.hasOwnProperty('s3_key_suffix_whitelist')) {
config.s3_key_suffix_whitelist = false;
}
console.log('Config: ' + JSON.stringify(config));
var name = event.Records[0].s3.object.key;
if(!exports.checkS3SuffixWhitelist(name, config.s3_key_suffix_whitelist)) {
context.fail('Suffix for key: ' + name + ' is not in the whitelist')
}
// Get the object from the event and show its key
var bucket = event.Records[0].s3.bucket.name;
var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
var params = {
Bucket: bucket,
Key: key
};
s3.getObject(params, function(err, data) {
if (err) {
console.log(err);
var message = "Error getting object " + key + " from bucket " + bucket +
". Make sure they exist and your bucket is in the same region as this function.";
console.log(message);
context.fail(message);
} else {
console.log('CONTENT TYPE:', key);
context.succeed(key);
}
});
//Sending the image key as a message to SQS and starting a new instance on EC2
async.waterfall([
function(next){
var params = {
MessageBody: JSON.stringify(event),
QueueUrl: config.queue
};
console.log("IN QUEUE FUNCTION");
sqs.sendMessage(params, function (err, data) {
if (err) { console.warn('Error while sending message: ' + err); }
else { console.info('Message sent, ID: ' + data.MessageId); }
next(err);
});
},
function (next) {
console.log("INITIALIZING ECS");
var params = {
ImageId: 'ami-e559b485',
MinCount: 1,
MaxCount: 1,
DryRun: true,
InstanceType: 't2.micro',
KeyName: 'malpem2102',
SubnetId: 'subnet-e8607e8d'
}
ec2.runInstances(params, function(err,data){
if(err){
console.log(err, err.stack);
context.fail('Error', "Error getting file: " + err);
return;
} else{
var instanceId = data.Instances[0].InstanceId;
console.log("Created instance ", instanceId);
context.suceed("Created instance");
}
});
}
], function(err){
if (err) {
context.fail('An error has occurred: ' + err);
}
else {
context.succeed('Successfully processed Amazon S3 URL.');
}
}
);
};
I have an S3 bucket structured as below:
myBucket
a/
b/
c/
where myBucket is the S3 bucket and a, b, c are key prefixes (folders) inside it.
I upload images into a/. The S3 event notification sends a message to SQS, which in turn triggers a Lambda function that removes the image background and uploads the result into the b/ folder.
The problem is that if, for example, I upload a folder of around 26 images to S3, only 22 or 23 of them trigger the Lambda function, and only those images get processed.
For some reason S3 is not generating an event for every image, or is this something I should configure in my Lambda function?
Here is my function code
exports.handler = async(event, context, callback) => {
try {
console.log(event.Records[0]);
var json = JSON.parse(event.Records[0]['body']);
console.log('json: '+json);
json = JSON.parse(json['Message']);
json = json['Records'][0]['s3'];
console.log(json);
var srcBucket = json['bucket']['name'];
console.log('srcBucket: ' + srcBucket);
var srcKey = decodeURIComponent(json['object']['key'].replace(/\+/g, ' '));
console.log('srcKey: ' + srcKey);
var str = (srcKey.split('/').pop()).split('.')[0];
console.log('str: ' + str);
if (str != '') {
var folderPath = srcKey.substr(srcKey.indexOf('/') + 1).split('.')[0];
folderPath = folderPath.substring(0, folderPath.lastIndexOf('/'));
console.log('folderPath: ' + folderPath);
const params1 = { Bucket: srcBucket, Key: srcKey };
var origimage = await s3.getObject(params1).promise();
var destObject = await origimage.Body;
var destKey = 'removebg/' + folderPath + '/' + str + '.jpg';
var options = {
'method': 'POST',
'url': 'https://api.remove.bg/v1.0/removebg',
'headers': {
'X-Api-Key': 'xxxxxxxxxxx'
},
formData: {
'image_file': destObject,
'size': 'auto'
},
encoding: null
};
request(options, function(error, response, body) {
if (error) {
console.log(error);
sendmessage(error, 'Error removing image background', arn, srcBucket + '/' + srcKey, destBucket + destKey);
}
var params = { Bucket: destBucket, Key: destKey, Body: body };
s3.upload(params, function(err, data) {
if (err) {
console.log('Error uploading data: ', err);
sendmessage(err, 'Error uploading transparent image to s3', arn, srcBucket + '/' + srcKey, destBucket + destKey);
}
else { console.log('Successfully uploaded data to ' + destBucket); }
});
});
}
}
catch (e) {
console.log(e);
}
callback(null, 'All done!');
};
Please let me know. Thanks in advance.
I think your problem lies on these lines:
console.log(event.Records[0]);
var json = JSON.parse(event.Records[0]['body']);
console.log('json: '+json);
json = JSON.parse(json['Message']);
json = json['Records'][0]['s3'];
Your function is only looking at the first record that is provided to the function. Multiple records can be delivered to the Lambda function in a single event, so your function should loop through the Records entries and process every one of them.
It should do something like:
for (const record of event.Records) {
    console.log(record);
    var json = JSON.parse(record['body']);
    console.log('json: ' + json);
    json = JSON.parse(json['Message']);
    json = json['Records'][0]['s3'];
    // ...process this record as before
}
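Adapted to the async handler in the question, a minimal sketch of the same idea could look like the following (processRecord is a hypothetical helper standing in for the existing getObject / remove.bg / upload logic):
// Sketch only: processRecord is a hypothetical helper wrapping the existing
// getObject -> remove.bg -> upload steps for one S3 record.
exports.handler = async (event) => {
    const jobs = [];
    for (const record of event.Records) {
        const body = JSON.parse(record.body);        // SQS message body
        const message = JSON.parse(body.Message);    // inner notification carried in the body
        for (const s3Record of message.Records) {    // every S3 record, not just the first
            jobs.push(processRecord(s3Record.s3));
        }
    }
    await Promise.all(jobs);                         // finish all images before the handler returns
    return 'All done!';
};
Waiting on Promise.all also keeps the handler from returning while uploads are still in flight, which is another way images can silently go unprocessed.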
I am trying to write a script that loops through an array of items for a DynamoDB table and runs a batch write command. My batching logic works, but I am having trouble with DynamoDB itself. It would be great if I could point my AWS.DynamoDB.DocumentClient() at the DynamoDB instance running on my localhost. Any tips?
I would also consider a way to just run the commands via the AWS CLI, but I am not sure how to do that. I am running Node.js, so it may be possible?
Here is my code:
var AWS = require('aws-sdk');
AWS.config.update({ region: 'eu-central-1' });
var DynamoDB = new AWS.DynamoDB.DocumentClient()
DynamoDB.endpoint = 'http://localhost:8000';
const allItems = require('./resource.json');
const tableName = 'some-table-name';
console.log({ tableName, allItems });
var batches = [];
var currentBatch = [];
var count = 0;
for (let i = 0; i < allItems.length; i++) {
//push item to the current batch
count++;
currentBatch.push(allItems[i]);
if (count % 25 === 0) {
batches.push(currentBatch);
currentBatch = [];
}
}
//if there are still items left in the curr batch, add to the collection of batches
if (currentBatch.length > 0 && currentBatch.length !== 25) {
batches.push(currentBatch);
}
var completedRequests = 0;
var errors = false;
//request handler for DynamoDB
function requestHandler(err, data) {
console.log('In the request handler...');
return function (err, data) {
completedRequests++;
errors = errors ? true : err;
//log error
if (errors) {
console.error(JSON.stringify(err, null, 2));
console.error('Request caused a DB error.');
console.error('ERROR: ' + err);
console.error(JSON.stringify(err, null, 2));
} else {
var res = {
statusCode: 200,
headers: {
'Content-Type': 'application/json',
'Access-Control-Allow-Methods': 'GET,POST,OPTIONS',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': true,
},
body: JSON.stringify(data),
isBase64Encoded: false,
};
console.log(`Success: returned ${data}`);
return res;
}
if (completedRequests == batches.length) {
return errors;
}
};
}
//Make request
var params;
for (let j = 0; j < batches.length; j++) {
//items go in params.RequestedItems.id array
//format for the items is {PutRequest : {Item: ITEM_OBJECT}}
params = '{"RequestItems": {"' + tableName + '": []}}';
params = JSON.parse(params);
params.RequestItems[tableName] = batches[j];
console.log('before db.batchWriteItem: ', params);
//send to db
DynamoDB.batchWrite(params, requestHandler(params));
}
I figured it out and will leave this here for anyone that may need it.
var { DynamoDB } = require('aws-sdk');
var db = new DynamoDB.DocumentClient({
region: 'localhost',
endpoint: 'http://localhost:8000',
});
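With the client above, the existing loop can keep calling db.batchWrite(params, requestHandler(params)) unchanged. As a quick sanity check that the client really talks to the local endpoint, something like the following can be run first (the table name and item shape here are placeholders, not from the question):
// Write one throwaway item through the local endpoint and read it back.
var checkParams = {
    TableName: 'some-table-name',            // placeholder table
    Item: { id: 'smoke-test', createdAt: Date.now() }
};
db.put(checkParams, function (err) {
    if (err) { return console.error('Local DynamoDB not reachable:', err); }
    db.get({ TableName: 'some-table-name', Key: { id: 'smoke-test' } }, function (err, data) {
        if (err) { return console.error(err); }
        console.log('Round-trip item:', JSON.stringify(data.Item));
    });
});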
Following this guide, Streaming CloudWatch Logs Data to Amazon Elasticsearch Service, streaming CloudWatch logs to ELK works fine with one log group and one Lambda function.
But now I want to use a different target Lambda function for my other log groups, and I am not able to do that because there is no such option in the AWS console.
Any help will be appreciated.
Thanks
I was streaming to ELK using the AWS console option Start Streaming to Amazon Elasticsearch Service, but I could not choose a different Lambda function, because only one Lambda function can be selected for a log group through that option.
So I created a new Lambda function and set the log group's stream target to that Lambda function.
Here is all the code you need. The Node.js version for the Lambda function is 4.x, as there was some issue with the newer runtime, but the plus point is that it does not require any extra npm packages.
// v1.1.2
var https = require('https');
var zlib = require('zlib');
var crypto = require('crypto');
var endpoint = 'search-my-test.us-west-2.es.amazonaws.com';
exports.handler = function(input, context) {
// decode input from base64
var zippedInput = new Buffer(input.awslogs.data, 'base64');
// decompress the input
zlib.gunzip(zippedInput, function(error, buffer) {
if (error) { context.fail(error); return; }
// parse the input from JSON
var awslogsData = JSON.parse(buffer.toString('utf8'));
// transform the input to Elasticsearch documents
var elasticsearchBulkData = transform(awslogsData);
// skip control messages
if (!elasticsearchBulkData) {
console.log('Received a control message');
context.succeed('Control message handled successfully');
return;
}
// post documents to the Amazon Elasticsearch Service
post(elasticsearchBulkData, function(error, success, statusCode, failedItems) {
console.log('Response: ' + JSON.stringify({
"statusCode": statusCode
}));
if (error) {
console.log('Error: ' + JSON.stringify(error, null, 2));
if (failedItems && failedItems.length > 0) {
console.log("Failed Items: " +
JSON.stringify(failedItems, null, 2));
}
context.fail(JSON.stringify(error));
} else {
console.log('Success: ' + JSON.stringify(success));
context.succeed('Success');
}
});
});
};
function transform(payload) {
if (payload.messageType === 'CONTROL_MESSAGE') {
return null;
}
var bulkRequestBody = '';
payload.logEvents.forEach(function(logEvent) {
var timestamp = new Date(1 * logEvent.timestamp);
// index name format: cwl-YYYY.MM.DD
var indexName = [
'prod-background-wo-' + timestamp.getUTCFullYear(), // year
('0' + (timestamp.getUTCMonth() + 1)).slice(-2), // month
('0' + timestamp.getUTCDate()).slice(-2) // day
].join('.');
var source = buildSource(logEvent.message, logEvent.extractedFields);
source['response_time'] = source["end"] - source["start"];
source['#id'] = logEvent.id;
source['#timestamp'] = new Date(1 * logEvent.timestamp).toISOString();
source['#message'] = logEvent.message;
source['#owner'] = payload.owner;
source['#log_group'] = payload.logGroup;
source['#log_stream'] = payload.logStream;
var action = { "index": {} };
action.index._index = indexName;
action.index._type = payload.logGroup;
action.index._id = logEvent.id;
bulkRequestBody += [
JSON.stringify(action),
JSON.stringify(source),
].join('\n') + '\n';
});
return bulkRequestBody;
}
function buildSource(message, extractedFields) {
if (extractedFields) {
var source = {};
for (var key in extractedFields) {
if (extractedFields.hasOwnProperty(key) && extractedFields[key]) {
var value = extractedFields[key];
if (isNumeric(value)) {
source[key] = 1 * value;
continue;
}
jsonSubString = extractJson(value);
if (jsonSubString !== null) {
source['$' + key] = JSON.parse(jsonSubString);
}
source[key] = value;
}
}
return source;
}
jsonSubString = extractJson(message);
if (jsonSubString !== null) {
return JSON.parse(jsonSubString);
}
return {};
}
function extractJson(message) {
var jsonStart = message.indexOf('{');
if (jsonStart < 0) return null;
var jsonSubString = message.substring(jsonStart);
return isValidJson(jsonSubString) ? jsonSubString : null;
}
function isValidJson(message) {
try {
JSON.parse(message);
} catch (e) { return false; }
return true;
}
function isNumeric(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
}
function post(body, callback) {
var requestParams = buildRequest(endpoint, body);
var request = https.request(requestParams, function(response) {
var responseBody = '';
response.on('data', function(chunk) {
responseBody += chunk;
});
response.on('end', function() {
var info = JSON.parse(responseBody);
var failedItems;
var success;
if (response.statusCode >= 200 && response.statusCode < 299) {
failedItems = info.items.filter(function(x) {
return x.index.status >= 300;
});
success = {
"attemptedItems": info.items.length,
"successfulItems": info.items.length - failedItems.length,
"failedItems": failedItems.length
};
}
var error = response.statusCode !== 200 || info.errors === true ? {
"statusCode": response.statusCode,
"responseBody": responseBody
} : null;
callback(error, success, response.statusCode, failedItems);
});
}).on('error', function(e) {
callback(e);
});
request.end(requestParams.body);
}
function buildRequest(endpoint, body) {
var endpointParts = endpoint.match(/^([^\.]+)\.?([^\.]*)\.?([^\.]*)\.amazonaws\.com$/);
var region = endpointParts[2];
var service = endpointParts[3];
var datetime = (new Date()).toISOString().replace(/[:\-]|\.\d{3}/g, '');
var date = datetime.substr(0, 8);
var kDate = hmac('AWS4' + process.env.AWS_SECRET_ACCESS_KEY, date);
var kRegion = hmac(kDate, region);
var kService = hmac(kRegion, service);
var kSigning = hmac(kService, 'aws4_request');
var request = {
host: endpoint,
method: 'POST',
path: '/_bulk',
body: body,
headers: {
'Content-Type': 'application/json',
'Host': endpoint,
'Content-Length': Buffer.byteLength(body),
'X-Amz-Security-Token': process.env.AWS_SESSION_TOKEN,
'X-Amz-Date': datetime
}
};
var canonicalHeaders = Object.keys(request.headers)
.sort(function(a, b) { return a.toLowerCase() < b.toLowerCase() ? -1 : 1; })
.map(function(k) { return k.toLowerCase() + ':' + request.headers[k]; })
.join('\n');
var signedHeaders = Object.keys(request.headers)
.map(function(k) { return k.toLowerCase(); })
.sort()
.join(';');
var canonicalString = [
request.method,
request.path, '',
canonicalHeaders, '',
signedHeaders,
hash(request.body, 'hex'),
].join('\n');
var credentialString = [ date, region, service, 'aws4_request' ].join('/');
var stringToSign = [
'AWS4-HMAC-SHA256',
datetime,
credentialString,
hash(canonicalString, 'hex')
] .join('\n');
request.headers.Authorization = [
'AWS4-HMAC-SHA256 Credential=' + process.env.AWS_ACCESS_KEY_ID + '/' + credentialString,
'SignedHeaders=' + signedHeaders,
'Signature=' + hmac(kSigning, stringToSign, 'hex')
].join(', ');
return request;
}
function hmac(key, str, encoding) {
return crypto.createHmac('sha256', key).update(str, 'utf8').digest(encoding);
}
function hash(str, encoding) {
return crypto.createHash('sha256').update(str, 'utf8').digest(encoding);
}
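To point another log group at this function, which is the part the console does not allow, the subscription can also be created programmatically. Below is a minimal sketch using the aws-sdk; the log group name, filter name, region, and function ARN are placeholders, not values from the setup above:
// Subscribe an additional log group to the same Lambda function.
var AWS = require('aws-sdk');
var cwl = new AWS.CloudWatchLogs({ region: 'us-west-2' });
cwl.putSubscriptionFilter({
    logGroupName: '/aws/lambda/my-other-log-group',
    filterName: 'stream-to-es',
    filterPattern: '',               // empty pattern forwards every log event
    destinationArn: 'arn:aws:lambda:us-west-2:123456789012:function:LogsToElasticsearch'
}, function (err, data) {
    // The target function must already permit invocation by logs.amazonaws.com
    // (lambda add-permission); otherwise this call is rejected.
    if (err) { console.error(err); } else { console.log('Subscription created', data); }
});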
I'm new to DynamoDB and I'm trying to write some data to a table using Lambda.
So far I have this:
'AddFood': function () {
var FoodName = this.event.request.intent.slots.FoodName.value;
var FoodCalories = this.event.request.intent.slots.FoodCalories.value;
console.log('FoodName : ' + FoodName);
const params = {
TableName: 'Foods',
Item: {
'id': {"S": 3},
'calories': {"S": FoodCalories},
'food': {"S": FoodName}
}
};
writeDynamoItem(params, myResult=>{
var say = '';
say = myResult;
say = FoodName + ' with ' + FoodCalories + ' calories has been added ';
this.response.speak(say).listen('try again');
this.emit(':responseReady');
});
function writeDynamoItem(params, callback) {
var AWS = require('aws-sdk');
AWS.config.update({region: AWSregion});
var docClient = new AWS.DynamoDB();
console.log('writing item to DynamoDB table');
docClient.putItem(params, function (err, data) {
if (err) {
callback(err, null)
} else {
callback(null, data)
}
});
}
}
Does anyone know why the data is not appearing in the database?
I have checked the IAM and the policy is set to AmazonDynamoDBFullAccess.
After making a few changes to the write function, the following code allowed me to write items to the database:
function writeDynamoItem(params, callback) {
const AWS = require('aws-sdk');
AWS.config.update({region: AWSregion});
const docClient = new AWS.DynamoDB.DocumentClient({region: 'eu-west-1'});
console.log('writing item to DynamoDB table');
docClient.put(params, function (err, data) {
if (err) {
callback(err, null)
console.error("Unable to write item. Error JSON:", JSON.stringify(err, null, 2))
} else {
callback(null, data)
}
});
}
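Note that DocumentClient works with plain JavaScript values rather than DynamoDB attribute-value maps, so with the function above the params from the question would be written roughly as follows (the id value is a placeholder and must match the table's key type):
// With DocumentClient, Item values are plain JS types; no {"S": ...} wrappers.
const params = {
    TableName: 'Foods',
    Item: {
        id: '3',                    // placeholder id
        calories: FoodCalories,
        food: FoodName
    }
};
writeDynamoItem(params, myResult => { /* ... */ });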
To write to DynamoDB from a Lambda function using Python, you can use the boto3 package and load the dynamodb resource.
Hope this helps; it reads the food and calories from the event and writes them to the table with a generated UUID:
import boto3
import os
import uuid
def writeToDynamo(event, context):
    recordId = str(uuid.uuid4())
    food = event["food"]
    calories = event["calories"]
    print('Generating new DynamoDB record, with ID: ' + recordId)
    print('Input food: ' + food)
    print('Input calories: ' + calories)
    # Creating a new record in the DynamoDB table
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table(os.environ['DB_TABLE_NAME'])
    table.put_item(
        Item={
            'id': recordId,
            'food': food,
            'calories': calories
        }
    )
    return recordId
I've been using source code from AWS Lambda in Action - Poccia, to create users in a User Pool and Identity Pool. I keep getting the error:
Response:
{
"errorMessage": "RequestId: f6511085-f22c-11e7-be27-534dfc5d6456 Process exited before completing request"
}
Request ID:
"f6511085-f22c-11e7-be27-534dfc5d6456"
Function Logs:
START RequestId: f6511085-f22c-11e7-be27-534dfc5d6456 Version: $LATEST
2018-01-05T15:27:38.890Z f6511085-f22c-11e7-be27-534dfc5d6456 TypeError: Pass phrase must be a buffer
at TypeError (native)
at pbkdf2 (crypto.js:576:20)
at Object.exports.pbkdf2 (crypto.js:558:10)
at computeHash (/var/task/lib/cryptoUtils.js:10:10)
at InternalFieldObject.ondone (/var/task/lib/cryptoUtils.js:19:4)
END RequestId: f6511085-f22c-11e7-be27-534dfc5d6456
REPORT RequestId: f6511085-f22c-11e7-be27-534dfc5d6456 Duration: 113.62 ms Billed Duration: 200 ms Memory Size: 128 MB Max Memory Used: 33 MB
RequestId: f6511085-f22c-11e7-be27-534dfc5d6456 Process exited before completing request
I'm new to AWS services and am not sure why this error is occurring. Below is the Lambda function I'm attempting to use, followed by the cryptoUtils.js script it references.
console.log('Loading function');
//Loading standard module, such as crypto and the AWS SDK
var AWS = require('aws-sdk');
var crypto = require('crypto');
var cryptoUtils = require('./lib/cryptoUtils.js'); //Loading the cryptoUtils.js module shared code, included in the uploaded ZIP archive
var config = require('./config.json'); //Loading the configuration in the config.json file, included in the uploaded ZIP archive
var dynamodb = new AWS.DynamoDB({
accessKeyId: 'usingKEYfromIAM',
secretAccessKey: 'usingKEYfromIAM',
}); //Getting the Amazon DynamoDB service object
var ses = new AWS.SES(); //Getting Amazon SES service object
function storeUser(email, password, salt, fn) { //The storeUser() function stores the new user in the DynamoDB table.
var len = 128;
crypto.randomBytes(len, function(err, token) { //A random token sent in the validation email and used to validate a user
if (err) return fn(err);
token = token.toString('hex');
dynamodb.putItem({ //Putting an item in the DynamoDB table
TableName: config.DDB_TABLE, //The table name is taken from the config.json configuration file.
//Most of the data is string ("S"), but the verified attribute is Boolean ("BOOL"),
//new users aren't verified (false), and the randomly generated token is stored in the "verifyToken" attribute
Item: {
email: {
S: email
},
passwordHash: {
S: password
},
passwordSalt: {
S: salt
},
verified: {
BOOL: false
},
verifyToken: {
S: token
}
},
ConditionExpression: 'attribute_not_exists (email)' //This condition avoids overwriting existing users (with the same email).
}, function(err, data) {
if (err) return fn(err);
else fn(null, token); //The storeUser() function returns the randomly generated token.
});
});
}
function sendVerificationEmail(email, token, fn) { //The sendVerificationEmail() function sends the verification email to the new user.
var subject = 'Verification Email for ' + config.EXTERNAL_NAME;
//The verification link, to the verify.html page, passes the randomly generated token as a query parameter.
var verificationLink = config.VERIFICATION_PAGE + '?email=' + encodeURIComponent(email) + '&verify=' + token;
ses.sendEmail({ //Sending the email in HTML format
Source: config.EMAIL_SOURCE,
Destination: {
ToAddresses: [
email
]
},
Message: {
Subject: {
Data: subject
},
Body: {
Html: {
Data: '<html><head>' + '<meta http-equiv= "Content-Type" content="test/html; charset=UTF-8" />' +
'<title>' + subject + '</title>' + '</head><body>' + 'Please <a href="' + verificationLink +
'">click here to verify your email address</a> or a copy & paste the following link in a browser:' +
'<br><br>' + '' + verificationLink + '' + '</body></html>'
}
}
}
}, fn);
}
exports.handler = (event, context, callback) => { //The function that's exported and can be invoked using AWS Lambda as createUser
//Getting the input parameters (email, password) from the event
var email = event.email;
var clearPassword = event.password;
//Using compute-Hash() from cryptoUtils.js to salt the password.
cryptoUtils.computeHash(clearPassword, function(err, salt, hash) {
if (err) {
callback('Error in hash: ' + err);
} else {
storeUser(email, hash, salt, function(err, token) { //Storing the user via the storeUser()function
if (err) {
if (err.code == 'ConditionalCheckFailedException') { //Checking whether the database error is due to the email already being present in the database
//userID already found
callback(null, {
created: false
});
} else {
callback('Error in storeUser: ' + err);
}
} else {
sendVerificationEmail(email, token, function(err, data) { //Sending the verification email
if (err) {
callback('Error in sendVerificationEmail: ' + err);
} else {
callback(null, {
created: true
});
}
});
}
});
}
});
};
var crypto = require('crypto');
function computeHash(password, salt, fn) {
var len = 512;
var iterations = 4096;
var digest = 'sha512';
if (3 == arguments.length) {
crypto.pbkdf2(password, salt, iterations, len, digest, function(err, derivedKey) {
if (err) return fn(err);
else fn(null, salt, derivedKey.toString('base64'));
});
} else {
fn = salt;
crypto.randomBytes(len, function(err, solat) {
if (err) return fn(err);
salt = salt.toString('base64');
computeHash(password, salt, fn);
});
}
}
module.exports.computeHash = computeHash;
If anybody has any suggestions or needs more information to help me determine why the error is occurring I would greatly appreciate it. Thank you.
Is the password you're passing a number?
If so, convert it to a String.
If you don't want to do that, you can pass a Buffer object instead, using the class method Buffer.from(string[, encoding]):
https://nodejs.org/api/buffer.html#buffer_class_method_buffer_from_string_encoding
crypto.pbkdf2(Buffer.from(password, 'utf8'), salt, iterations, len, digest, function(err, derivedKey) {
if (err) return fn(err);
else fn(null, salt, derivedKey.toString('base64'));
});
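For the first option (converting the value to a String), the equivalent one-line change would be roughly:
// Coerce the pass phrase to a string before hashing.
crypto.pbkdf2(String(password), salt, iterations, len, digest, function(err, derivedKey) {
    if (err) return fn(err);
    fn(null, salt, derivedKey.toString('base64'));
});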
Hope it helps!
var crypto = require('crypto');
function computeHash(password, salt, fn) {
// Bytesize. The larger the numbers, the better the security, but the longer it will take to complete
var len = 512;
var iterations = 4096;
var digest = 'sha512';
if (3 == arguments.length) {
crypto.pbkdf2(Buffer.from(password, 'utf8'), salt, iterations, len, digest, function(err, derivedKey) {
if (err) return fn(err);
else fn(null, salt, derivedKey.toString('base64'));
});
} else {
fn = salt;
crypto.randomBytes(len, function(err, salt) {
if (err) return fn(err);
salt = salt.toString('base64');
computeHash(password, salt, fn);
});
}
}
module.exports.computeHash = computeHash;
Error "TypeError: Pass phrase must be a buffer"
does suggest trying a Buffer conversion of input String
Before your test of Buffer.from(password, 'utf8')
did you verify input value is encoding 'utf8',
and not some other encoding such as base64 or latin1 ?
List of encodings that Node.js supports
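As a quick illustration (plain Node.js, not specific to the code above), the same string yields different byte sequences depending on the encoding passed to Buffer.from:
// The same input interpreted with different encodings produces different buffers.
const password = 'c2VjcmV0';                   // happens to be base64 for 'secret'
console.log(Buffer.from(password, 'utf8'));    // <Buffer 63 32 56 6a 63 6d 56 30>
console.log(Buffer.from(password, 'base64'));  // <Buffer 73 65 63 72 65 74>
console.log(Buffer.from(password, 'latin1'));  // <Buffer 63 32 56 6a 63 6d 56 30>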