After having spent 7 hours on this, I decided to reach out to you.
I need to update credentials within a Terraform flow. Since the secrets must not end up in the state file, I use an AWS Lambda function to update the secret of the RDS instance. The password is passed via the CLI.
locals {
  db_password = tostring(var.db_user_password)
}

data "aws_lambda_invocation" "credentials_manager" {
  function_name = "credentials-manager"

  input = <<JSON
{
  "secretString": "{\"username\":\"${module.db_instance.db_user}\",\"password\":\"${local.db_password}\",\"dbInstanceIdentifier\":\"${module.db_instance.db_identifier}\"}",
  "secretId": "${module.db_instance.db_secret_id}",
  "storageId": "${module.db_instance.db_identifier}",
  "forcedMod": "${var.forced_mod}"
}
JSON

  depends_on = [
    module.db_instance.db_secret_id,
  ]
}

output "result" {
  description = "String result of Lambda execution"
  value       = jsondecode(data.aws_lambda_invocation.credentials_manager.result)
}
To make sure that the RDS instance status is 'available' again, the Lambda function also contains a waiter.
When I manually execute the function everything works like a charm.
But within Terraform it does not proceed past this line:
data.aws_lambda_invocation.credentials_manager: Refreshing state...
However, when I look into AWS CloudWatch I can see that the Lambda function is being invoked by Terraform over and over again.
This is the lambda policy:
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "Stmt1589715377799",
      "Action": [
        "rds:ModifyDBInstance",
        "rds:DescribeDBInstances"
      ],
      "Effect": "Allow",
      "Resource": "*"
    }
  ]
}
The lambda function looks like this:
const secretsManager = require('aws-sdk/clients/secretsmanager')
const rds = require('aws-sdk/clients/rds')
const elastiCache = require('aws-sdk/clients/elasticache')
const log = require('loglevel')
/////////////////////////////////////////
// ENVIRONMENT VARIABLES
/////////////////////////////////////////
const logLevel = process.env["LOG_LEVEL"];
const region = process.env["REGION"]
/////////////////////////////////////////
// CONFIGURE LOGGER
log.setLevel(logLevel);
let protocol = []
/////////////////////////////////////////
/////////////////////////////////////////
// DEFINE THE CLIENTS
const SM = new secretsManager({ region })
const RDS = new rds({ region })
const ELC = new elastiCache({region})
/////////////////////////////////////////
/////////////////////////////////////////
// FUNCTION DEFINITIONS
/////////////////////////////////////////
// HELPERS
/**
* @function waitForSeconds
* Set a custom waiter that resolves after the given delay.
*
* @param {number} ms - the milliseconds to set as timeout.
*
*/
const waitForSeconds = (ms) => {
return new Promise(resolve => setTimeout(resolve, ms))
}
// AWS SECRETS MANAGER FUNCTIONS
/**
* @function UpdateSecretInSM
* Updates the secret value of the corresponding secret in AWS Secrets Manager.
*
* @param {string} secretId - The id of the secret located in AWS Secrets Manager
* @param {string} secretString - The value of the new secret
*
*/
const UpdateSecretInSM = async (secretId,secretString) => {
const params = {SecretId: secretId, SecretString: secretString}
try {
const data = await SM.updateSecret(params).promise()
log.info(`[INFO]: Password for ${secretId} successfully changed in Secrets Manager!`)
let success = {Timestamp: new Date().toISOString(),Func: 'UpdateSecretInSM', Message: `Secret for ${secretId} successfully changed!`}
protocol.push(success)
return
} catch (err) {
log.debug("[DEBUG]: Error: ", err.stack);
let error = {Timestamp: new Date().toISOString(),Func: 'UpdateSecretInSM', Error: err.stack}
protocol.push(error)
return
}
}
/**
* @function GetSecretFromSM
* The function retrieves the specified secret from AWS Secrets Manager.
* Returns the password.
*
* @param {string} secretId - secretId that is available in AWS Secrets Manager
*
*/
const GetSecretFromSM = async (secretId) => {
try {
const data = await SM.getSecretValue({SecretId: secretId}).promise()
log.debug("[DEBUG]: Secret: ", data);
let success = {Timestamp: new Date().toISOString(),Func: 'GetSecretFromSM', Message: 'Secret from SecretsManager successfully received!'}
protocol.push(success)
const { SecretString } = data
const password = JSON.parse(SecretString)
return password.password
} catch (err) {
log.debug("[DEBUG]: Error: ", err.stack);
let error = {Timestamp: new Date().toISOString(),Func: 'GetSecretFromSM', Error: err.stack}
protocol.push(error)
return
}
}
// AWS RDS FUNCTIONS
/**
* @function ChangeRDSSecret
* Change the master password of the specified RDS instance.
*
* @param {string} rdsId - id of the RDS instance
* @param {string} password - new password
*
*/
const ChangeRDSSecret = async (rdsId,password) => {
const params = {
DBInstanceIdentifier: rdsId,
MasterUserPassword: password
}
try {
await RDS.modifyDBInstance(params).promise()
log.info(`[INFO]: Password for ${rdsId} successfully changed!`)
let success = {Timestamp: new Date().toISOString(), Func: 'ChangeRDSSecret', Message: `Secret for ${rdsId} successfully changed!`}
protocol.push(success)
return
} catch (err) {
log.debug("[DEBUG]: Error: ", err.stack);
let error = {Timestamp: new Date().toISOString(), Func: 'ChangeRDSSecret', Error: err.stack}
protocol.push(error)
return
}
}
/**
* @function DescribeRDSInstance
* Poll the instance every 10 seconds until the password change is visible,
* i.e. the instance status becomes 'resetting-master-credentials'.
*
* @param {string} id - id of the RDS instance
*
*/
const DescribeRDSInstance = async(id) => {
const params = { DBInstanceIdentifier : id }
const secondsToWait = 10000
try {
let pendingModifications = true
while (pendingModifications == true) {
log.info(`[INFO]: Checking modified values for ${id}`)
let data = await RDS.describeDBInstances(params).promise()
console.log(data)
// Extract the 'PendingModifiedValues' object
let myInstance = data['DBInstances']
myInstance = myInstance[0]
if (myInstance.DBInstanceStatus === "resetting-master-credentials") {
log.info(`[INFO]:Password change is being processed!`)
pendingModifications = false
}
log.info(`[INFO]: Waiting for ${secondsToWait/1000} seconds!`)
await waitForSeconds(secondsToWait)
}
let success = {Timestamp: new Date().toISOString(), Func: 'DescribeRDSInstance', Message: `${id} available again!`}
protocol.push(success)
return
} catch (err) {
log.debug("[DEBUG]: Error: ", err.stack);
let error = {Timestamp: new Date().toISOString(), Func: 'DescribeRDSInstance', Error: err.stack}
protocol.push(error)
return
}
}
/**
* @function WaitRDSForAvailableState
* Wait for the instance to be available again.
*
* @param {string} id - id of the RDS instance
*
*/
const WaitRDSForAvailableState = async(id) => {
const params = { DBInstanceIdentifier: id}
try {
log.info(`[INFO]: Waiting for ${id} to be available again!`)
const data = await RDS.waitFor('dBInstanceAvailable', params).promise()
log.info(`[INFO]: ${id} available again!`)
let success = {Timestamp: new Date().toISOString(), Func: 'WaitRDSForAvailableState', Message: `${id} available again!`}
protocol.push(success)
return
} catch (err) {
log.debug("[DEBUG]: Error: ", err.stack);
let error = {Timestamp: new Date().toISOString(), Func: 'WaitRDSForAvailableState', Error: err.stack}
protocol.push(error)
return
}
}
// AWS ELASTICACHE FUNCTIONS
// ... removed since they follow the same principle as RDS
/////////////////////////////////////////
// Lambda Handler
/////////////////////////////////////////
exports.handler = async (event,context,callback) => {
protocol = []
log.debug("[DEBUG]: Event:", event)
log.debug("[DEBUG]: Context:", context)
// Variable for the final message the lambda function will return
let finalValue
// Get the password and RDS identifiers from the Terraform input
const secretString = event.secretString // manual input
const secretId = event.secretId // coming from Secrets Manager
const storageId = event.storageId // coming from db identifier
const forcedMod = event.forcedMod // manual input
// Extract the password from the passed secretString for comparison
const passedSecretStringJSON = JSON.parse(secretString)
const passedSecretString = passedSecretStringJSON.password
const currentSecret = await GetSecretFromSM(secretId)
// Case if the password has already been updated
if (currentSecret !== "ChangeMeViaScriptOrConsole" && passedSecretString === "ChangeMeViaScriptOrConsole") {
log.debug("[DEBUG]: No change necessary.")
finalValue = {timestamp: new Date().toISOString(),
message: 'Lambda function execution finished!',
summary: 'Password already updated. It is not "ChangeMeViaScriptOrConsole."'}
return finalValue
}
// Case if a new password has not been set yet
if (currentSecret === "ChangeMeViaScriptOrConsole" && passedSecretString === "ChangeMeViaScriptOrConsole") {
finalValue = {timestamp: new Date().toISOString(),
message: 'Lambda function execution finished!',
summary: 'Password still "ChangeMeViaScriptOrConsole". Please change me!'}
return finalValue
}
// Case if the passed password equals the stored password and no modification is enforced
if (currentSecret === passedSecretString && forcedMod === "no") {
finalValue = {timestamp: new Date().toISOString(),
message: 'Lambda function execution finished!',
summary: 'Stored password is the same as the passed one. No changes made!'}
return finalValue
}
// Case for changing the password
if (passedSecretString !== "ChangeMeViaScriptOrConsole") {
// Update the secret in SM for the specified RDS Instances
await UpdateSecretInSM(secretId,secretString)
log.debug("[DEBUG]: Secret updated for: ", secretId)
// Fetch the updated secret from SM
const updatedSecret = await GetSecretFromSM(secretId)
log.debug("[DEBUG]: Updated secret: ", updatedSecret)
if (secretId.includes("rds")) {
// Update RDS instance with new secret and wait for it to be available again
await ChangeRDSSecret(storageId, updatedSecret)
await DescribeRDSInstance(storageId)
await WaitRDSForAvailableState(storageId)
} else if (secretId.includes("elasticache")) {
// ... removed since it is analogous to RDS
} else {
protocol.push(`No corresponding Storage Id exists for ${secretId}. Please check the Secret Id/Name in the terraform configuration.`)
}
finalValue = {timestamp: new Date().toISOString(),
message: 'Lambda function execution finished!',
summary: protocol}
return finalValue
} else {
finalValue = {timestamp: new Date().toISOString(),
message: 'Lambda function execution finished!',
summary: 'Nothing changed'}
return finalValue
}
}
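For reference, the event this handler receives from the aws_lambda_invocation data source above looks roughly like this (all values are illustrative placeholders):
// Illustrative event, mirroring the "input" heredoc rendered by Terraform above.
// Note that the handler branches on secretId.includes("rds"), so the secret id
// must contain "rds" for the RDS path to run.
const exampleEvent = {
  secretString: '{"username":"admin","password":"NewPassword123","dbInstanceIdentifier":"my-db"}',
  secretId: "rds-my-db-credentials",
  storageId: "my-db",
  forcedMod: "no"
}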
Does anyone have an idea how to solve or mitigate this behaviour?
Can you please show the IAM policy for your Lambda function? As I understand it, you might be missing the aws_lambda_permission resource for your Lambda function: https://www.terraform.io/docs/providers/aws/r/lambda_permission.html
Related
I am using AWS and have an API which is called via API Gateway, which calls a Node.js Lambda function.
Very often, but randomly, I get 502 responses; when I immediately retry with the exact same request, I get a normal response. So I decided to search the logs to see if I could find any issues.
The following is what I found for one of the requests:
RequestId: xxxxx Error: Runtime exited with error: signal: segmentation fault Runtime.ExitError
as well as:
xxxx ERROR Uncaught Exception
{
"errorType": "Error",
"errorMessage": "Quit inactivity timeout",
"code": "PROTOCOL_SEQUENCE_TIMEOUT",
"fatal": true,
"timeout": 30000,
"stack": [
"Error: Quit inactivity timeout",
" at Quit.<anonymous> (/opt/nodejs/node_modules/mysql/lib/protocol/Protocol.js:160:17)",
" at Quit.emit (node:events:527:28)",
" at Quit.emit (node:domain:475:12)",
" at Quit._onTimeout (/opt/nodejs/node_modules/mysql/lib/protocol/sequences/Sequence.js:124:8)",
" at Timer._onTimeout (/opt/nodejs/node_modules/mysql/lib/protocol/Timer.js:32:23)",
" at listOnTimeout (node:internal/timers:559:17)",
" at processTimers (node:internal/timers:502:7)"
]
}
The following is my reusable SQL connector:
const CustomSecret = require('../secrets/CustomSecret');
const mysql = require("mysql");
module.exports = class MySqlConnect {
databaseCredObject;
constructor() {
}
async queryDb(sql, args) {
if (!this.databaseCredObject) {
await this.fetchSecret();
}
let connection = null;
const connection_settings = {
host: this.databaseCredObject.host,
user: this.databaseCredObject.username,
password: this.databaseCredObject.password,
database: 'logbook'
};
connection = mysql.createConnection(connection_settings);
return new Promise((resolve, reject) => {
connection.connect(function (err) {
if (err) {
console.log('error when connecting to db:', err);
// Reject here, otherwise the promise never settles and the invocation hangs
return reject(err);
} else {
console.log('Connected');
connection.query(sql, args, function (err, result) {
connection.end();
if (err) {
return reject(err);
}
return resolve(result);
});
}
});
});
}
async fetchSecret() {
const databaseCredString = await CustomSecret.getSecret('secretname', 'eu-west-2');
this.databaseCredObject = JSON.parse(databaseCredString);
}
}
Finally this is an example of my lambda function (shortened version):
const {compress, decompress} = require("compress-json");
const MySqlConnect = require("customPackagePath/MySqlConnect");
const CustomJwt = require("customPackagePath/CustomJwt");
const AWS = require("aws-sdk");
const warmer = require("lambda-warmer");
exports.handler = async (event) => {
if (await warmer(event)) {
console.log("Warming");
return 'warmed';
}
let responseCode = 200;
let response = {};
response.headers = {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
};
const bodyContent = JSON.parse(event.body);
const dataType = bodyContent.dataType;
const webAuth = new CustomJwt();
const decodedToken = webAuth.decodeToken(event.headers.Authorization);
const userUUID = decodedToken['uuid'];
const connection = new MySqlConnect();
let sql;
switch (dataType) {
case 'userPreferences':
sql = await connection.queryDb('SELECT * FROM user WHERE uuid = ?', [userUUID]);
break;
}
let data = [];
for (let index in sql) {
data.push(JSON.parse(JSON.stringify(sql[index])));
}
const returnData = {
data
};
let compressed = compress(returnData);
response.statusCode = responseCode;
response.body = JSON.stringify(compressed);
return response;
};
Now, I am new to infrastructure stuff, but it seems to me that once a Lambda function has been called, it's not closing or ending correctly. I am also using the Lambda warmer to keep the functions warm, as seen in the Lambda code, and I'm not sure if that is causing any issues.
I'd appreciate any help with this, as I can't seem to figure it out.
Thanks
After doing more research I decided to add this to my Lambda function:
exports.handler = async (event, context, callback) => {
and to return like this:
callback(null, response);
and ever since, this issue seems to have been resolved. I am not entirely sure why, but for now it's looking good :)
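Putting the two fragments together, the handler shape that worked looks roughly like this (a trimmed sketch, not the full function):
// Async handler that still hands the response to the callback explicitly
exports.handler = async (event, context, callback) => {
  // ... build the response exactly as in the original code ...
  const response = {
    headers: {
      'Content-Type': 'application/json',
      'Access-Control-Allow-Origin': '*'
    },
    statusCode: 200,
    body: JSON.stringify({ data: [] })
  };
  callback(null, response);
};
With an async handler the resolved return value normally becomes the response, but invoking the callback ends the invocation explicitly, which may be why this change helped here.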
On button click I have programmed a call to a GraphQL API which is connected to a Lambda function, and the function pulls data from a DynamoDB table. The query does not produce any error, but it doesn't give me any results either. I have also checked the CloudWatch logs and I don't see any traces of the function being called. Not sure what careless mistake I am making here.
Here is my API call:
void findUser() async {
try {
String graphQLDocument = '''query getUserById(\$userId: ID!) {
getUserById(userId: \$id) {
id
name
}
}''';
var operation = Amplify.API.query(
request: GraphQLRequest<String>(
document: graphQLDocument,
variables: {'id': 'USER-14160000000'}));
var response = await operation.response;
var data = response.data;
print('Query result: ' + data);
} on ApiException catch (e) {
print('Query failed: $e');
}
}
Here is my Lambda function:
const getUserById = require('./user-queries/getUserById');
exports.handler = async (event) => {
var userId = event.arguments.userId;
var name = event.arguments.name;
var avatarUrl = event.arguments.avatarUrl;
//console.log('Received Event - ', JSON.stringify(event,3));
console.log(userId);
switch(event.info.fieldName) {
case "getUserById":
return getUserById(userId);
}
};
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({region: 'ca-central-1'});
async function getUserById(userId) {
const params = {
TableName:"Bol-Table",
KeyConditionExpression: 'pk = :hashKey and sk = :sortKey',
ExpressionAttributeValues: {
':hashKey': userId,
':sortKey': 'USER'
}
};
try {
const Item = await docClient.query(params).promise();
console.log(Item);
return {
id: Item.Items[0].pk,
name: Item.Items[0].details.displayName,
avatarUrl: Item.Items[0].details.avatarUrl,
createdAt: Item.Items[0].details.createdAt,
updatedAt: Item.Items[0].details.updatedAt
};
} catch(err) {
console.log("BOL Error: ", err);
}
}
module.exports = getUserById;
Upon button click I get this
Moving my comment to an answer:
Can you try changing your graphQLDocument to
String graphQLDocument = '''query getUserById(\$id: ID!) {
getUserById(userId: \$id) {
id
name
}
}''';
Your variable is $userId and then $id. Try calling it $id in both places like in your variables object.
Your Flutter code is working fine, but the Lambda on the AWS side is returning a blank string "", so there is nothing to print.
I have this task for my company where I have to do a monthly user access review via CloudWatch.
This is a manual process where I have to go to CloudWatch > CloudWatch Logs > log groups > /var/log/example_access > example-instance and then document the logs for a list of users from a randomly generated date. The example instance is a certificate manager box which is linked to all of our production fleet nodes. I also have to document which commands each user ran on specific nodes.
I'm wondering, is there any way I can automate this process and dump the results into Word docs? It's getting painful as the list of users/employees keeps increasing. Thanks.
Sure there is. I don't reckon you want Word docs though; I'd launch an Elasticsearch instance on AWS and then give the users who want data Kibana access.
Also, circulating Word docs in an org is bad juju; depending on your Windows/Office version it carries risks.
Add this Lambda function, then go into CloudWatch and add it as a subscription filter to the right log groups.
Note you may get missing log entries if they're not logged in JSON format or have funky formatting; if you're using a standard log format it should work.
/* eslint-disable */
// Eslint disabled as this is adapted AWS code.
const zlib = require('zlib')
const elasticsearch = require('elasticsearch')
/**
* This is an example function to stream CloudWatch logs to Elasticsearch.
* @param event
* @param context
* @param callback
*/
// Exported as a regular CommonJS handler so Lambda can invoke it
exports.handler = (event, context, callback) => {
context.callbackWaitsForEmptyEventLoop = true
const payload = Buffer.from(event.awslogs.data, 'base64')
const esClient = new elasticsearch.Client({
httpAuth: process.env.esAuth, // your params here
host: process.env.esEndpoint, // your params here.
})
zlib.gunzip(payload, (err, result) => {
if (err) {
return callback(err)
}
const logObject = JSON.parse(result.toString('utf8'))
const elasticsearchBulkData = transform(logObject)
// Control messages produce no bulk data, so there is nothing to ship
if (!elasticsearchBulkData) {
return callback(null, 'success')
}
const params = { body: [] }
params.body.push(elasticsearchBulkData)
esClient.bulk(params, (err, resp) => {
if (err) {
return callback(err)
}
// Only signal success once the bulk request has completed
callback(null, 'success')
})
})
}
function transform(payload) {
if (payload.messageType === 'CONTROL_MESSAGE') {
return null
}
let bulkRequestBody = ''
payload.logEvents.forEach((logEvent) => {
const timestamp = new Date(1 * logEvent.timestamp)
// index name format: cwl-YYYY.MM.DD
const indexName = [
`cwl-${process.env.NODE_ENV}-${timestamp.getUTCFullYear()}`, // year
(`0${timestamp.getUTCMonth() + 1}`).slice(-2), // month
(`0${timestamp.getUTCDate()}`).slice(-2), // day
].join('.')
const source = buildSource(logEvent.message, logEvent.extractedFields)
source['@id'] = logEvent.id
source['@timestamp'] = new Date(1 * logEvent.timestamp).toISOString()
source['@message'] = logEvent.message
source['@owner'] = payload.owner
source['@log_group'] = payload.logGroup
source['@log_stream'] = payload.logStream
const action = { index: {} }
action.index._index = indexName
action.index._type = 'lambdaLogs'
action.index._id = logEvent.id
bulkRequestBody += `${[
JSON.stringify(action),
JSON.stringify(source),
].join('\n')}\n`
})
return bulkRequestBody
}
function buildSource(message, extractedFields) {
if (extractedFields) {
const source = {}
for (const key in extractedFields) {
if (extractedFields.hasOwnProperty(key) && extractedFields[key]) {
const value = extractedFields[key]
if (isNumeric(value)) {
source[key] = 1 * value
continue
}
const jsonSubString = extractJson(value)
if (jsonSubString !== null) {
source[`$${key}`] = JSON.parse(jsonSubString)
}
source[key] = value
}
}
return source
}
const jsonSubString = extractJson(message)
if (jsonSubString !== null) {
return JSON.parse(jsonSubString)
}
return {}
}
function extractJson(message) {
const jsonStart = message.indexOf('{')
if (jsonStart < 0) return null
const jsonSubString = message.substring(jsonStart)
return isValidJson(jsonSubString) ? jsonSubString : null
}
function isValidJson(message) {
try {
JSON.parse(message)
} catch (e) { return false }
return true
}
function isNumeric(n) {
return !isNaN(parseFloat(n)) && isFinite(n)
}
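For reference, each log event ends up as an action/source line pair in the bulk request body, e.g. (values illustrative):
{"index":{"_index":"cwl-production-2020.05.17","_type":"lambdaLogs","_id":"3589...0001"}}
{"@id":"3589...0001","@timestamp":"2020-05-17T10:15:00.000Z","@message":"alice ran: sudo systemctl restart nginx","@log_group":"/var/log/example_access","user":"alice"}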
Now you should have your logs going into Elasticsearch. Go into Kibana and you can search by date, and even write endpoints to allow people to query their own data!
The easy way is to just give stakeholders Kibana access and let them check it out.
It might not be exactly what you wanted, but I reckon it'll work better.
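If you'd rather script the subscription-filter step than click through the console, something like this should work (a sketch using aws-sdk v2; the log group name, filter name, region, and function ARN are placeholders, and the function additionally needs a resource-based permission allowing CloudWatch Logs to invoke it):
const AWS = require('aws-sdk')
const cwl = new AWS.CloudWatchLogs({ region: 'eu-west-1' })

cwl.putSubscriptionFilter({
  logGroupName: '/var/log/example_access', // log group to stream
  filterName: 'ship-to-elasticsearch',
  filterPattern: '', // empty pattern forwards every event
  destinationArn: 'arn:aws:lambda:eu-west-1:123456789012:function:cwl-to-es'
}, (err) => {
  if (err) console.error('Failed to create subscription filter:', err)
  else console.log('Subscription filter created')
})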
I am trying to write a data transformation for an Aurora Postgres data stream. The identity transformation gives me objects like this:
{
  "type": "DatabaseActivityMonitoringRecords",
  "version": "1.0",
  "databaseActivityEvents": "AYADeLZrKReAa/7tgBmd/d06ybQAXwA...BABVoaWV1Fi8LyA==",
  "key": "AADDDVAS ...pztPgaw=="
}
So it appears to me that I need to decrypt these. I have the ARN of the key attached to the database in question, but I cannot get the data transformation to work. Here is what I have so far:
console.log('Loading function');
const zlib = require('zlib');
const AWS = require('aws-sdk');
exports.handler = async (event, context) => {
/* Process the list of records and transform them */
const output = event.records.map((record) => {
const keyId = "arn:aws:kms:us-east-1:501..89:key/1...d7"; // I don't need this?
const CiphertextBlob = record.data;
const kmsClient = new AWS.KMS({region: 'us-east-1'});
const pt = kmsClient.decrypt({ CiphertextBlob }, (err, data) => {
if (err) console.log(err, err.stack); // an error occurred
else {
const { Plaintext } = data;
console.log(data);
return Plaintext;
}
});
return { old_data : record.data,};
});
console.log(`Processing completed. Successful records ${output.length}.`);
return { records: output };
};
This gives me the baffling error "errorMessage": "Error: Unable to stringify response body". I am following the example outlined here: https://docs.aws.amazon.com/kms/latest/developerguide/programming-encryption.html#decryption
Any idea what I am doing wrong here?
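One thing that stands out in the code above: kmsClient.decrypt is invoked with a callback inside a synchronous map, so every mapped record is returned before decryption has finished, and the callback's return value goes nowhere. A minimal sketch of the promise-based variant (assuming aws-sdk v2, a Firehose-style transformation event with recordId/data fields, and base64-encoded record data):
const AWS = require('aws-sdk');
const kmsClient = new AWS.KMS({ region: 'us-east-1' });

exports.handler = async (event) => {
  const records = await Promise.all(event.records.map(async (record) => {
    // KMS wants the ciphertext as binary, not as a base64 string
    const CiphertextBlob = Buffer.from(record.data, 'base64');
    const { Plaintext } = await kmsClient.decrypt({ CiphertextBlob }).promise();
    return {
      recordId: record.recordId,
      result: 'Ok',
      // Transformed records must be handed back base64-encoded
      data: Buffer.from(Plaintext).toString('base64'),
    };
  }));
  console.log(`Processing completed. Successful records ${records.length}.`);
  return { records };
};
Note also that, as far as I can tell, Database Activity Stream records are envelope-encrypted: the key field is a data key encrypted under the stream's KMS key, so the usual pattern is to KMS-decrypt key first and then decrypt (and gunzip) databaseActivityEvents with the resulting data key, rather than passing the payload itself to KMS.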
I have been trying to launch a new EC2 instance, as well as add a piece of string data to my SQS queue, through Lambda in response to an object upload event in my S3 bucket.
I have been able to successfully update my SQS queue, but I have been unable to initialise the new EC2 instance. Despite setting the Lambda function's time allocation to the maximum of 5 minutes and increasing the memory allocation, an operation timeout error continuously surfaces.
My code is below. Can anyone point out the potential causes of such an error? While I have included my whole piece of code here for reference, the area concerning the launch is towards the end of the code.
Thank you so much!
console.log('Loading function');
var fs = require('fs');
var async = require('async');
var aws = require('aws-sdk');
var s3 = new aws.S3({ apiVersion: '2006-03-01' });
var sqs = new aws.SQS({apiVersion: '2012-11-05'});
var ecs = new aws.ECS({apiVersion: '2014-11-13'});
var ec2 = new aws.EC2({apiVersion: '2015-10-01'});
// Check if the given key suffix matches a suffix in the whitelist. Return true if it matches, false otherwise.
exports.checkS3SuffixWhitelist = function(key, whitelist) {
if(!whitelist){ return true; }
if(typeof whitelist == 'string'){ return key.match(whitelist + '$') }
if(Object.prototype.toString.call(whitelist) === '[object Array]') {
for(var i = 0; i < whitelist.length; i++) {
if(key.match(whitelist[i] + '$')) { return true; }
}
return false;
}
console.log(
'Unsupported whitelist type (' + Object.prototype.toString.call(whitelist) +
') for: ' + JSON.stringify(whitelist)
);
return false;
};
exports.handler = function(event, context) {
//console.log('Received event:', JSON.stringify(event, null, 2));
console.log('Received event:');
//Read in the configuration file
var config = JSON.parse(fs.readFileSync('config.json', 'utf8'));
if(!config.hasOwnProperty('s3_key_suffix_whitelist')) {
config.s3_key_suffix_whitelist = false;
}
console.log('Config: ' + JSON.stringify(config));
var name = event.Records[0].s3.object.key;
if(!exports.checkS3SuffixWhitelist(name, config.s3_key_suffix_whitelist)) {
// Return so execution stops once the invocation has been failed
return context.fail('Suffix for key: ' + name + ' is not in the whitelist');
}
// Get the object from the event and show its key
var bucket = event.Records[0].s3.bucket.name;
var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
var params = {
Bucket: bucket,
Key: key
};
s3.getObject(params, function(err, data) {
if (err) {
console.log(err);
var message = "Error getting object " + key + " from bucket " + bucket +
". Make sure they exist and your bucket is in the same region as this function.";
console.log(message);
context.fail(message);
} else {
console.log('CONTENT TYPE:', key);
context.succeed(key);
}
});
//Sending the image key as a message to SQS and starting a new instance on EC2
async.waterfall([
function(next){
var params = {
MessageBody: JSON.stringify(event),
QueueUrl: config.queue
};
console.log("IN QUEUE FUNCTION");
sqs.sendMessage(params, function (err, data) {
if (err) { console.warn('Error while sending message: ' + err); }
else { console.info('Message sent, ID: ' + data.MessageId); }
next(err);
});
},
function (next) {
console.log("INITIALIZING ECS");
var params = {
ImageId: 'ami-e559b485',
MinCount: 1,
MaxCount: 1,
DryRun: true,
InstanceType: 't2.micro',
KeyName: 'malpem2102',
SubnetId: 'subnet-e8607e8d'
}
ec2.runInstances(params, function(err,data){
if(err){
console.log(err, err.stack);
context.fail('Error', "Error getting file: " + err);
return;
} else{
var instanceId = data.Instances[0].InstanceId;
console.log("Created instance ", instanceId);
context.succeed("Created instance");
}
});
}
], function(err){
if (err) {
context.fail('An error has occurred: ' + err);
}
else {
context.succeed('Successfully processed Amazon S3 URL.');
}
}
);
};
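Two things worth checking in the launch section above: DryRun: true asks EC2 only to validate permissions and always returns an error (DryRunOperation on success), so the success branch can never run; and the nested callbacks make it easy for an error to be swallowed. A promise-based sketch of just the launch step (same parameters as in the question, assuming an aws-sdk v2 version that supports .promise()):
const AWS = require('aws-sdk');
const ec2 = new AWS.EC2({ apiVersion: '2015-10-01' });

async function launchInstance() {
  const params = {
    ImageId: 'ami-e559b485',
    MinCount: 1,
    MaxCount: 1,
    DryRun: false, // must be false to actually launch the instance
    InstanceType: 't2.micro',
    KeyName: 'malpem2102',
    SubnetId: 'subnet-e8607e8d'
  };
  const data = await ec2.runInstances(params).promise();
  console.log('Created instance', data.Instances[0].InstanceId);
  return data.Instances[0].InstanceId;
}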