I followed these steps while trying to run an Android app test via AWS Lambda (Node.js):
Created a project
Created an upload
Uploaded the APK to the signed URL
Once the upload was done, I created a device pool using the following params
var createDevicePoolParams = {
name: "DAP_Device_Pool",
description: "DAP_Android_Devices",
projectArn: projectARN,
rules: [{
attribute: "PLATFORM",
operator: "EQUALS",
value: "\"ANDROID\""
}]
};
Then I called scheduleRun with the following params
var scheduleRunParams = {
appArn: uploadARN,
name: "tarunRun",
devicePoolArn: devicePoolARN,
projectArn: projectARN,
test: {
type: "BUILTIN_FUZZ",
}
};
But I am getting an error about missing or unprocessed resources.
I am not able to understand what I am missing. My understanding is that if I am using the built-in fuzz test type then I don't need to upload any custom test cases.
Can somebody please help point out which step is missing?
The upload has to finish processing first: after your uploads have been processed by Device Farm, call aws devicefarm schedule-run.
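In isolation, the missing step looks roughly like this (a sketch with the aws-sdk v2 client; uploadARN and scheduleRunParams stand for the values from your earlier calls). The full working example in the update below does the same thing with promises:

var AWS = require('aws-sdk');
var devicefarm = new AWS.DeviceFarm({ region: 'us-west-2' });

// poll until Device Farm has finished processing the upload
function waitForUpload(arn, cb) {
    devicefarm.getUpload({ arn: arn }, function (err, data) {
        if (err) return cb(err);
        var status = data.upload.status; // INITIALIZED -> PROCESSING -> SUCCEEDED/FAILED
        if (status === 'SUCCEEDED') return cb(null, data.upload);
        if (status === 'FAILED') return cb(new Error('upload processing failed'));
        setTimeout(function () { waitForUpload(arn, cb); }, 5000);
    });
}

waitForUpload(uploadARN, function (err) {
    if (err) return console.error(err);
    devicefarm.scheduleRun(scheduleRunParams, function (err, data) {
        if (err) return console.error("schedule run failed: ", err);
        console.log("run scheduled: ", data.run.arn);
    });
});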
[update]
I put this code in an AWS Lambda function and it worked there as well. Here is a gist of it:
https://gist.github.com/jamesknowsbest/3ea0e385988b0098e5f9d38bf5a932b6
Here is the code I just authored, and it seems to work with the built-in Fuzz/Explorer tests:
// assume we already executed `npm install aws-sdk`
var AWS = require('aws-sdk');
// assumes we already executed `npm install request`
const request = require("request");
// fs is a Node.js built-in module, no install needed
const fs = require('fs');
// https://stackoverflow.com/a/41641607/8016330
const sleep = (waitTimeInMs) => new Promise(resolve => setTimeout(resolve, waitTimeInMs));
// Device Farm is only available in the us-west-2 region
var devicefarm = new AWS.DeviceFarm({ region: 'us-west-2' });
(async function() {
let project_params = {
name: "test of fuzz tests"
};
let PROJECT_ARN = await devicefarm.createProject(project_params).promise().then(
function(data){
return data.project.arn;
},
function (error) {
console.error("Error creating project", "Error: ", error);
}
);
console.log("Project created ", "Project arn: ", PROJECT_ARN);
// create the upload and upload files to the project
let params = {
name: "app-debug.apk",
type: "ANDROID_APP",
projectArn: PROJECT_ARN
};
let UPLOAD = await devicefarm.createUpload(params).promise().then(
function(data){
return data.upload;
},
function(error){
console.error("Creating upload failed with error: ", error);
}
);
let UPLOAD_ARN = UPLOAD.arn;
let UPLOAD_URL = UPLOAD.url;
console.log("upload created with arn: ", UPLOAD_ARN);
console.log("uploading file...");
let options = {
method: 'PUT',
url: UPLOAD_URL,
headers: {},
body: fs.readFileSync("/path/to/your/apk/file")
};
// wait for upload to finish
await new Promise(function(resolve,reject){
request(options, function (error, response, body) {
if (error) {
console.error("uploading file failed with error: ", error);
reject(error);
}
resolve(body);
});
});
//get the status of the upload and make sure it finished processing before scheduling
let STATUS = await getStatus(UPLOAD_ARN);
console.log("upload status is: ", STATUS);
while(STATUS !== "SUCCEEDED"){
await sleep(5000);
STATUS = await getStatus(UPLOAD_ARN);
console.log("upload status is: ", STATUS);
}
//create device pool
let device_pool_params = {
projectArn: PROJECT_ARN,
name: "Google Pixel 2",
rules: [{"attribute": "ARN","operator":"IN","value":"[\"arn:aws:devicefarm:us-west-2::device:5F20BBED05F74D6288D51236B0FB9895\"]"}]
}
let DEVICE_POOL_ARN = await devicefarm.createDevicePool(device_pool_params).promise().then(
function(data){
return data.devicePool.arn;
},function(error){
console.error("device pool failed to create with error: ",error);
}
);
console.log("Device pool created successfully with arn: ", DEVICE_POOL_ARN);
//schedule the run
let schedule_run_params = {
name: "MyRun",
devicePoolArn: DEVICE_POOL_ARN, // You can get the Amazon Resource Name (ARN) of the device pool by using the list-device-pools CLI command.
projectArn: PROJECT_ARN, // You can get the Amazon Resource Name (ARN) of the project by using the list-projects CLI command.
test: {
type: "BUILTIN_FUZZ"
},
appArn: UPLOAD_ARN
};
let schedule_run_result = await devicefarm.scheduleRun(schedule_run_params).promise().then(
function(data){
return data.run;
},function(error){
console.error("Schedule run command failed with error: ", error);
}
);
console.log("run finished successfully with result: ", schedule_run_result);
})();
async function getStatus(UPLOAD_ARN){
return await devicefarm.getUpload({arn: UPLOAD_ARN}).promise().then(
function(data){
return data.upload.status;
},function(error){
console.error("getting upload failed with error: ", error);
}
);
}
Output is:
Project created Project arn: arn:aws:devicefarm:us-west-2:111122223333:project:b9233b49-967e-4b09-a51a-b5c4101340a1
upload created with arn: arn:aws:devicefarm:us-west-2:111122223333:upload:b9233b49-967e-4b09-a51a-b5c4101340a1/48ffd115-f7d7-4df5-ae96-4a44911bff65
uploading file...
upload status is: INITIALIZED
upload status is: SUCCEEDED
Device pool created successfully with arn: arn:aws:devicefarm:us-west-2:111122223333:devicepool:b9233b49-967e-4b09-a51a-b5c4101340a1/c0ce1bbc-7b40-4a0f-a419-ab024a6b1000
run finished successfully with result: { arn:
'arn:aws:devicefarm:us-west-2:111122223333:run:b9233b49-967e-4b09-a51a-b5c4101340a1/39369894-3829-4e14-81c9-bdfa02c7e032',
name: 'MyRun',
type: 'BUILTIN_FUZZ',
platform: 'ANDROID_APP',
created: 2019-06-06T23:51:13.529Z,
status: 'SCHEDULING',
result: 'PENDING',
started: 2019-06-06T23:51:13.529Z,
counters:
{ total: 0,
passed: 0,
failed: 0,
warned: 0,
errored: 0,
stopped: 0,
skipped: 0 },
totalJobs: 1,
completedJobs: 0,
billingMethod: 'METERED',
seed: 982045377,
appUpload:
'arn:aws:devicefarm:us-west-2:111122223333:upload:b9233b49-967e-4b09-a51a-b5c4101340a1/48ffd115-f7d7-4df5-ae96-4a44911bff65',
eventCount: 6000,
jobTimeoutMinutes: 150,
devicePoolArn:
'arn:aws:devicefarm:us-west-2:111122223333:devicepool:b9233b49-967e-4b09-a51a-b5c4101340a1/c0ce1bbc-7b40-4a0f-a419-ab024a6b1000',
radios: { wifi: true, bluetooth: false, nfc: true, gps: true } }
HTH
-James
I am trying to update a thing's shadow on AWS IoT Core by calling the function 'UpdateThingShadowCommand' from my vueJS web app.
I am following instructions from the documentation here:
https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-iot-data-plane/classes/updatethingshadowcommand.html
However, when I execute the method 'UpdateThingShadowCommand', I keep running into the following error message:
net::ERR_CERT_AUTHORITY_INVALID
And another log message:
TypeError: Failed to fetch
My code is as follows:
import { IoTDataPlaneClient } from "@aws-sdk/client-iot-data-plane";
import { UpdateThingShadowCommand } from "@aws-sdk/client-iot-data-plane";
async myMethod () {
const configIotDataPlaneClient = {
apiVersion: 'XXXXXXXX',
region: 'XXXXXXX',
credentials: {
accessKeyId: 'XXXXXXXXXXXXXXXX',
secretAccessKey: 'XXXXXXXXXXXXXXXXX'
}
};
//Initializing the client
const clientShadow = new IoTDataPlaneClient(configIotDataPlaneClient);
console.log(clientShadow)
const inputShadow = {
payload: new Uint8Array(
Buffer.from(
JSON.stringify({
"state": {
"reported": {
"item1": "val1",
"item2": "val2"
}
}
}),
),
),
//shadowName: "",
thingName: "thing-name"
}
//Updating a thing's shadow
try {
const commandShadow = new UpdateThingShadowCommand(inputShadow);
console.log(commandShadow)
const responseShadow = await clientShadow.send(commandShadow);
console.log("Update Shadow response", responseShadow)
}
catch (error) {
console.log("Update Shadow error: ", error)
}
finally{
console.log("Update Shadow: finally method")
}
}
Can anyone suggest why I may be getting these errors? Any help is much appreciated!
Trigger a Cloud Function whenever a new file is uploaded to a Cloud Storage bucket. This function should call a Dataproc job written in PySpark to read the file and load it into BigQuery.
I want to know how to call a Google Dataproc job from a Cloud Function. Please suggest.
I was able to create a simple Cloud Function that triggers a Dataproc job on a GCS file-create event. In this example, the file in GCS contains a Pig query to execute. However, you can follow the Dataproc API documentation to create a PySpark version (a sketch of that variant is included after package.json below).
index.js:
exports.submitJob = (event, callback) => {
const google = require('googleapis');
const projectId = 'my-project'
const clusterName = 'my-cluster'
const file = event.data;
if (file.name) {
google.auth.getApplicationDefault(function (err, authClient, projectId) {
if (err) {
throw err;
}
const queryFileUri = "gs://" + file.bucket + "/" + file.name
console.log("Using queryFileUri: ", queryFileUri);
if (authClient.createScopedRequired && authClient.createScopedRequired()) {
authClient = authClient.createScoped([
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/userinfo.email'
]);
}
const dataproc = google.dataproc({ version: 'v1beta2', auth: authClient });
dataproc.projects.regions.jobs.submit({
projectId: projectId,
region: "global",
resource: {
"job": {
"placement": {"clusterName": clusterName},
"pigJob": {
"queryFileUri": queryFileUri,
}
}
}
}, function(err, response) {
if (err) {
console.error("Error submitting job: ", err);
}
console.log("Dataproc response: ", response);
callback();
});
});
} else {
throw "Skipped processing file!";
}
};
Make sure to set "Function to execute" to submitJob.
package.json:
{
"name": "sample-cloud-storage",
"version": "0.0.1",
"dependencies":{ "googleapis": "^21.3.0" }
}
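For the PySpark version mentioned above, the job body passed to dataproc.projects.regions.jobs.submit uses a pysparkJob block instead of a pigJob block. A minimal, untested sketch, assuming a PySpark script has already been uploaded to GCS (gs://my-bucket/load_to_bq.py is only a placeholder path):

// inside the same submitJob function, replacing the "pigJob" block shown above
dataproc.projects.regions.jobs.submit({
  projectId: projectId,
  region: "global",
  resource: {
    "job": {
      "placement": {"clusterName": clusterName},
      "pysparkJob": {
        // placeholder: a PySpark script that reads the file and loads it into BigQuery
        "mainPythonFileUri": "gs://my-bucket/load_to_bq.py",
        // pass the uploaded file's GCS URI to the script as an argument
        "args": ["gs://" + file.bucket + "/" + file.name]
      }
    }
  }
}, function(err, response) {
  if (err) {
    console.error("Error submitting job: ", err);
  }
  console.log("Dataproc response: ", response);
  callback();
});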
The following blog post gave me many ideas on how to get started:
https://cloud.google.com/blog/big-data/2016/04/scheduling-dataflow-pipelines-using-app-engine-cron-service-or-cloud-functions
I have set up the Hyperledger blockchain locally. I run the Hyperledger blockchain service within a Docker container. I am able to bring up the node successfully, and able to deploy and write to the blockchain using a sample contract.
But I couldn't read back the data from the blockchain. Below is the error message the blockchain throws. Can anyone please guide me on what's wrong here?
[ibc-js] Deploy Chaincode - Complete
{"query":{},"invoke":{},"details":{"deployed_name":"c123c14a65a511ee79e2a41b23726f473478d002064c01c3ce035cffa1229af083d73f1db220fc2f267b9ae31d66ce2e10113548e7abdf8812986ac3c5770a9c","func":{"invoke":["init","write"],"query":["read"]},"git_url":"https://github.com/IBM-Blockchain/learn-chaincode/finished","options":{"quiet":true,"timeout":60000,"tls":false},"peers":[{"name":"vp0-vp0...:49155","api_host":"127.0.0.1","api_port":49155,"id":"vp0","tls":false}],"timestamp":1470146338831,"users":[],"unzip_dir":"learn-chaincode-master/finished","version":"github.com/hyperledger/fabric/core/chaincode/shim","zip_url":"https://github.com/IBM-Blockchain/learn-chaincode/archive/master.zip"}}
sdk has deployed code and waited
[ibc-js] write - success: { jsonrpc: '2.0',
result:
{ status: 'OK',
message: '8b340e92-f96f-41f6-9b15-6ccb23304360' },
id: 1470146405598 }
write response: { jsonrpc: '2.0',
result:
{ status: 'OK',
message: '8b340e92-f96f-41f6-9b15-6ccb23304360' },
id: 1470146405598 }
[ibc-js] read - success: { jsonrpc: '2.0',
error:
{ code: -32003,
message: 'Query failure',
data: 'Error when querying chaincode: Error:Failed to launch chaincode spec(Could not get deployment transaction for c123c14a65a511ee79e2xxxxxxxxxxxxxxxxe7abdf8812986ac3c5770a9c - LedgerError - ResourceNotFound: ledger: resource not found)' },
id: 1470146405668 }
read response: null { name: 'query() resp error',
code: 400,
details:
{ code: -32003,
message: 'Query failure',
data: 'Error when querying chaincode: Error:Failed to launch chaincode spec(Could not get deployment transaction for c123c14a65a511ee79e2xxxxxxxxxxxxxxxxe7abdf8812986ac3c5770a9c - LedgerError - ResourceNotFound: ledger: resource not found)' } }
I have used IBM Blockchain JS for interacting with the Go contract.
Below is the Node.js code:
// Step 1 ==================================
var Ibc1 = require('ibm-blockchain-js');
var ibc = new Ibc1(/*logger*/); //you can pass a logger such as winston here - optional
var chaincode = {};
// ==================================
// configure ibc-js sdk
// ==================================
var options = {
network:{
peers: [{
"api_host": "127.0.0.1",
"api_port": 49155,
//"api_port_tls": 49157,
"id": "vp4"
}],
users: null,
options: {quiet: true, tls:false, maxRetry: 1}
},
chaincode:{
zip_url: 'https://github.com/IBM-Blockchain/learn-chaincode/archive/master.zip',
unzip_dir: 'learn-chaincode-master/finished',
git_url: 'https://github.com/IBM-Blockchain/learn-chaincode/finished'
}
};
// Step 2 ==================================
ibc.load(options, cb_ready);
// Step 3 ==================================
function cb_ready(err, cc){ //response has chaincode functions
chaincode = cc;
console.log(JSON.stringify(cc));
chaincode.deploy('init', ['Hi hyperledger'], null, cb_deployed);
// Step 5 ==================================
function cb_deployed(){
console.log(JSON.stringify(chaincode));
console.log('sdk has deployed code and waited');
console.log('******** Writing to chaincode Now **********');
chaincode.invoke.write(["mykey","Hi Ledger Systems"],function(err,data){
console.log('write response:', data);
readData();
});
}
function readData()
{
console.log('\n\n**** Waiting 7 seconds before reading **** \n\n');
setTimeout(function () {
console.log('\n\n**** Start reading **** \n\n');
chaincode.invoke.read(["mykey"],function(err,data){
console.log('read response:', data);
});
}, 7000)
}
}
How do I launch a Cloud Dataflow job from a Google Cloud Function? I'd like to use Google Cloud Functions as a mechanism to enable cross-service composition.
I've included a very basic example of the WordCount sample below. Please note that you'll need to include a copy of the java binary in your Cloud Function deployment, since it is not in the default environment. Likewise, you'll need to package your deploy jar with your Cloud Function as well.
module.exports = {
wordcount: function (context, data) {
const spawn = require('child_process').spawn;
const child = spawn(
'jre1.8.0_73/bin/java',
['-cp',
'MY_JAR.jar',
'com.google.cloud.dataflow.examples.WordCount',
'--jobName=fromACloudFunction',
'--project=MY_PROJECT',
'--runner=BlockingDataflowPipelineRunner',
'--stagingLocation=gs://STAGING_LOCATION',
'--inputFile=gs://dataflow-samples/shakespeare/*',
'--output=gs://OUTPUT_LOCATION'
],
{ cwd: __dirname });
child.stdout.on('data', function(data) {
console.log('stdout: ' + data);
});
child.stderr.on('data', function(data) {
console.log('error: ' + data);
});
child.on('close', function(code) {
console.log('closing code: ' + code);
});
context.success();
}
}
You could further enhance this example by using the non-blocking runner and having the function return the Job ID, so that you can poll for job completion separately. This pattern should be valid for other SDKs as well, so long as their dependencies can be packaged into the Cloud Function.
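As a rough, untested sketch of that enhancement (placed inside the same wordcount function as above, so context is available): switch to the non-blocking DataflowPipelineRunner and scrape the job ID from the runner's output. The exact log line is an assumption here; adjust the pattern to whatever your SDK version actually prints.

// hypothetical sketch: capture the job ID printed by the non-blocking runner
const spawn = require('child_process').spawn;
const child = spawn(
  'jre1.8.0_73/bin/java',
  ['-cp', 'MY_JAR.jar',
   'com.google.cloud.dataflow.examples.WordCount',
   '--jobName=fromACloudFunction',
   '--project=MY_PROJECT',
   '--runner=DataflowPipelineRunner',   // non-blocking runner
   '--stagingLocation=gs://STAGING_LOCATION',
   '--inputFile=gs://dataflow-samples/shakespeare/*',
   '--output=gs://OUTPUT_LOCATION'],
  { cwd: __dirname });

let jobId = null;
child.stdout.on('data', function(data) {
  // assumed log format, e.g. "Submitted job: <id>"; verify against your SDK's output
  const match = /Submitted job: (\S+)/.exec(data.toString());
  if (match) { jobId = match[1]; }
});
child.on('close', function(code) {
  console.log('runner exited with code ' + code + ', job ID: ' + jobId);
  context.success(jobId);
});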
The best way to launch it is via a Cloud Function, but be careful: if the Cloud Function is triggered by Google Cloud Storage, then a Dataflow job will be launched for every file uploaded.
const { google } = require('googleapis');
const templatePath = "gs://template_dir/df_template";
const project = "<project_id>";
const tempLoc = "gs://tempLocation/";
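// note: authScope, config.jobNamePrefix and slack (used further down) are assumed
// to be defined elsewhere in the original function; they are not shown in this snippet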
exports.PMKafka = (data, context, callback) => {
const file = data;
console.log(`Event ${context.eventId}`);
console.log(`Event Type: ${context.eventType}`);
console.log(`Bucket Name: ${file.bucket}`);
console.log(`File Name: ${file.name}`);
console.log(`Metageneration: ${file.metageneration}`);
console.log(`Created: ${file.timeCreated}`);
console.log(`Updated: ${file.updated}`);
console.log(`Uploaded File Name - gs://${file.bucket}/${file.name}`);
google.auth.getApplicationDefault(function (err, authClient, projectId) {
if (err) {
throw err;
}
if (authClient.createScopedRequired && authClient.createScopedRequired()) {
authClient = authClient.createScoped(authScope);
}
const dataflow = google.dataflow({ version: 'v1b3', auth: authClient });
var inputDict= {
inputFile: `gs://${file.bucket}/${file.name}`,
...
...
<other_runtime_parameters>
};
var env = {
tempLocation: tempLoc
};
var resource_opts = {
parameters: inputDict,
environment: env,
jobName: config.jobNamePrefix + "-" + new Date().toISOString().toLowerCase().replace(":","-").replace(".","-")
};
var opts = {
gcsPath: templatePath,
projectId: project,
resource: resource_opts
}
console.log(`Dataflow Run Time Options - ${JSON.stringify(opts)}`)
dataflow.projects.templates.launch(opts, function (err, response) {
if (err) {
console.error("problem running dataflow template, error was: ", err);
slack.publishMessage(null, null, false, err);
return;
}
console.log("Dataflow template response: ", response);
var jobid = response["data"]["job"]["id"];
console.log("Dataflow Job ID: ", jobid);
});
callback();
});
};
I am trying to create an AWS Lambda function that does the following process.
Receive S3 "Put" event
Get fileA from S3
Get fileB from the S3 bucket that invoked the Lambda
Launch just one EC2 instance
Create tags for the new EC2 instance
Problem: multiple (5) instances are launched unexpectedly.
One instance is created as expected, but 4 other instances are also launched, for a total of 5.
Logs
In the Log Streams for this function, I found 4 streams for this invocation. None of the streams shows any errors or exceptions, but it seems that the function is executed repeatedly.
Trial
I guessed that the function had timed out and was then re-run.
So I changed the Timeout from 5s to 60s and put a file on S3.
It had some effect: only 2 log streams appeared; the first shows that the function was executed just once, the second shows it was executed twice. 3 instances were launched.
However, I still have no idea why multiple (3) instances are launched.
Any comments are welcome!
Thank you in advance :-)
My Lambda function
My Lambda function is as follows. (It's simplified to hide credential information, but it keeps its basic structure.)
var AWS = require('aws-sdk');
function composeParams(data, config){
var block_device_name = "/dev/xvdb";
var security_groups = [
"MyGroupName"
];
var key_name = 'mykey';
var security_group_ids = [
"sg-xxxxxxx"
];
var subnet_id = "subnet-xxxxxxx";
// Configurations for a new EC2 instance
var params = {
ImageId: 'ami-22d27b22', /* required */
MaxCount: 1, /* required */
MinCount: 1, /* required */
KeyName: key_name,
SecurityGroupIds: security_group_ids,
InstanceType: data.instance_type,
BlockDeviceMappings: [
{
DeviceName: block_device_name,
Ebs: {
DeleteOnTermination: true,
Encrypted: true,
VolumeSize: data.volume_size,
VolumeType: 'gp2'
}
}
],
Monitoring: {
Enabled: false /* required */
},
SubnetId: subnet_id,
UserData: new Buffer(config).toString('base64'),
DisableApiTermination: false,
InstanceInitiatedShutdownBehavior: 'stop',
DryRun: data.dry_run,
EbsOptimized: false
};
return params;
}
exports.handler = function(event, context) {
// Get the object from the event
var s3 = new AWS.S3({ apiVersion: '2006-03-01' });
var bucket = event.Records[0].s3.bucket.name;
var key = event.Records[0].s3.object.key;
// Get fileA
var paramsA = {
Bucket: bucket,
Key: key
};
s3.getObject(paramsA, function(err, data) {
if (err) {
console.log(err);
} else {
var dataA = JSON.parse(String(data.Body));
// Get fileB
var paramsB = {
Bucket: bucket,
Key: 'config/config.yml'
};
s3.getObject(paramsB, function(err, data) {
if (err) {
console.log(err, err.stack);
} else {
var config = data.Body;
/* Some process */
// Launch EC2 Instance
var ec2 = new AWS.EC2({ region: REGION, apiVersion: '2015-04-15' });
var params = composeParams(dataA, config);
ec2.runInstances(params, function(err, data) {
if (err) {
console.log(err, err.stack);
} else {
console.log(data);
// Create tags for instance
for (var i=0; i<data.Instances.length; i++){
var instance = data.Instances[i];
var params = {
Resources: [ /* required */
instance.InstanceId
],
Tags: [ /* required */
{
Key: 'Name',
Value: instance.InstanceId
},
{
Key: 'userID',
Value: dataA.user_id
}
],
DryRun: dataA.dry_run
};
ec2.createTags(params, function(err, data) {
if (err) {
console.log(err, err.stack);
} else {
console.log("Tags created.");
console.log(data);
}
});
}
}
});
}
});
}
});
};
Solved.
Adding context.succeed(message); to the last part of the nested callback prevents the repeated execution of the function.
ec2.createTags(params, function(err, data) {
if (err) {
console.log(err, err.stack);
context.fail('Failed');
} else {
console.log("Tags created.");
console.log(data);
context.succeed('Completed');
}
});
Check the context.aws_request_id value for each invocation in the CloudWatch logs:
If it is the same, the invocation is a retry because your function raised an error. Make your Lambda idempotent (a minimal sketch of one way to do that follows).
If it is different, the duplicate invocations come from your AWS Lambda client's connection timeout. Check the client configuration's request timeout and connect timeout values.
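A minimal idempotency sketch, assuming you are willing to add a DynamoDB table (hypothetically named processed-s3-keys, keyed on objectKey) that records which S3 objects have already been handled; the conditional put fails for duplicates, so retried invocations skip the EC2 launch:

var AWS = require('aws-sdk');
var dynamodb = new AWS.DynamoDB({ apiVersion: '2012-08-10' });

// run "work" only the first time we see this S3 object key
function runOnce(s3Key, work) {
    var params = {
        TableName: 'processed-s3-keys',                    // hypothetical table
        Item: { objectKey: { S: s3Key } },
        ConditionExpression: 'attribute_not_exists(objectKey)'
    };
    dynamodb.putItem(params, function (err) {
        if (err && err.code === 'ConditionalCheckFailedException') {
            console.log('Already processed, skipping: ', s3Key);
            return;
        }
        if (err) { return console.log(err, err.stack); }
        work(); // first time: safe to launch the EC2 instance here
    });
}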
I was having the same problem with the newer runtime (Node.js v4.3). Call
context.callbackWaitsForEmptyEventLoop = false;
before calling
callback(...)
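In context, that looks roughly like this (a sketch; the existing S3/EC2 logic stays the same):

exports.handler = function(event, context, callback) {
    // tell Lambda not to wait for the event loop to drain (open sockets, timers)
    // before freezing the container
    context.callbackWaitsForEmptyEventLoop = false;
    /* ... existing S3/EC2 logic ... */
    callback(null, 'Completed');
};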
Maximum Event Age
When a function returns an error before execution, Lambda returns the event to the queue and attempts to run the function again for up to 6 hours by default. With Maximum Event Age, you can configure the lifetime of an event in the queue from 60 seconds to 6 hours. This allows you to remove any unwanted events based on the event age.
Maximum Retry Attempts
When a function returns an error after execution, Lambda attempts to run it two more times by default. With Maximum Retry Attempts, you can customize the maximum number of retries from 0 to 2. This gives you the option to continue processing new events with fewer or no retries.
Under Configuration > Asynchronous invocation > Retry attempts
you can set it to 0-2.
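The same settings can also be applied programmatically; a sketch with the aws-sdk v2 Lambda client (my-function is a placeholder name, and I'm assuming the asynchronous-invocation config call, putFunctionEventInvokeConfig, is available in your SDK version):

var AWS = require('aws-sdk');
var lambda = new AWS.Lambda({ region: 'us-west-2' });

lambda.putFunctionEventInvokeConfig({
    FunctionName: 'my-function',        // placeholder
    MaximumRetryAttempts: 0,            // 0-2
    MaximumEventAgeInSeconds: 3600      // 60 seconds to 6 hours
}, function (err, data) {
    if (err) console.log(err, err.stack);
    else console.log(data);
});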
Source:
https://aws.amazon.com/about-aws/whats-new/2019/11/aws-lambda-supports-max-retry-attempts-event-age-asynchronous-invocations/