I have a simple test that checks that a user's quota changes correctly after they upload a file.
casper.then(function() {
    quota_begin = this.evaluate(function() {
        return document.querySelector('.storage_used p').textContent;
    });
});

casper.then(function() {
    common.ACTIONS.uploadFile(casper);
});

casper.then(function() {
    quota_changed = this.evaluate(function() {
        return document.querySelector('.storage_used p').textContent;
    });
    this.echo('Storage quota change: ' + quota_begin + ' => ' + quota_changed);
});
That last echo's output gives me:
Storage quota change: Upload quota 0B of 1GB used => Upload quota 192 KB of 1GB used
I'd like to include an assert in the test that fails when the quota does not actually change between quota_begin and quota_changed.
Something like:
test.assert(parseFloat(quota_changed) > parseFloat(quota_begin), "Quota was increased by file");
(doesn't work)
Is there an easy way to assert a diff on the two? regex?
Writing a simple function to parse the used bytes out of that string will do the job:
function get_used_bytes(input) {
    var unit_dict = {'B': 1, 'KB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024};
    // Allow an optional space between the number and the unit ("192 KB" as well as "192KB")
    var ret = /Upload quota ([\d.]+)\s*(\S+) of ([\d.]+)\s*(\S+) used/.exec(input);
    return ret[1] * unit_dict[ret[2]];
}
// get_used_bytes("Upload quota 192 KB of 1GB used")
// 196608
test.assert(get_used_bytes(quota_changed) > get_used_bytes(quota_begin), "Quota was increased by file");
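For completeness, a minimal sketch of how that assert can be wired into a CasperJS test suite, assuming the three casper.then() steps from the question (and whatever navigation or login they depend on) run inside the suite; the start URL below is just a placeholder:
casper.test.begin('Quota increases after upload', 1, function suite(test) {
    casper.start('http://example.com/your-app'); // placeholder URL

    // ... the three casper.then() steps from the question go here ...

    casper.then(function() {
        test.assert(
            get_used_bytes(quota_changed) > get_used_bytes(quota_begin),
            'Quota was increased by file'
        );
    });

    casper.run(function() {
        test.done();
    });
});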
I'm currently writing integration tests for my BatchWriteItem logic using Spock/Groovy. I'm running a Docker container which spins up a real DynamoDB table for this purpose.
This is my Java logic for BatchWriteItem:
public Promise<Boolean> createItemsInBatch(ClientKey clientKey, String accountId, List<SrItems> srItems) {
    List<Item> items = srItems.stream()
            .map(srItem -> createItemFromSrItem(clientKey, createItemRef(srItem.getId(), accountId), srItem))
            .collect(Collectors.toList());

    List<List<Item>> batchItems = Lists.partition(items, 25);

    var promises = batchItems.stream().map(itemsList -> Blocking.get(() -> {
        TableWriteItems tableWriteItems = new TableWriteItems(table.getTableName());
        tableWriteItems.withItemsToPut(itemsList);
        BatchWriteItemOutcome outcome = dynamoDB.batchWriteItem(tableWriteItems);
        return outcome.getUnprocessedItems().values().stream()
                .flatMap(Collection::stream)
                .collect(Collectors.toList());
    })).collect(Collectors.toList());

    return ParallelPromises.yieldAll(promises).map((List<? extends ExecResult<List<WriteRequest>>> results) -> {
        if (results.isEmpty()) {
            return true;
        } else {
            results.stream().map(Result::getValue).flatMap(Collection::stream).forEach(failure -> {
                var failedItem = failure.getPutRequest().getItem();
                logger.error(append("item", failedItem), "Failed to batch write item");
            });
            return false;
        }
    });
}
And this is my current implementation of the test (happy path):
@Unroll
def "createItemsInBatch - #description"(description, srItemsList, createResult) {
    given:
    def dynamoItemService = new DynamoItemService(realTable, amazonDynamoDBClient1) // passing the table running in the docker image + the DynamoDB client associated with it

    when:
    def promised = ExecHarness.yieldSingle {
        dynamoItemService.createItemsInBatch(CLIENT_KEY, 'account-id', srItemsList as List<SrItem>)
    }

    then:
    promised.success == createResult

    where:
    description                                        | srItemsList | createResult
    "single batch req not reaching batch size limit"   | srItems(10) | true
    "double batch req reaching batch size limit"       | srItems(25) | true
    "triple batch req reaching batch size limit"       | srItems(51) | true
}
For context:
srItems() is a function that just creates a bunch of different items to be injected in the service for the BatchWriteItem request
What I want now is to be able to test the unhappy path of my logic, i.e. to get some UnprocessedItems in my outcome and verify that the code below is actually doing its job:
BatchWriteItemOutcome outcome = dynamoDB.batchWriteItem(tableWriteItems);
return outcome.getUnprocessedItems().values().stream().flatMap(Collection::stream).collect(Collectors.toList());
Any help would be greatly appreciated
This is actually quite easy to do: we can force throttling on your DynamoDB table, which will result in UnprocessedItems.
Configure your table with 1 WCU and disable auto-scaling. Now run your BatchWriteItem calls in batches of 25 for a couple of seconds and DynamoDB will begin to throttle requests, returning the throttled items in the UnprocessedItems response and exercising your unhappy path.
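If the test table is created or configured programmatically, the capacity change is a single UpdateTable call. Here is a minimal sketch, shown with the AWS SDK for JavaScript purely for illustration (the endpoint, region, and table name are placeholders; the Java SDK you are already using exposes the same operation via UpdateTableRequest):
const AWS = require("aws-sdk");

// Client pointed at the DynamoDB container used by the tests (placeholder endpoint/region).
const dynamodb = new AWS.DynamoDB({ region: "eu-west-1", endpoint: "http://localhost:8000" });

// Drop the table to 1 WCU so sustained BatchWriteItem traffic gets throttled,
// which surfaces the throttled puts in UnprocessedItems.
dynamodb.updateTable({
    TableName: "my-test-table", // placeholder table name
    ProvisionedThroughput: { ReadCapacityUnits: 1, WriteCapacityUnits: 1 }
}).promise()
    .then(() => console.log("Table capacity set to 1 RCU / 1 WCU"))
    .catch((err) => console.error("UpdateTable failed", err));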
I have an issue: I've got some chat logs that are thousands of lines long, and I'm trying to isolate the messages from one specific user. The log looks like this:
[dd-mm-yy hh:mm pm/am] Username
message
[dd-mm-yy hh:mm pm/am] Username
message
[dd-mm-yy hh:mm pm/am] Username
message
In my file, I want to keep only the messages (not the other information like the date, time, or their username) that one specific user has sent, and delete everything else, so I can process the contents of those messages. Is there anything out there that can help me achieve this? As you can see, it's a very tedious process to go through thousands of lines of logs by hand.
I ended up writing a JS script to do what I wanted, since I couldn't find anything anywhere else. Here it is:
const fs = require("fs");
const readline = require("readline");

async function processLineByLine() {
    const fileStream = fs.createReadStream("./input.txt");

    const rl = readline.createInterface({
        input: fileStream,
        crlfDelay: Infinity,
    });
    // Note: we use the crlfDelay option to recognize all instances of CR LF
    // ('\r\n') in input.txt as a single line break.

    let trigger = false;
    for await (const line of rl) {
        // Each line in input.txt will be successively available here as `line`.
        console.log(`Line from file: ${line}`);

        if (line.includes("YOU DID THIS TO MY BOI LIM#7483")) {
            console.log("true");
            trigger = true;
        } else if (trigger) {
            console.log(`Line sent by user: ${line}`);
            fs.appendFile("output.txt", line + " ", (err) => {
                // throws an error, you could also catch it here
                if (err) throw err;
                // success case, the file was saved
                console.log("line saved");
            });
            trigger = false;
        }
    }
}

processLineByLine();
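For what it's worth, the script relies on for await...of over the readline interface, so it needs a reasonably recent Node.js version (v12 or newer is safe); run it with node against whatever you name the file, with input.txt alongside it, and the matched messages get appended to output.txt.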
I have created a Map/Reduce script which fetches customer invoices and deletes them. If I create a saved search in the UI based on the criteria below, it shows 4 million records. Now, if I run the script, execution stops before completing the getInputData stage because the maximum storage limit of this stage is 200 MB. So I want to fetch the first 4,000 records out of the 4 million, process them, and schedule the script to run every 15 minutes. Here is the code of the first stage (getInputData):
var count = 0;
var counter = 0;
var result = [];

var testSearch = search.create({
    type: 'customrecord1',
    filters: [ 'custrecord_date_created', 'notonorafter', 'startOfLastMonth' ],
    columns: [ 'internalid' ]
});

do {
    var resultSearch = testSearch.run().getRange({
        start: count,
        end: count + 1000
    });
    for (var arr = 0; arr < resultSearch.length; arr++) {
        result.push(resultSearch[arr]);
    }
    counter = count + counter;
} while (resultSearch.length >= 1000 && counter != 4000);

return result;
Creating the saved search takes a long time. Is there any workaround where we can filter just the first 4,000 records during saved search creation?
Why not a custom mass update?
It would be a 5-10 line script that grabs the internal id and record type of the current record in the criteria of the mass update then deletes the record.
I believe this is what search.runPaged() and pagedData.fetch() are for.
search.runPaged runs the current search and returns summary information about paginated results - it does not give you the result set or save the search.
pagedData.fetch retrieves the data within the specified page range.
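For example, here is a minimal sketch of what a paged getInputData could look like, reusing the testSearch from the question and stopping once roughly 4,000 results have been collected:
var pagedData = testSearch.runPaged({ pageSize: 1000 }); // 1000 is the maximum page size
var result = [];

// Walk the page ranges and stop once we have ~4000 results
for (var i = 0; i < pagedData.pageRanges.length && result.length < 4000; i++) {
    var page = pagedData.fetch({ index: pagedData.pageRanges[i].index });
    page.data.forEach(function (r) {
        result.push(r);
    });
}

return result;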
If you are intent on the Map/Reduce approach you can just return your created search. NetSuite will run it and pass each line to the next phase. You can even use a saved search where you limit the number of lines, and then in your summarize phase re-trigger the script if there's anything left to do (sketched after the snippet below).
The syntax for grabbing the first 4k records, though, is:
var toDelete = [];
search.run().each(function(r) {
    toDelete.push(r.id);
    return toDelete.length < 4000;
});
return toDelete;
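And here is a rough sketch of the summarize-phase re-trigger mentioned above; only the summarize entry point is shown, and the script/deployment IDs are placeholders you would replace with your own:
/**
 * @NApiVersion 2.x
 * @NScriptType MapReduceScript
 */
define(['N/search', 'N/task'], function (search, task) {
    function summarize(context) {
        // Re-run the same criteria; if anything is still left, queue another run of this script.
        var remaining = search.create({
            type: 'customrecord1',
            filters: ['custrecord_date_created', 'notonorafter', 'startOfLastMonth'],
            columns: ['internalid']
        }).runPaged().count;

        if (remaining > 0) {
            task.create({
                taskType: task.TaskType.MAP_REDUCE,
                scriptId: 'customscript_delete_invoices',    // placeholder
                deploymentId: 'customdeploy_delete_invoices' // placeholder
            }).submit();
        }
    }

    return { summarize: summarize };
});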
Finally, I normally do this as a scheduled mass update. It tends to interfere less with any production scheduled and map/reduce scripts.
/**
 * @NApiVersion 2.x
 * @NScriptType MassUpdateScript
 */
define(["N/log", "N/record"], function (log, record) {
    function each(params) {
        try {
            record.delete({
                type: params.type,
                id: params.id
            });
            log.audit({ title: 'deleted ' + params.type + ' ' + params.id, details: '' });
        } catch (e) {
            log.error({
                title: 'deleting: ' + params.type + ' ' + params.id,
                details: (e.message || e.toString()) + (e.getStackTrace ? (' \n \n' + e.getStackTrace().join(' \n')) : '')
            });
        }
    }

    return {
        each: each
    };
});
My question is about a strange behaviour I notice both on my iPhone device and in the Codename One simulator (NetBeans).
I invoke the code below, which calls a Google web service to provide a list of food places around a GPS coordinate.
The web service that is called is as follows (KEY OBSCURED):
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXXXXXXXXXXXXXXXX
Each result contains the next page token and thus, the second call (for the subsequent page) is as follows:
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXXXXXXXXXXXXXXXX&pagetoken=YYYYYYYYYYYYYYYYYY
public static byte[] getWSResponseData(String urlString, boolean usePost)
{
    ConnectionRequest r = new ConnectionRequest();
    r.setUrl(urlString);
    r.setPost(usePost);

    InfiniteProgress prog = new InfiniteProgress();
    Dialog dlg = prog.showInifiniteBlocking();
    r.setDisposeOnCompletion(dlg);

    NetworkManager.getInstance().addToQueueAndWait(r);

    try
    {
        Thread.sleep(2000);
    }
    catch (InterruptedException ex)
    {
    }

    byte[] responseData = r.getResponseData();
    return responseData;
}

public static void getLocationsList(double lat, double lng)
{
    boolean done = false;

    while (!done)
    {
        byte[] responseData = getWSResponseData(finalURL, false);
        result = Result.fromContent(parser.parseJSON(new InputStreamReader(new ByteArrayInputStream(responseData))));

        String venueNames[] = result.getAsStringArray("/results/name");
        nextToken = result.getAsString("/next_page_token");

        if (nextToken == null || nextToken.equals(""))
            done = true;
        else
            finalURL = completeURL + "&pagetoken=" + nextToken;
    }
    .....
}
This code works fine with the sleep timer, but when I remove the Thread.sleep, only the first page gets called.
Any help would be appreciated.
Using the debugger does not help as this is a timing issue and the issue does not occur when using the debugger.
Also when I put some print statements into the code
while (!done)
{
    String nextToken = null;
    System.out.println(finalURL);
    ...
}
System.out.println("Total Number of entries returned: " + itemCount);
I get the following output:
First Run (WITHOUT SLEEP):
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXX
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXX&pagetoken=CqQCF...
Total Number of entries returned: 20
Using the network monitor I see that the response to the second WS call returns:
{
    "html_attributions" : [],
    "results" : [],
    "status" : "INVALID_REQUEST"
}
This is strange, as when I cut and paste the WS URL into my browser, it works fine...
Second Run (WITH SLEEP):
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXX
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXX&pagetoken=CqQCFQEAA...
https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=40.714353,-74.00597299999998&radius=200&types=food&key=XXXXXXXXX&pagetoken=CsQDtQEAA...
Total Number of entries returned: 60
Well, it seems to be a Google API issue, as indicated here:
Paging on Google Places API returns status INVALID_REQUEST
I still could not get it to work by adding a random parameter to the WS URL as they suggested, but I will keep trying and post something here if I get it to work. For now I will just keep a 2-second delay between the calls, which seems to work.
Well, I gave up on using the Google WS for this and switched to Yelp; it works very well:
https://api.yelp.com/v3/businesses/search?.....
I have some giant folders saved in Dropbox with more than 10k files in them. I want to check whether a list of files exists there, but I can't get the metadata on the parent folder because I am over the 10k limit.
So I have written code to check whether each file in a list of files is present.
What I can't figure out is how many requests will run concurrently, and how I can increase that number to the maximum my machine can handle.
foreach (string f in files)
{
    client.GetMetaDataAsync("/blah/blah/" + f,
        (response) =>
        {
            found.Add(f);
            count++;
            Console.WriteLine("{0} of {1} found - {2}", count, files.Count, f);
        },
        (error) =>
        {
            if (error.Message.Contains("[NotFound]"))
            {
                missing.Add(f);
                count++;
                Console.WriteLine("{0} of {1} missing - {2}", count, files.Count, f);
            }
            else
            {
                Console.WriteLine("Unknown error");
            }
        });
}