Here is my test file:
const request = require('supertest');
const app = require('../../src/app');
const fs = require('fs');
jest.mock('fs');
describe('Get Accounts Info', () => {
  it('Get Accounts Info', async () => {
    fs.readFileSync.mockResolvedValue({ message: 'Test' });
    const res = await request(app).get('/accounts');
  });
});
However, when I run it with npm test -- routes.test.js -t "Get Accounts Info", an error appears:
<--- Last few GCs --->
[20668:0000020C15EF9CD0] 82511 ms: Mark-sweep (reduce) 2047.1 (2050.7) -> 2046.4 (2051.9) MB, 4231.6 / 0.0 ms (+ 0.0 ms in 2 steps since start of marking, biggest step 0.0 ms, walltime since start of marking 4232 ms) (average mu = 0.113, current mu = [20668:0000020C15EF9CD0] 88662 ms: Mark-sweep (reduce) 2046.7 (2050.9) -> 2046.5 (2052.2)
MB, 5331.4 / 0.0 ms (+ 0.0 ms in 5 steps since start of marking, biggest step 0.0 ms, walltime since start of marking 6150 ms) (average mu = 0.124, current mu =
<--- JS stacktrace --->
FATAL ERROR: MarkCompactCollector: young object promotion failed Allocation failed - JavaScript heap out of memory
1: 00007FF7BD3A4C6F napi_wrap+111007
2: 00007FF7BD3481F6 v8::base::CPU::has_sse+59910
3: 00007FF7BD3490F6 node::OnFatalError+294
4: 00007FF7BDC2220E v8::Isolate::ReportExternalAllocationLimitReached+94
5: 00007FF7BDC06FDD v8::SharedArrayBuffer::Externalize+781
6: 00007FF7BDAB011C v8::internal::Heap::EphemeronKeyWriteBarrierFromCode+1516
7: 00007FF7BDA9AB9B v8::internal::NativeContextInferrer::Infer+59739
8: 00007FF7BDA7FE6F v8::internal::MarkingWorklists::SwitchToContextSlow+56975
9: 00007FF7BDA93B5B v8::internal::NativeContextInferrer::Infer+31003
10: 00007FF7BDA8AC3D v8::internal::MarkCompactCollector::EnsureSweepingCompleted+6285
11: 00007FF7BDA92DAE v8::internal::NativeContextInferrer::Infer+27502
12: 00007FF7BDA96DFB v8::internal::NativeContextInferrer::Infer+43963
13: 00007FF7BDAA07F2 v8::internal::ItemParallelJob::Task::RunInternal+18
14: 00007FF7BDAA0781 v8::internal::ItemParallelJob::Run+641
How can I fix this, or correctly mock fs.readFileSync?
I prefer mocking only the methods I am actually going to use, rather than auto-mocking the entire module with jest.mock('fs'), like below:
jest.spyOn(fs, 'readFileSync').mockImplementation(function () {
  // Return whatever your route expects to read. Note that readFileSync is
  // synchronous, so return a plain value (string or Buffer), not a promise.
  return JSON.stringify({ message: 'Test' });
});
I'm inconsistently getting this error in a first experiment with OCaml 5.0.0~beta1:
Fatal error: exception Stdlib.Effect.Unhandled(Domainslib__Task.Wait(_, _))
My setup:
Processor: Intel(R) Core(TM) i7-8750H CPU @ 2.20GHz
Debian 10 (buster)
opam version 2.1.3 installed as binary from this script
opam switch: "→ 5.0.0~beta1 ocaml-base-compiler.5.0.0~beta1 5.0.0~beta1"
After a quick read of this tutorial, I copied the parallel_matrix_multiply function and added some code at the end just to use it:
open Domainslib

let parallel_matrix_multiply pool a b =
  let i_n = Array.length a in
  let j_n = Array.length b.(0) in
  let k_n = Array.length b in
  let res = Array.make_matrix i_n j_n 0 in
  Task.parallel_for pool ~start:0 ~finish:(i_n - 1) ~body:(fun i ->
    for j = 0 to j_n - 1 do
      for k = 0 to k_n - 1 do
        res.(i).(j) <- res.(i).(j) + a.(i).(k) * b.(k).(j)
      done
    done);
  res ;;

let pool = Task.setup_pool ~num_domains:3 () in
let a = Array.make_matrix 2 2 1 in
let b = Array.make_matrix 2 2 2 in
let c = parallel_matrix_multiply pool a b in
for i = 0 to 1 do
  for j = 0 to 1 do
    Printf.printf "%d " c.(i).(j)
  done;
  print_char '\n'
done;;
I then compile it with no errors with
ocamlfind ocamlopt -linkpkg -package domainslib parallel_for.ml
and then comes the problem: executing the generated a.out file sometimes (rarely) prints the expected output
4 4
4 4
but usually ends with the error mentioned earlier:
Fatal error: exception Stdlib.Effect.Unhandled(Domainslib__Task.Wait(_, _))
Sorry if I am making some trivial mistake, but I can't understand what is going on, especially given that the error happens inconsistently.
The parallel_matrix_multiply computation is running outside of the Domainslib scheduler, so whenever a task yields to the scheduler, the Wait effect is unhandled and turned into an Effect.Unhandled exception.
The solution is to run the parallel computation within Task.run:
...
let c = Task.run pool (fun () -> parallel_matrix_multiply pool a b) in
...
I am facing a performance issue in Akka remoting. I have 2 actors, Actor1 and Actor2. The messaging between them is a synchronous ask request from Actor1 to Actor2, with the response sent back from Actor2 to Actor1. Below are sample code snippets and the config for my actors:
Actor1.scala:
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import scala.concurrent.Await
import scala.concurrent.duration._

object Actor1 extends App {
  val conf = ConfigFactory.load()
  val system = ActorSystem("testSystem1", conf.getConfig("remote1"))
  val actor = system.actorOf(Props[Actor1].withDispatcher("my-dispatcher"), "actor1")
  implicit val timeOut: Timeout = Timeout(10 seconds)

  class Actor1 extends Actor {
    var value = 0
    var actorRef: ActorRef = null

    override def preStart(): Unit = {
      println(self.path)
    }

    override def receive: Receive = {
      case "register" =>
        actorRef = sender()
        println("Registering the actor")
        val time = System.currentTimeMillis()
        (1 to 300000).foreach(value => {
          if (value % 10000 == 0) {
            println("message count -- " + value + " --- time taken - " + (System.currentTimeMillis() - time))
          }
          Await.result(actorRef ? value, 10 seconds)
        })
        val totalTime = System.currentTimeMillis() - time
        println("Total Time - " + totalTime)
    }
  }
}
Actor2.scala:
import akka.actor.{Actor, ActorSelection, ActorSystem, Props}
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

object Actor2 extends App {
  val conf = ConfigFactory.load()
  val system = ActorSystem("testSystem1", conf.getConfig("remote2"))
  val actor = system.actorOf(Props[Actor2].withDispatcher("my-dispatcher"), "actor2")
  implicit val timeOut: Timeout = Timeout(10 seconds)
  actor ! "send"

  class Actor2 extends Actor {
    var value = 0
    var actorSelection: ActorSelection = context.actorSelection("akka://testSystem1@127.0.0.1:6061/user/actor1")

    override def receive: Receive = {
      case "send" =>
        actorSelection ! "register"
      case int: Int => {
        sender() ! 1
      }
    }
  }
}
application.conf:
remote1 {
  my-dispatcher {
    executor = "thread-pool-executor"
    type = PinnedDispatcher
  }
  akka {
    actor {
      provider = remote
    }
    remote {
      artery {
        transport = tcp # See Selecting a transport below
        canonical.hostname = "127.0.0.1"
        canonical.port = 6061
      }
    }
  }
}

remote2 {
  my-dispatcher {
    executor = "thread-pool-executor"
    type = PinnedDispatcher
  }
  akka {
    actor {
      provider = remote
    }
    remote {
      artery {
        transport = tcp # See Selecting a transport below
        canonical.hostname = "127.0.0.1"
        canonical.port = 6062
      }
    }
  }
}
Output:
message count -- 10000 --- time taken - 5871
message count -- 20000 --- time taken - 9043
message count -- 30000 --- time taken - 12198
message count -- 40000 --- time taken - 15363
message count -- 50000 --- time taken - 18649
message count -- 60000 --- time taken - 22074
message count -- 70000 --- time taken - 25487
message count -- 80000 --- time taken - 28820
message count -- 90000 --- time taken - 32118
message count -- 100000 --- time taken - 35634
message count -- 110000 --- time taken - 39146
message count -- 120000 --- time taken - 42539
message count -- 130000 --- time taken - 45997
message count -- 140000 --- time taken - 50013
message count -- 150000 --- time taken - 53466
message count -- 160000 --- time taken - 57117
message count -- 170000 --- time taken - 61246
message count -- 180000 --- time taken - 65051
message count -- 190000 --- time taken - 68809
message count -- 200000 --- time taken - 72908
message count -- 210000 --- time taken - 77091
message count -- 220000 --- time taken - 80855
message count -- 230000 --- time taken - 84679
message count -- 240000 --- time taken - 89089
message count -- 250000 --- time taken - 93132
message count -- 260000 --- time taken - 97360
message count -- 270000 --- time taken - 101442
message count -- 280000 --- time taken - 105656
message count -- 290000 --- time taken - 109665
message count -- 300000 --- time taken - 113706
Total Time - 113707
Is there anything wrong I am doing here? Any observations or suggestions to improve the performance?
The main issue I see with the code is Await.result(). That is a blocking operation and will most likely hurt throughput: each of the 300000 asks has to complete a full network round trip before the next request is even sent.
I suggest sending the requests without blocking, collecting the results as they arrive (for example in a fixed-size array or list, with a counter as the index), and considering the run complete when the expected number of responses has been received.
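A minimal sketch of that idea (my illustration, not the original poster's code; expected and received are names I introduced, and the rest mirrors the Actor1 above):
import akka.actor.{Actor, ActorRef}

// Sketch of a non-blocking Actor1: fire every request with tell (!),
// count the replies as ordinary messages, and report the total time once
// the expected number of responses has arrived.
class Actor1 extends Actor {
  val expected = 300000
  var received = 0
  var startTime = 0L

  override def receive: Receive = {
    case "register" =>
      val remote: ActorRef = sender()
      startTime = System.currentTimeMillis()
      (1 to expected).foreach(value => remote ! value) // no Await.result here

    case _: Int =>
      received += 1 // Actor2's replies (the Int 1) come back as plain messages
      if (received == expected)
        println("Total Time - " + (System.currentTimeMillis() - startTime))
  }
}
This keeps the remoting pipeline full instead of paying one round-trip latency per message.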
It doesn't matter what arguments I supply for the bufferSize and overflowStrategy parameters of Source.queue, the result is always something like the output at the bottom. I was expecting to see the offer invocations and offer results complete more or less immediately, and to be able to see different processing and offer result messages based on bufferSize and overflowStrategy. What am I doing wrong here?
Code:
import java.time
import java.time.Instant
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import scala.concurrent.ExecutionContextExecutor

def main(args: Array[String]): Unit = {
  implicit val system: ActorSystem = ActorSystem("scratch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher
  val start = Instant.now()
  def elapsed = time.Duration.between(start, Instant.now()).toMillis
  val intSource = Source.queue[Int](2, OverflowStrategy.dropHead)
  val intSink = Sink foreach { ii: Int =>
    Thread.sleep(1000)
    println(s"processing $ii at $elapsed")
  }
  val intChannel = intSource.to(intSink).run()
  (1 to 4) map { ii =>
    println(s"offer invocation for $ii at $elapsed")
    (ii, intChannel.offer(ii))
  } foreach { intFutureOfferResultPair =>
    val (ii, futureOfferResult) = intFutureOfferResultPair
    futureOfferResult onComplete { offerResult =>
      println(s"offer result for $ii: $offerResult at $elapsed")
    }
  }
  intChannel.complete()
  intChannel.watchCompletion.onComplete { _ => system.terminate() }
}
Output:
offer invocation for 1 at 72
offer invocation for 2 at 77
offer invocation for 3 at 77
offer invocation for 4 at 77
offer result for 1: Success(Enqueued) at 90
processing 1 at 1084
offer result for 2: Success(Enqueued) at 1084
processing 2 at 2084
offer result for 3: Success(Enqueued) at 2084
processing 3 at 3084
offer result for 4: Success(Enqueued) at 3084
processing 4 at 4084
I can get the expected behavior by replacing:
val intChannel = intSource.to(intSink).run()
with:
val (intChannel, futureDone) = intSource.async.toMat(intSink)(Keep.both).run()
and:
intChannel.watchCompletion.onComplete { _ => system.terminate() }
with:
futureDone.onComplete { _ => system.terminate() }
Without an async boundary, Akka Streams fuses the queue source and the sink into a single actor, so each offer is only handled between the sink's one-second sleeps; the .async boundary runs the source on its own actor, letting offers complete against the buffer immediately.
Fixed Code:
import java.time
import java.time.Instant
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import scala.concurrent.ExecutionContextExecutor

def main(args: Array[String]): Unit = {
  implicit val system: ActorSystem = ActorSystem("scratch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher
  val start = Instant.now()
  def elapsed = time.Duration.between(start, Instant.now()).toMillis
  val intSource = Source.queue[Int](2, OverflowStrategy.dropHead)
  val intSink = Sink foreach { ii: Int =>
    Thread.sleep(1000)
    println(s"processing $ii at $elapsed")
  }
  val (intChannel, futureDone) = intSource.async.toMat(intSink)(Keep.both).run()
  (1 to 4) map { ii =>
    println(s"offer invocation for $ii at $elapsed")
    (ii, intChannel.offer(ii))
  } foreach { intFutureOfferResultPair =>
    val (ii, futureOfferResult) = intFutureOfferResultPair
    futureOfferResult onComplete { offerResult =>
      println(s"offer result for $ii: $offerResult at $elapsed")
    }
  }
  intChannel.complete()
  futureDone.onComplete { _ => system.terminate() }
}
Output (all four offers now complete almost immediately; with bufferSize 2 and dropHead, elements 1 and 2 were evicted from the buffer before the sink consumed them, which is why only 3 and 4 are processed):
offer invocation for 1 at 84
offer invocation for 2 at 89
offer invocation for 3 at 89
offer invocation for 4 at 89
offer result for 3: Success(Enqueued) at 110
offer result for 4: Success(Enqueued) at 110
offer result for 1: Success(Enqueued) at 110
offer result for 2: Success(Enqueued) at 110
processing 3 at 1102
processing 4 at 2102
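With the async boundary in place, the other overflow strategies should become observable in the offer results too. Here is a sketch of my own (not verified output from the original post; the exact Enqueued/Dropped split is timing-dependent because the sink pulls concurrently) using OverflowStrategy.dropNew:
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import scala.concurrent.ExecutionContextExecutor

object DropNewDemo extends App {
  implicit val system: ActorSystem = ActorSystem("scratch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher
  // dropNew rejects the incoming element when the buffer is full, so that
  // element's offer future completes with Success(Dropped) instead of the
  // queue evicting the oldest buffered element as dropHead does.
  val (queue, done) = Source.queue[Int](2, OverflowStrategy.dropNew)
    .async // keep the queue on its own actor, as in the fixed code above
    .toMat(Sink.foreach { ii: Int =>
      Thread.sleep(1000)
      println(s"processing $ii")
    })(Keep.both)
    .run()
  (1 to 4).foreach { ii =>
    queue.offer(ii).onComplete(r => println(s"offer result for $ii: $r"))
  }
  queue.complete()
  done.onComplete(_ => system.terminate())
}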
Hi, I am using a web service:
https://rally1.rallydev.com/slm/webservice/v2.x/defect/objectId
What I want to do is look up a defect by its FormattedID instead of its objectId.
How can I do it?
You can find the appropriate objectId for a FormattedID using a Rally Lookback API request.
For example, suppose I need the objectId of a test case named TC12345. I would send the following GET query:
https://rally1.rallydev.com/analytics/v2.0/service/rally/workspace/WORKSPACE_ID/artifact/snapshot/query.js?find={"_UnformattedID":12345,"_TypeHierarchy":"TestCase"}&fields=["ObjectID"]
Where:
WORKSPACE_ID is your workspace's numeric ID, which you can obtain on this page: https://rally1.rallydev.com/slm/doc/webservice/objectModel.sp
_UnformattedID (12345) is the numeric part of TC12345
The output can look like the following (see the searched "ObjectID": 22697085352 near the end):
{
  "_rallyAPIMajor": "2",
  "_rallyAPIMinor": "0",
  "Errors": [],
  "Warnings": [],
  "ThreadStats": {
    "cpuTime": "10.0",
    "waitTime": "0",
    "blockedTime": "1",
    "waitCount": "1",
    "blockedCount": "2"
  },
  "Timings": {
    "preProcess": 0,
    "findEtlDate": 36,
    "allowedValuesDisambiguation": 1,
    "mongoQuery": 9,
    "authorization": 3,
    "formattedId": 0,
    "suppressNonRequested": 0,
    "allowedValuesHydration": 0,
    "compressSnapshots": 0,
    "TOTAL": 49
  },
  "GeneratedQuery": {
    "find": {
      "_UnformattedID": 619,
      "_TypeHierarchy": {
        "$in": [-51012, 9467271631, 10486304042]
      },
      "_ValidFrom": {
        "$lte": "2014-09-16T14:21:35.731Z"
      }
    },
    "limit": 100,
    "skip": 0,
    "fields": {
      "ObjectID": 1,
      "Project": 1
    }
  },
  "TotalResultCount": 1,
  "StartIndex": 0,
  "PageSize": 100,
  "ETLDate": "2014-09-16T14:21:35.731Z",
  "Results": [
    {
      "ObjectID": 22697085352
    }
  ]
}
It is easier to use the "query" parameter on the defect entity directly, like:
https://eu1.rallydev.com/slm/webservice/v2.0/defect?query=(FormattedID = DEXXXXX)&fetch=Name,ObjectID
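As an illustration, a minimal Scala sketch of that WSAPI call (my assumptions, not from the answer: the FormattedID DE1234 and the RALLY_API_KEY environment variable are placeholders, and the zsessionid API-key header is the auth mechanism I assume here; adjust to your own setup):
import java.net.{URI, URLEncoder}
import java.net.http.{HttpClient, HttpRequest, HttpResponse}

object RallyDefectLookup extends App {
  // Hypothetical values: substitute your own defect FormattedID and API key.
  val formattedId = "DE1234"
  val apiKey = sys.env.getOrElse("RALLY_API_KEY", "")
  // URL-encode the query expression so its parentheses and spaces survive.
  val query = URLEncoder.encode(s"(FormattedID = $formattedId)", "UTF-8")
  val uri = URI.create(
    s"https://eu1.rallydev.com/slm/webservice/v2.0/defect?query=$query&fetch=Name,ObjectID")
  val request = HttpRequest.newBuilder(uri)
    .header("zsessionid", apiKey) // assumed Rally API-key header
    .GET()
    .build()
  val response = HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString())
  println(response.body()) // JSON containing the defect's Name and ObjectID
}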
A homework assignment for a computer networking software dev class: the prof has us building a port scanner for ports 1-1024, to be run against the local host. The point of the exercise is to demonstrate task-level parallelism using actors. The prof provided code that scans each port in sequence; we are to create a version that does this in parallel, with an actor for each processor or hyperthread available to the system. The goal is to time a full scan of all ports 1-1024 and compare the results of the parallel scan against those of the serial scan. Here's my code for the parallel scan:
import java.net.Socket
import scala.actors._
import Actor._
import scala.collection.mutable.ArrayBuffer
object LowPortScanner {
  var lastPort = 0
  var openPorts = ArrayBuffer[Int]()
  var longestRunTime = 00.00
  var results = List[Tuple3[Int, Range, Double]]()
  val host = "localhost"
  val numProcs = 1 to Runtime.getRuntime().availableProcessors()
  val portsPerProc = 1024 / numProcs.size
  val caller = self

  def main(args: Array[String]): Unit = {
    //spawn an actor for each processor that scans a given port range
    numProcs.foreach { proc =>
      actor {
        val portRange: Range = (lastPort + 1) to (lastPort + portsPerProc)
        lastPort = lastPort + portsPerProc
        caller ! scan(proc, portRange)
      }
    }

    //catch results from the processor actors above
    def act {
      loop {
        reactWithin(100) {
          //update the list of results returned from scan
          case scanResult: Tuple3[Int, Range, Double] =>
            results = results ::: List(scanResult)
          //check if all results have been returned for each actor
          case TIMEOUT =>
            if (results.size == numProcs.size) wrapUp
          case _ =>
            println("got back something weird from one of the port scan actors!")
            wrapUp
        }
      }
    }

    //Attempt to open a socket on each port in the given range
    //returns a Tuple3[procID: Int, ports: Range, time: Double]
    def scan(proc: Int, ports: Range) {
      val startTime = System.nanoTime()
      ports.foreach { n =>
        try {
          println("Processor " + proc + " is checking port " + n)
          val socket = new Socket(host, n)
          //println("Found open port: " + n)
          openPorts += n
          socket.close
        } catch {
          case e: Exception =>
          //println("While scanning port " + n + " caught Exception: " + e)
        }
      }
      (proc, ports, startTime - System.nanoTime())
    }

    //output results and kill the main actor
    def wrapUp {
      println("These are the open ports in the range 1-1024:")
      openPorts.foreach { port => println(port) }
      results.foreach { result => if (result._3 > longestRunTime) { longestRunTime = result._3 } }
      println("Time to scan ports 1 through 1024 is: %3.3f".format(longestRunTime / 1000))
      caller ! exit
    }
  }
}
I have a quad-core i7, so my numProcs = 8. On this hardware platform, each proc actor should scan 128 ports (1024/8 = 128). My intention is for the proc1 actor to scan ports 1-128, proc2 to scan 129-256, and so on. However, this isn't what's happening: some of the actors end up working on the same range as other actors. The output sample below illustrates the issue:
Processor 2 is checking port 1
Processor 7 is checking port 385
Processor 1 is checking port 1
Processor 5 is checking port 1
Processor 4 is checking port 1
Processor 8 is checking port 129
Processor 3 is checking port 1
Processor 6 is checking port 257
Processor 1 is checking port 2
Processor 5 is checking port 2
Processor 1 is checking port 3
Processor 3 is checking port 2
Processor 5 is checking port 3
Processor 1 is checking port 4
EDIT
Final "working" code:
import java.net.Socket
import scala.actors._
import Actor._
import scala.collection.mutable.ArrayBuffer
object LowPortScanner {
  var lastPort = 0
  var openPorts = ArrayBuffer[Int]()
  var longestRunTime = 00.00
  var results = List[Tuple3[Int, Range, Double]]()
  val host = "localhost"
  val numProcs = 1 to Runtime.getRuntime().availableProcessors()
  val portsPerProc = 1024 / numProcs.size
  val caller = self
  val procPortRanges = numProcs.foldLeft(List[Tuple2[Int, Range]]()) { (portRanges, proc) =>
    val tuple2 = (proc.toInt, (lastPort + 1) to (lastPort + portsPerProc))
    lastPort += portsPerProc
    tuple2 :: portRanges
  }

  def main(args: Array[String]): Unit = {
    //spawn an actor for each processor that scans a given port range
    procPortRanges.foreach { proc =>
      actor {
        caller ! scan(proc._1, proc._2)
      }
    }

    //catch results from the processor actors above
    def act {
      loop {
        reactWithin(100) {
          //update the list of results returned from scan
          case scanResult: Tuple3[Int, Range, Double] =>
            results = results ::: List(scanResult)
          //check if results have been returned for each actor
          case TIMEOUT =>
            if (results.size == numProcs.size) wrapUp
          case _ =>
            println("got back something weird from one of the port scan actors!")
            wrapUp
        }
      }
    }

    //Attempt to open a socket on each port in the given range
    //returns a Tuple3[procID: Int, ports: Range, time: Double]
    def scan(proc: Int, ports: Range) {
      val startTime = System.nanoTime()
      ports.foreach { n =>
        try {
          println("Processor " + proc + " is checking port " + n)
          val socket = new Socket(host, n)
          //println("Found open port: " + n)
          openPorts += n
          socket.close
        } catch {
          case e: Exception =>
          //println("While scanning port " + n + " caught Exception: " + e)
        }
      }
      (proc, ports, startTime - System.nanoTime())
    }

    //output results and kill the main actor
    def wrapUp {
      println("These are the open ports in the range 1-1024:")
      openPorts.foreach { port => println(port) }
      results.foreach { result => if (result._3 > longestRunTime) { longestRunTime = result._3 } }
      println("Time to scan ports 1 through 1024 is: %3.3f".format(longestRunTime / 1000))
      caller ! exit
    }
  }
}
On this hardware platform, each proc actor should scan 128 ports (1024/8 = 128).
Except you have
val portsPerProc = numProcs.size / 1024
and 8/1024 is 0. Note that you also have an off-by-one error which causes every actor to scan one more port than portsPerProc: it should scan either lastPort to (lastPort + portsPerProc) - 1 or (lastPort + 1) to (lastPort + portsPerProc).
For the future, if you have a different question, you should ask it separately :) But here you have a very obvious race condition: all actors are trying to execute
val portRange: Range = (lastPort + 1) to (lastPort + portsPerProc)
lastPort = lastPort + portsPerProc
concurrently. Think about what happens when, for example, actors 1 and 2 both execute the first line before either of them reaches the second.
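One race-free sketch of the range assignment (my illustration, equivalent in spirit to the foldLeft in the OP's final code) derives each actor's range purely from its index, so there is no shared mutable lastPort to fight over:
// Each actor's port range is a pure function of its index: with no shared
// mutable state, no interleaving of reads and writes can produce overlaps.
val numProcs = 1 to Runtime.getRuntime().availableProcessors()
val portsPerProc = 1024 / numProcs.size
val procPortRanges: Seq[(Int, Range)] = numProcs.map { proc =>
  val first = (proc - 1) * portsPerProc + 1
  (proc, first to (first + portsPerProc - 1))
}
// e.g. with 8 processors: (1, 1 to 128), (2, 129 to 256), ..., (8, 897 to 1024)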