bcrypt causing unit testing to go crazy - unit-testing

I am using Webpack, Karma and Mocha to run a unit test:
const assert = require('assert');
//const bcrypt = require('bcrypt');

describe('encryptPass', function() {
  it('returned hash should return ', function() {
    assert.equal(1, 1);
  });
});
When I comment out the bcrypt include line, the test runs fine. However, when I leave this line in, it prints a whole load of information and errors to the console:
> auth_app@1.0.0 test /tidee/auth_app
> NODE_ENV=test karma start
Hash: 745e3239cb5645ae9c7c
Version: webpack 1.14.0
Time: 15923ms
Asset Size Chunks Chunk Names
79515ad0788973c533405f7012dfeccd.woff 22.7 kB [emitted]
f4769f9bdb7466be65088239c12046d1.eot 20.1 kB [emitted]
9b281482a6fbc52739698e0c130ea52a.woff2 11.3 kB [emitted]
1fff7dfa22a6c8ba9382ad03726139d3.woff 27.1 kB [emitted]
f2bce001e16fa48466239a51378474d1.woff 28 kB [emitted]
36eeac2edc10f0533f49c8e008896e40.woff 38.4 kB [emitted]
6a00de061f4fcc2ae24f25da53a02f4d.woff 13.2 kB [emitted]
fa2772327f55d8198301fdb8bcfc8158.woff 23.4 kB [emitted]
448c34a56d699c29117adc64c43affeb.woff2 18 kB [emitted]
89889688147bd7575d6327160d64e760.svg 109 kB [emitted]
main 5.62 MB 0 [emitted] main
tests/unit/encryptPass_test.js 14.3 MB 1 [emitted] tests/unit/encryptPass_test.js
chunk {0} main (main) 5.43 MB
[0] multi main 28 bytes {0}
[1] ./~/react/react.js 56 bytes {0} [built]
[2] ./~/process/browser.js 5.3 kB {0} {1} [built]
[3] ./~/moment/moment.js 123 kB {0} [built]
[4] ./~/babel-runtime/helpers/classCallCheck.js 208 bytes {0} [built]
[5] ./~/babel-runtime/helpers/inherits.js 1.11 kB {0} [built]
[6] ./~/babel-runtime/helpers/possibleConstructorReturn.js 542 bytes {0} [built]
[7] ./~/babel-runtime/helpers/extends.js 544 bytes {0} [built]
[8] ./~/babel-runtime/helpers/objectWithoutProperties.js 280 bytes {0} [built]
[9] ./~/classnames/index.js 1.1 kB {0} [built]
[10] ./~/react-bootstrap/lib/utils/bootstrapUtils.js 5.7 kB {0} [built]
[11] ./~/fbjs/lib/invariant.js 1.49 kB {0} [built]
[12] ./~/fbjs/lib/warning.js 2.1 kB {0} [built]
[13] ./~/react-dom/lib/reactProdInvariant.js 1.24 kB {0} [built]
[14] ./~/react-prop-types/lib/elementType.js 1.44 kB {0} [built]
[15] ./~/object-assign/index.js 1.99 kB {0} {1} [built]
[16] ./~/buffer/index.js 48.6 kB {0} {1} [built]
[17] ./~/react-dom/lib/ReactDOMComponentTree.js 6.16 kB {0} [built]
[18] ./~/invariant/browser.js 1.52 kB {0} [built]
[19] ./~/mongoose/lib/utils.js 18 kB {0} [built]
[20] ./~/warning/browser.js 1.81 kB {0} [built]
[21] ./~/fbjs/lib/ExecutionEnvironment.js 1.06 kB {0} [built]
[22] ./~/mongoose/lib/error.js 1.34 kB {0} [built]
[23] ./~/react-bootstrap/lib/utils/createChainedFunction.js 1.04 kB {0} [built]
[24] ./~/react-bootstrap/lib/utils/StyleConfig.js 573 bytes {0} [built]
[25] ./~/react-dom/index.js 59 bytes {0} [built]
[26] ./~/mongoose/lib/schematype.js 26.5 kB {0} [built]
[27] ./~/react-bootstrap/lib/utils/ValidComponentChildren.js 4.36 kB {0} [built]
[28] ./~/react/lib/ReactComponentTreeHook.js 10.4 kB {0} [built]
[29] ./~/axios/lib/utils.js 7.46 kB {0} [built]
[30] ./~/jquery/dist/jquery.js 267 kB {0} [built]
[31] ./~/react-dom/lib/ReactInstrumentation.js 601 bytes {0} [built]
[32] ./~/fbjs/lib/emptyFunction.js 1.08 kB {0} [built]
[33] ./~/mongoose/lib/document.js 68.8 kB {0} [built]
[34] ./~/core-js/library/modules/_core.js 117 bytes {0} [built]
[35] ./~/core-js/library/modules/_wks.js 368 bytes {0} [built]
[36] ./~/react-bootstrap/lib/SafeAnchor.js 3.8 kB {0} [built]
[37] ./~/react-dom/lib/ReactUpdates.js 9.53 kB {0} [built]
[38] ./~/react-intl/lib/index.js 62.1 kB {0} [built]
[39] ./~/react/lib/ReactCurrentOwner.js 623 bytes {0} [built]
[40] ./~/react-dom/lib/SyntheticEvent.js 9.18 kB {0} [built]
[41] ./client/src/helpers/doFetch.js 4.55 kB {0} [built]
...
WARNING in ./~/bcrypt/bcrypt.js
Critical dependencies:
6:15-36 the request of a dependency is an expression
 @ ./~/bcrypt/bcrypt.js 6:15-36
WARNING in ./~/node-pre-gyp/lib/pre-binding.js
Critical dependencies:
19:22-48 the request of a dependency is an expression
 @ ./~/node-pre-gyp/lib/pre-binding.js 19:22-48
WARNING in ./~/bcrypt/CHANGELOG.md
Module parse failed: /tidee/auth_app/node_modules/bcrypt/CHANGELOG.md Unexpected character '#' (1:0)
You may need an appropriate loader to handle this file type.
SyntaxError: Unexpected character '#' (1:0)
at Parser.pp$4.raise (/tidee/auth_app/node_modules/acorn/dist/acorn.js:2221:15)
at Parser.pp$7.getTokenFromCode (/tidee/auth_app/node_modules/acorn/dist/acorn.js:2756:10)
at Parser.pp$7.readToken (/tidee/auth_app/node_modules/acorn/dist/acorn.js:2477:17)
at Parser.pp$7.nextToken (/tidee/auth_app/node_modules/acorn/dist/acorn.js:2468:15)
at Parser.parse (/tidee/auth_app/node_modules/acorn/dist/acorn.js:515:10)
at Object.parse (/tidee/auth_app/node_modules/acorn/dist/acorn.js:3098:39)
at Parser.parse (/tidee/auth_app/node_modules/webpack/lib/Parser.js:902:15)
at NormalModule.<anonymous> (/tidee/auth_app/node_modules/webpack/lib/NormalModule.js:104:16)
at NormalModule.onModuleBuild (/tidee/auth_app/node_modules/webpack-core/lib/NormalModuleMixin.js:310:10)
at nextLoader (/tidee/auth_app/node_modules/webpack-core/lib/NormalModuleMixin.js:275:25)
at /tidee/auth_app/node_modules/webpack-core/lib/NormalModuleMixin.js:259:5
at Storage.finished (/tidee/auth_app/node_modules/enhanced-resolve/lib/CachedInputFileSystem.js:38:16)
at /tidee/auth_app/node_modules/graceful-fs/graceful-fs.js:78:16
at FSReqWrap.readFileAfterClose [as oncomplete] (fs.js:415:3)
 @ ./~/bcrypt ^\.\/.*$
and it goes on and on!

Related

Clojure: Sets, order and purity

Is (vec #{1 2 3}) guaranteed to always return [1 3 2] or could the order be different?
I am not so much interested in the implementation details behind this, but in going from unordered to ordered in general, so that my functions stay pure and easily testable.
As mentioned, standard #{} sets (both PersistentArrayMap and PersistentHashMap; depending on the size) are considered unordered.
Regarding purity with respect to calling seq on a set though, the current implementation does seem to return a well-defined, consistent order; just not an easily predictable one:
(let [r (range 1000)
      seqs (repeatedly 1000 #(seq (add-randomly #{} r)))]
  ; See how many different orders were produced
  (println (count (set seqs)))
  (println (first seqs)))
1
(0 893 920 558 453 584 487 637 972 519 357 716 950 275 530 929 789 389 586 410 433 765 521 451 291 443 798 779 970 249 638 299 121 734 287 65 702 70 949 218 648 812 62 74 774 475 497 580 891 164 282 769 799 273 186 430 641 529 898 370 834 233 298 188 240 110 130 982 620 311 931 882 128 399 989 377 468 259 210 229 153 621 213 670 977 343 958 887 472 7 894 59 934 473 86 756 830 613 491 154 20 224 355 592 610 806 571 466 72 454 888 463 851 770 814 859 58 964 980 205 555 552 60 835 459 175 322 510 662 27 352 493 899 416 777 694 1 631 854 69 101 24 901 547 102 788 713 385 988 135 397 773 490 752 354 884 360 998 961 55 568 797 688 763 269 676 448 527 206 966 165 715 387 652 683 85 721 862 615 681 225 865 297 39 805 274 88 217 46 682 508 149 415 239 478 878 157 345 300 743 921 4 550 204 470 646 77 106 197 405 897 726 776 940 755 902 518 232 260 823 267 119 319 534 222 603 293 95 450 329 144 504 819 818 505 723 992 176 863 471 349 512 710 192 54 92 221 141 502 871 464 801 307 935 758 290 627 517 361 264 137 356 728 976 678 327 234 856 817 104 353 15 48 945 759 242 832 969 50 956 917 557 251 394 116 585 583 75 437 516 994 930 967 687 159 848 995 709 99 540 645 749 479 890 630 916 815 281 402 669 781 740 975 429 309 458 21 388 495 952 626 875 31 113 32 811 827 407 398 136 691 847 825 139 506 396 460 483 589 581 932 174 578 855 331 363 284 208 305 955 796 708 182 256 657 514 731 619 985 485 214 193 685 804 869 836 785 635 442 561 954 656 607 241 314 782 226 235 672 420 418 262 263 304 401 673 40 129 600 729 467 445 317 294 91 810 364 987 880 515 412 553 974 341 117 665 523 172 601 108 156 358 308 908 649 531 923 223 419 365 944 181 417 979 278 56 942 33 13 867 22 618 380 257 338 500 909 993 168 833 496 947 347 501 596 872 792 90 237 826 292 109 216 191 498 829 761 375 525 367 143 742 178 640 247 328 391 990 167 707 36 41 474 187 551 996 528 971 599 376 195 889 316 668 428 303 671 794 905 368 560 565 310 366 118 522 150 886 313 384 567 238 846 962 845 196 162 393 184 219 999 461 89 100 426 604 477 844 541 351 243 131 790 963 629 873 122 933 43 231 61 654 883 598 413 29 784 800 151 369 348 575 693 44 739 258 250 674 539 301 838 424 93 6 684 951 573 408 563 850 616 866 111 997 689 28 456 374 608 737 548 538 895 411 957 134 943 64 623 465 816 334 323 189 280 198 155 295 808 248 587 285 507 227 724 476 941 911 853 494 220 842 103 697 611 170 51 25 261 768 822 201 904 590 489 778 166 447 34 252 978 775 325 594 436 828 535 813 146 741 876 228 907 306 125 276 340 148 482 622 588 17 312 606 3 520 760 720 286 279 879 536 663 12 440 332 330 382 152 544 803 642 435 342 703 783 695 973 2 948 66 484 439 236 556 373 142 359 727 371 772 444 570 757 107 532 984 23 745 719 230 625 47 526 180 786 870 537 659 158 991 350 35 849 644 881 127 927 675 383 533 910 302 564 701 566 821 787 82 76 735 492 718 771 215 97 704 277 926 751 19 335 597 938 57 609 202 68 452 200 868 11 115 946 983 339 431 462 337 698 255 503 546 9 953 857 706 632 457 427 145 5 733 624 831 244 918 824 289 112 925 730 699 712 414 839 802 860 179 344 481 732 661 245 378 913 906 658 266 324 793 680 446 524 254 404 617 283 513 572 705 959 83 634 138 346 14 455 265 449 333 650 639 569 326 746 647 45 53 559 78 924 562 542 912 664 315 914 480 132 753 900 26 766 123 203 667 392 577 807 140 321 795 441 700 268 840 16 320 133 288 381 605 163 81 120 643 79 211 38 173 126 981 421 593 636 98 422 423 614 762 582 666 554 409 574 595 124 747 171 87 169 653 679 843 160 30 400 767 896 928 696 738 809 509 736 207 874 434 690 194 511 73 486 336 96 837 937 10 660 272 499 
488 903 386 270 576 717 543 271 18 395 403 469 105 185 52 545 633 114 968 253 612 628 748 209 147 655 750 852 425 864 67 296 602 318 161 651 725 372 406 438 780 711 71 939 579 877 722 42 919 80 885 986 714 677 199 841 754 791 861 591 744 960 37 183 965 892 432 379 63 212 94 362 8 686 692 764 246 190 549 922 177 915 936 820 49 858 390 84)
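Note that the add-randomly helper isn't shown above; a minimal sketch of what it could look like, assuming it simply conjoins the elements into the set in shuffled order:
(defn add-randomly [s xs]
  (reduce conj s (shuffle xs)))
Any helper that inserts the same elements in a varying order would do for this experiment.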
So yes, it seems that within a single run of a program, the order of (seq #{1 2 3}) can be relied upon, and can be considered pure. The language gives no guarantees though, and this property may not always exist, so really, I wouldn't rely on it. It's an implementation detail.
If you require a consistent ordering, it may be beneficial to have a vector along with the set to define the order. You could do something like:
(def pair [#{} []])

(defn add [p n]
  (-> p
      (update 0 conj n)
      (update 1 conj n)))

(-> pair
    (add 1)
    (add 2))
=> [#{1 2} [1 2]]
Reference the set when you want to do a membership test, and the vector when you need order. Of course, this requires roughly twice as much memory as it otherwise would, so it may not always be practical. Additions to both sets and vectors are essentially constant time, however, so adding elements will still be quick.
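For example, a small usage sketch of the pair from above:
(let [[s v] (-> pair (add 1) (add 2))]
  [(contains? s 2) v])
=> [true [1 2]]
The set answers the membership question, and the vector preserves insertion order.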

How to remove observations' correlation loading points in a correlation loading plot in SAS?

Correlation Loading Plot from Proc PLS in SAS
Hi All,
I used Proc PLS to do a multivariate analysis and got a plot like the one attached. How can I remove the green-colored points in the picture? I think they are the observations' correlation values: I have 90 observations, and each of them gets a loading value on factor1 and factor2, so there are 90 green points shown in the picture. Can anyone tell me which option suppresses them?
The data looks like this:
par1 par2 par3 par4 par5 par6 par7 location
2680 0.546089996 237 1 0.172 2.25 305 5
3750 0.54836587 140 1.55 0.111 1.06 425 5
3590 0.54878718 168 1.27 0.131 0.969 516 5
2390 0.549510935 183 1.07 0.096 1.84 260 5
3780 0.549631747 140 1.12 0.118 1.98 472 5
2790 0.549934008 200 1.1 0.221 2.13 321 5
2880 0.5499945 227 1.14 0.185 1.54 439 5
2910 0.550357733 259 1.31 0.116 1.31 289 5
2420 0.550842789 177 1.32 0.044067423 1.95 260 5
3850 0.550964187 128 1.41 0.117 1.08 471 5
3530 0.552425146 165 1.23 0.11 1.57 494 5
2730 0.552913856 223 1.03 0.17 2 330 5
3130 0.553158535 252 1.02 0.174 2.13 322 5
3040 0.553709856 272 1.21 0.155 1.97 317 5
3830 0.554139421 153 1.27 0.137 1.47 455 5
3930 0.554569654 164 1.17 0.116 1.5 481 5
2430 0.554569654 136 1.3 0.198 2.11 226 8
3630 0.555247085 137 1.17 0.1 1.75 413 5
2490 0.555432126 176 1.06 0.113 1.39 236 5
3490 0.555555556 166 1.28 0.044444444 1.65 465 5
3840 0.556173526 164 1.23 0.0949 1.66 470 5
2480 0.556173526 239 1.28 0.102 2.2 238 5
3760 0.556173526 191 1.33 0.131 2.12 447 5
3850 0.556173526 174 1.35 0.241 2.42 381 3
3410 0.557413601 174 1.14 0.107 1.48 419 5
2960 0.559284116 229 1.08 0.165 1.99 304 5
3410 0.559284116 137 1.19 0.291 2.17 375 8
3300 0.560538117 121 1.13 0.153 1.82 352 8
3090 0.560538117 134 1.16 0.167 1.17 416 4
3210 0.560538117 124 1.09 0.172 0.82 390 4
3950 0.560538117 130 1.29 0.199 1.89 440 4
3300 0.561167228 131 1.06 0.242 2.45 367 8
2210 0.561167228 162 0.885 0.288 3.32 208 4
3170 0.561797753 126 1.3 0.151 1.31 388 4
2740 0.561797753 96.1 1.22 0.245 0.827 254 3
3750 0.561797753 144 1.08 0.257 2.62 366 3
3640 0.562429696 120 1.32 0.159 1.63 347 8
3210 0.563063063 148 1.29 0.206 2.18 352 8
2300 0.563697858 179 0.936 0.181 2.29 223 2
3410 0.564334086 141 0.856 0.136 2.03 370 8
3500 0.564334086 126 1.38 0.177 1.45 355 8
3470 0.564334086 101 0.989 0.222 1.84 349 3
2260 0.564334086 171 0.942 0.224 2.08 219 2
2220 0.564334086 180 0.956 0.281 1.84 219 4
2340 0.564971751 165 1.05 0.228 2.25 240 8
2380 0.564971751 161 0.976 0.287 1.6 214 4
3220 0.56561086 148 1.21 0.121 0.568 520 6
3920 0.566251416 176 1.08 0.045300113 2.26 637 6
3830 0.566251416 137 1.48 0.203 1.23 387 3
2510 0.566251416 152 1.24 0.222 1.84 223 8
2760 0.566251416 168 0.994 0.282 1.31 280 4
2640 0.566251416 154 0.979 0.345 1.52 291 4
3570 0.566893424 165 1.33 0.155 2.18 505 6
3170 0.566893424 126 1.08 0.162 1.41 341 4
3700 0.566893424 159 1.3 0.17 1.64 449 4
3250 0.566893424 104 1.32 0.2 1.37 372 8
3740 0.566893424 159 1.23 0.216 1.69 409 1
3380 0.566893424 163 1.53 0.245 2.19 367 3
3240 0.56753689 136 1.07 0.153 1.88 383 4
3400 0.56753689 109 1.36 0.161 1.16 420 4
3760 0.56753689 150 0.93 0.169 1.68 537 4
3560 0.56753689 123 1.03 0.193 2.32 374 8
2360 0.56753689 163 0.697 0.235 1.94 243 8
2430 0.56753689 166 0.762 0.247 2.31 231 8
3330 0.568181818 148 1.11 0.174 2 393 4
3080 0.568181818 139 1.13 0.188 2.08 349 8
3230 0.568181818 116 1.23 0.199 1.77 328 8
2180 0.568181818 144 1.01 0.215 2.13 207 8
2520 0.568181818 128 0.809 0.369 1.65 306 4
3320 0.568828214 152 1.15 0.14 1.65 395 4
2300 0.568828214 134 0.908 0.221 1.56 233 8
3730 0.568828214 141 1.58 0.238 1.96 405 3
3800 0.568828214 160 1.24 0.241 2.2 402 3
2440 0.568828214 153 1.03 0.258 1.89 223 4
3910 0.568828214 209 1.26 0.275 2.26 350 3
4010 0.569476082 139 1.28 0.045558087 1.7 602 6
2340 0.570125428 167 1.1 0.18 1.57 208 2
2360 0.570125428 176 0.704 0.2 1.6 219 2
3490 0.570776256 171 1.43 0.269 2.4 360 3
2620 0.571428571 132 1.09 0.202 1.8 224 8
3740 0.571428571 172 1.27 0.256 1.92 355 3
3600 0.57208238 128 1.16 0.17 1.94 434 4
3360 0.57208238 150 1.18 0.171 1.81 353 1
3620 0.57208238 131 1.28 0.177 2.24 360 3
3560 0.57208238 139 1.15 0.229 1.9 366 3
2740 0.572737686 277 0.876 0.171 1.71 290 10
2340 0.572737686 148 0.964 0.231 1.18 250 6
2760 0.572737686 168 0.905 0.303 2.1 264 4
2890 0.572737686 204 0.857 0.331 2.32 272 2
The code is:
proc pls data=check method=rrr;
class location;
model par1-par7=location;
run;
In general, I don't think there's a simple way to do what you're looking for. You may want to construct your own graph.
You can get the template for the graph; I'll paste it here. Unfortunately, all of the data on the graph is drawn by a single statement, so it doesn't help to just comment out one line: if you comment out the scatterplot x=CORRX y=CORRY statement, you remove all of the data, not just the observation points. I also don't think the ODS Graphics Editor will be able to do this.
You would probably be best off constructing your own chart using this as a base, calling it from PROC SGRENDER so you can control how the data comes in.
Here's the template, and you'll see the spot I'm talking about:
proc template;
define statgraph Stat.PLS.Graphics.CorrLoadPlot;
dynamic Radius1 Radius2 Radius3 Radius4 xLabel xShortLabel yLabel
yShortLabel CorrX CorrXLab TraceX CorrY CorrYLab TraceY _byline_
_bytitle_ _byfootnote_;
BeginGraph /;
entrytitle "Correlation Loading Plot";
layout overlayequated / equatetype=square commonaxisopts=(
tickvaluelist=(-1.0 -0.75 -0.5 -0.25 0 0.25 0.5 0.75 1.0) viewmin=
-1 viewmax=1) xaxisopts=(label=XLABEL shortlabel=XSHORTLABEL
offsetmin=0.05 offsetmax=0.05 gridDisplay=auto_off) yaxisopts=(
label=YLABEL shortlabel=YSHORTLABEL offsetmin=0.05 offsetmax=0.05
gridDisplay=auto_off);
ellipseparm semimajor=RADIUS1 semiminor=RADIUS1 slope=0.0 xorigin=
0.0 yorigin=0.0 / clip=true display=(outline) outlineattrs=(
pattern=dash) datatransparency=0.75;
scatterplot x=XCIRCLE1LABEL y=YCIRCLE1LABEL / markercharacter=
CIRCLE1LABEL datatransparency=0.75 primary=true;
ellipseparm semimajor=RADIUS2 semiminor=RADIUS2 slope=0.0 xorigin=
0.0 yorigin=0.0 / clip=true display=(outline) outlineattrs=(
pattern=dash) datatransparency=0.75;
scatterplot x=XCIRCLE2LABEL y=YCIRCLE2LABEL / markercharacter=
CIRCLE2LABEL datatransparency=0.75 primary=true;
ellipseparm semimajor=RADIUS3 semiminor=RADIUS3 slope=0.0 xorigin=
0.0 yorigin=0.0 / clip=true display=(outline) outlineattrs=(
pattern=dash) datatransparency=0.75;
scatterplot x=XCIRCLE3LABEL y=YCIRCLE3LABEL / markercharacter=
CIRCLE3LABEL datatransparency=0.75 primary=true;
ellipseparm semimajor=RADIUS4 semiminor=RADIUS4 slope=0.0 xorigin=
0.0 yorigin=0.0 / clip=true display=(outline) outlineattrs=(
pattern=dash) datatransparency=0.75;
scatterplot x=XCIRCLE4LABEL y=YCIRCLE4LABEL / markercharacter=
CIRCLE4LABEL datatransparency=0.75 primary=true;
scatterplot x=CORRX y=CORRY / group=CORRGROUP Name="ScatterVars"
markercharacter=CORRLABEL rolename=(_id1=_ID1 _id2=_ID2 _id3=
_ID3 _id4=_ID4 _id5=_ID5) tip=(y x group markercharacter _id1
_id2 _id3 _id4 _id5) tiplabel=(y=CORRXLAB x=CORRYLAB group=
"Corr Type" markercharacter="Corr ID");
SeriesPlot x=TRACEX y=TRACEY / tip=(y x) tiplabel=(y=CORRYLAB x=
CORRXLAB);
endlayout;
if (_BYTITLE_)
entrytitle _BYLINE_ / textattrs=GRAPHVALUETEXT;
else
if (_BYFOOTNOTE_)
entryfootnote halign=left _BYLINE_;
endif;
endif;
EndGraph;
end;
run;
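If you do go the PROC SGRENDER route, here is a rough sketch of what that could look like (a trimmed-down version of the layout above, with a hypothetical input dataset varloadings and hypothetical column names corr1, corr2 and varname; you would first need to assemble a dataset containing only the variable loadings, since the shipped plot draws variables and observations from the same statement):
proc template;
   define statgraph myCorrLoad;
      begingraph;
         entrytitle "Correlation Loading Plot";
         layout overlayequated / equatetype=square;
            /* unit circle, as in the shipped template */
            ellipseparm semimajor=1 semiminor=1 slope=0 xorigin=0 yorigin=0 /
               display=(outline) outlineattrs=(pattern=dash);
            /* only the variable loadings, labelled by name */
            scatterplot x=corr1 y=corr2 / markercharacter=varname;
         endlayout;
      endgraph;
   end;
run;

proc sgrender data=varloadings template=myCorrLoad;
run;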
I would consider posting this on communities.sas.com and seeing if one of the developers can give you more specific information; Sanjay and Dan often post there and may well be able to give you a simpler answer.

MPI_Rank value is disturbed by MPI_RECV subroutine [duplicate]

This question already has an answer here:
MPI_Recv overwrites parts of memory it should not access
(1 answer)
Closed 7 years ago.
Despite having written long, heavily parallelized codes with complicated send/receives over three-dimensional arrays, this simple code with a two-dimensional array of integers has me at my wits' end. I combed Stack Overflow for possible solutions and found one that slightly resembled the issue I am having:
Boost.MPI: What's received isn't what was sent!
However, the answers there point to the looping segment of code as the culprit for overwriting sections of memory. This case seems to behave even more strangely, though; maybe it is a careless oversight of some simple detail on my part. The problem is with the code below:
program main
   implicit none
   include 'mpif.h'

   integer :: i, j
   integer :: counter, offset
   integer :: rank, ierr, stVal
   integer, dimension(10, 10) :: passMat, prntMat   !! passMat CONTAINS VALUES TO BE PASSED TO prntMat

   call MPI_INIT(ierr)
   call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)

   counter = 0
   offset = (rank + 1)*300
   do j = 1, 10
      do i = 1, 10
         prntMat(i, j) = 10                  !! prntMat OF BOTH RANKS CONTAIN 10
         passMat(i, j) = offset + counter    !! passMat OF rank=0 CONTAINS 300..399 AND rank=1 CONTAINS 600..699
         counter = counter + 1
      end do
   end do

   if (rank == 1) then
      call MPI_SEND(passMat(1:10, 1:10), 100, MPI_INTEGER, 0, 1, MPI_COMM_WORLD, ierr)   !! SEND passMat OF rank=1 to rank=0
   else
      call MPI_RECV(prntMat(1:10, 1:10), 100, MPI_INTEGER, 1, 1, MPI_COMM_WORLD, stVal, ierr)
      do i = 1, 10
         print *, prntMat(:, i)
      end do
   end if

   call MPI_FINALIZE(ierr)
end program main
When I compile the code with mpif90 with no flags and run it on my machine with mpirun -np 2, I get the following output with wrong values in the first four indices of the array:
0 0 400 0 604 605 606 607 608 609
610 611 612 613 614 615 616 617 618 619
620 621 622 623 624 625 626 627 628 629
630 631 632 633 634 635 636 637 638 639
640 641 642 643 644 645 646 647 648 649
650 651 652 653 654 655 656 657 658 659
660 661 662 663 664 665 666 667 668 669
670 671 672 673 674 675 676 677 678 679
680 681 682 683 684 685 686 687 688 689
690 691 692 693 694 695 696 697 698 699
However, when I compile it with the same compiler but with the -O3 flag on, I get the correct output:
600 601 602 603 604 605 606 607 608 609
610 611 612 613 614 615 616 617 618 619
620 621 622 623 624 625 626 627 628 629
630 631 632 633 634 635 636 637 638 639
640 641 642 643 644 645 646 647 648 649
650 651 652 653 654 655 656 657 658 659
660 661 662 663 664 665 666 667 668 669
670 671 672 673 674 675 676 677 678 679
680 681 682 683 684 685 686 687 688 689
690 691 692 693 694 695 696 697 698 699
This error is machine dependent: the issue turns up only on my system running Ubuntu 14.04.2, using OpenMPI 1.6.5.
I tried this on other systems running RedHat and CentOS, and the code ran well both with and without the -O3 flag. Curiously, those machines use an older version of OpenMPI, 1.4.
I am guessing that the -O3 flag is performing some odd optimization that is modifying the manner in which arrays are being passed between the processes.
I also tried other ways of declaring the arrays. The code above uses explicit-shape arrays. With assumed-shape and allocatable arrays I get equally bizarre, if not stranger, results, with some of them seg-faulting. I tried using Valgrind to trace the origin of these seg-faults, but I still haven't gotten the hang of keeping Valgrind from reporting false positives when run on MPI programs.
I believe that resolving the difference in behaviour of the above code will help me understand the tantrums of my other codes as well.
Any help would be greatly appreciated! This code has really gotten me questioning if all the other MPI codes I wrote are sound at all.
Using the Fortran 90 interface to MPI reveals a mismatch in your call to MPI_RECV:
call MPI_RECV(prntMat(1:10, 1:10), 100, MPI_INTEGER, 1, 1, MPI_COMM_WORLD, stVal, ierr)
1
Error: There is no specific subroutine for the generic ‘mpi_recv’ at (1)
This is because the status variable stVal is an integer scalar, rather than an array of MPI_STATUS_SIZE. The F77 interface (include 'mpif.h') to MPI_RECV is:
INCLUDE 'mpif.h'
MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)
<type> BUF(*)
INTEGER COUNT, DATATYPE, SOURCE, TAG, COMM
INTEGER STATUS(MPI_STATUS_SIZE), IERROR
Changing
integer :: rank, ierr, stVal
to
integer :: rank, ierr, stVal(mpi_status_size)
produces a program that works as expected, tested with gfortran 5.1 and OpenMPI 1.8.5.
Using the F90 interface (use mpi vs include "mpif.h") lets the compiler detect the mismatched arguments at compile time rather than producing confusing runtime problems.
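For reference, a minimal self-contained sketch of the corrected pattern (illustrative only, assuming an MPI installation that ships the Fortran mpi module; compile with mpif90 and run with mpirun -np 2):
program recv_status_demo
   use mpi                                ! F90 interface: argument mismatches are caught at compile time
   implicit none
   integer :: rank, ierr
   integer :: stVal(MPI_STATUS_SIZE)      ! status must be an INTEGER array of size MPI_STATUS_SIZE
   integer :: buf(100)

   call MPI_INIT(ierr)
   call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)

   buf = 0
   if (rank == 1) then
      buf = 42
      call MPI_SEND(buf, 100, MPI_INTEGER, 0, 1, MPI_COMM_WORLD, ierr)
   else if (rank == 0) then
      call MPI_RECV(buf, 100, MPI_INTEGER, 1, 1, MPI_COMM_WORLD, stVal, ierr)
      print *, 'rank 0 received, first element =', buf(1)
   end if

   call MPI_FINALIZE(ierr)
end program recv_status_demo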

double free or corruption (fasttop): 0x000000000063d070 *** c++ sieve program

I am writing a sieve program in C++, but for every legitimate input the program always produces output reporting 4 primes found and "2 3 5", no matter how the input varies. When I run the program from the console, it aborts with an error message saying double free or corruption (fasttop): 0x000000000063d070 ***. Btw, I am new to C++.
I am also trying to format the output correctly, but the numbers just end up all over the place.
This is the desired format.
2 3 5 7 11 13 17 19 23 29 31 37 41 43 47 53 59 61 67 71
73 79 83 89 97 101 103 107 109 113 127 131 137 139 149 151 157 163 167 173
179 181 191 193 197 199 211 223 227 229 233 239 241 251 257 263 269 271 277 281
283 293 307 311 313 317 331 337 347 349 353 359 367 373 379 383 389 397 401 409
419 421 431 433 439 443 449 457 461 463 467 479 487 491 499 503 509 521 523 541
547 557 563 569 571 577 587 593 599 601 607 613 617 619 631 641 643 647 653 659
661 673 677 683 691 701 709 719 727 733 739 743 751 757 761 769 773 787 797 809
811 821 823 827 829 839 853 857 859 863 877 881 883 887 907 911 919 929 937 941
947 953 967 971 977 983 991 997
Aside from your double free being caused by calling the destructor explicitly, as @PaulMcKenzie said in the comments, your problem with only outputting the first few primes is because of this line:
int n = sizeof(is_prime_);
is_prime_ is a pointer and so its size is fixed at compile time (probably 4 or 8 bytes depending on your system).
You already have limit_ as a value; you should use that to work out your n.
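For illustration, here is a small self-contained sketch of the idea (not the OP's class): size the sieve from the limit value rather than from sizeof on a pointer, and let std::vector manage the memory so there is no explicit destructor call to cause a double free. It also prints 20 primes per line, matching the desired format:
#include <iostream>
#include <vector>

int main() {
    const int limit = 1000;                   // sieve everything below this bound
    std::vector<bool> is_prime(limit, true);  // size comes from the limit, not sizeof(pointer)
    is_prime[0] = is_prime[1] = false;

    for (int i = 2; i * i < limit; ++i)
        if (is_prime[i])
            for (int j = i * i; j < limit; j += i)
                is_prime[j] = false;

    int printed = 0;
    for (int i = 2; i < limit; ++i)
        if (is_prime[i])
            std::cout << i << (++printed % 20 == 0 ? '\n' : ' ');
    std::cout << '\n';
    return 0;
}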

Qt application killed because Out Of Memory (OOM)

I am running a Qt application on an embedded Linux platform. The system has 128 MB RAM, 512 MB NAND, and no swap. The application uses a custom library for the peripherals; the rest are all Qt and C/C++ libs. The application uses SQLite3 as well.
After 2-3 hours, the machine starts running very slowly, and shell commands take 10 or so seconds to respond. Eventually the machine hangs, and finally the OOM killer kills the application, after which the system behaves at normal speed again.
Observing system memory with the top command reveals that while the application is running, the system's free memory keeps decreasing while slab keeps increasing. Snapshots of top are given below. The application is named xyz.
At Application start :
Mem total:126164 anon:3308 map:8436 free:32456
slab:60936 buf:0 cache:27528 dirty:0 write:0
Swap total:0 free:0
PID VSZ VSZRW^ RSS (SHR) DIRTY (SHR) STACK COMMAND
776 29080 9228 8036 528 968 0 84 ./xyz -qws
781 3960 736 1976 1456 520 0 84 sshd: root@notty
786 3676 680 1208 764 416 0 88 /usr/libexec/sftp-server
770 3792 568 1948 1472 464 0 84 {sshd} sshd: root@pts/0
766 3792 568 956 688 252 0 84 /usr/sbin/sshd
388 1864 284 552 332 188 0 84 udevd --daemon
789 2832 272 688 584 84 0 84 top
774 2828 268 668 560 84 0 84 -sh
709 2896 268 556 464 80 0 84 /usr/sbin/inetd
747 2828 268 596 516 68 0 84 /sbin/getty -L ttymxc0 115200 vt100
777 2824 264 444 368 68 0 84 tee out.log
785 2824 264 484 416 68 0 84 sh -c /usr/libexec/sftp-server
1 2824 264 556 488 64 0 84 init
After some time :
Mem total:126164 anon:3312 map:8440 free:9244
slab:83976 buf:0 cache:27584 dirty:0 write:0
Swap total:0 free:0
PID VSZ VSZRW^ RSS (SHR) DIRTY (SHR) STACK COMMAND
776 29080 9228 8044 528 972 0 84 ./xyz -qws
781 3960 736 1976 1456 520 0 84 sshd: root@notty
786 3676 680 1208 764 416 0 88 /usr/libexec/sftp-server
770 3792 568 1948 1472 464 0 84 {sshd} sshd: root@pts/0
766 3792 568 956 688 252 0 84 /usr/sbin/sshd
388 1864 284 552 332 188 0 84 udevd --daemon
789 2832 272 688 584 84 0 84 top
774 2828 268 668 560 84 0 84 -sh
709 2896 268 556 464 80 0 84 /usr/sbin/inetd
747 2828 268 596 516 68 0 84 /sbin/getty -L ttymxc0 115200 vt100
777 2824 264 444 368 68 0 84 tee out.log
785 2824 264 484 416 68 0 84 sh -c /usr/libexec/sftp-server
1 2824 264 556 488 64 0 84 init
Funnily enough, I cannot see any major changes in the top output for the application itself. Eventually the application is killed; the top output after that:
Mem total:126164 anon:2356 map:916 free:2368
slab:117944 buf:0 cache:1580 dirty:0 write:0
Swap total:0 free:0
PID VSZ VSZRW^ RSS (SHR) DIRTY (SHR) STACK COMMAND
781 3960 736 708 184 520 0 84 sshd: root@notty
786 3724 728 736 172 484 0 88 /usr/libexec/sftp-server
770 3792 568 648 188 460 0 84 {sshd} sshd: root@pts/0
766 3792 568 252 0 252 0 84 /usr/sbin/sshd
388 1864 284 188 0 188 0 84 udevd --daemon
819 2832 272 676 348 84 0 84 top
774 2828 268 512 324 96 0 84 -sh
709 2896 268 80 0 80 0 84 /usr/sbin/inetd
747 2828 268 68 0 68 0 84 /sbin/getty -L ttymxc0 115200 vt100
785 2824 264 68 0 68 0 84 sh -c /usr/libexec/sftp-server
1 2824 264 64 0 64 0 84 init
The dmesg shows :
sh invoked oom-killer: gfp_mask=0xd0, order=2, oomkilladj=0
[<c002d4c4>] (unwind_backtrace+0x0/0xd4) from [<c0073ac0>] (oom_kill_process+0x54/0x1b8)
[<c0073ac0>] (oom_kill_process+0x54/0x1b8) from [<c0073f14>] (__out_of_memory+0x154/0x178)
[<c0073f14>] (__out_of_memory+0x154/0x178) from [<c0073fa0>] (out_of_memory+0x68/0x9c)
[<c0073fa0>] (out_of_memory+0x68/0x9c) from [<c007649c>] (__alloc_pages_nodemask+0x3e0/0x4c8)
[<c007649c>] (__alloc_pages_nodemask+0x3e0/0x4c8) from [<c0076598>] (__get_free_pages+0x14/0x4c)
[<c0076598>] (__get_free_pages+0x14/0x4c) from [<c002f528>] (get_pgd_slow+0x14/0xdc)
[<c002f528>] (get_pgd_slow+0x14/0xdc) from [<c0043890>] (mm_init+0x84/0xc4)
[<c0043890>] (mm_init+0x84/0xc4) from [<c0097b94>] (bprm_mm_init+0x10/0x138)
[<c0097b94>] (bprm_mm_init+0x10/0x138) from [<c00980a8>] (do_execve+0xf4/0x2a8)
[<c00980a8>] (do_execve+0xf4/0x2a8) from [<c002afc4>] (sys_execve+0x38/0x5c)
[<c002afc4>] (sys_execve+0x38/0x5c) from [<c0027d20>] (ret_fast_syscall+0x0/0x2c)
Mem-info:
DMA per-cpu:
CPU 0: hi: 0, btch: 1 usd: 0
Normal per-cpu:
CPU 0: hi: 42, btch: 7 usd: 0
Active_anon:424 active_file:11 inactive_anon:428
inactive_file:3 unevictable:0 dirty:0 writeback:0 unstable:0
free:608 slab:29498 mapped:14 pagetables:59 bounce:0
DMA free:692kB min:268kB low:332kB high:400kB active_anon:0kB inactive_anon:0kB active_file:4kB inactive_file:0kB unevictable:0kB present:24384kB pages_scanned:0 all_unreclaimable? no
lowmem_reserve[]: 0 103 103
Normal free:1740kB min:1168kB low:1460kB high:1752kB active_anon:1696kB inactive_anon:1712kB active_file:40kB inactive_file:12kB unevictable:0kB present:105664kB pages_scanned:0 all_unreclaimable? no
lowmem_reserve[]: 0 0 0
DMA: 3*4kB 3*8kB 5*16kB 2*32kB 4*64kB 2*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 692kB
Normal: 377*4kB 1*8kB 4*16kB 1*32kB 2*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 1740kB
30 total pagecache pages
0 pages in swap cache
Swap cache stats: add 0, delete 0, find 0/0
Free swap = 0kB
Total swap = 0kB
32768 pages of RAM
687 free pages
1306 reserved pages
29498 slab pages
59 pages shared
0 pages swap cached
Out of memory: kill process 774 (sh) score 339 or a child
Killed process 776 (xyz)
So it's obvious that there is a memory leak, and it must be my app, since my app is the one that gets killed. But I am not doing any malloc calls from the program. I have taken care to limit the scope of variables so that they are deallocated after they are used. So I am at a complete loss as to why slab keeps increasing in the top output. I have tried http://valgrind.org/docs/manual/faq.html#faq.reports but it didn't work.
Currently I am trying to use Valgrind on the desktop build (since I have read that on ARM it only works on Cortex cores) to check my business logic.
Additional info:
root#freescale ~/Application/app$ uname -a
Linux freescale 2.6.31-207-g7286c01 #2053 Fri Jun 22 10:29:11 IST 2012 armv5tejl GNU/Linux
Compiler : arm-none-linux-gnueabi-4.1.2 glibc2.5
cpp libs : libstdc++.so.6.0.8
Qt : 4.7.3 libs
Any pointers would be greatly appreciated...
I don't think the problem is directly in your code.
The reason is obvious: your application's own memory does not increase (neither RSS nor VSZ grows).
However, you do see the number of slabs increasing. You cannot use or increase the number of slabs from your application - it's a kernel-only thingie.
Some obvious causes of slab growth, off the top of my head:
you never really close network sockets
you read many files, but never close them
you use many ioctls
I would run strace and look at its output for a while. strace intercepts your program's interactions with the kernel. If you have memory issues, I'd expect repeated calls to brk(); if you have other issues, you'll see repeated calls to open() without a matching close().
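For example, something along these lines (illustrative only; adjust the binary path and options as needed):
strace -f -o xyz.trace ./xyz -qws
Then search xyz.trace for file descriptors returned by open() or socket() that never get a matching close(), or for runaway ioctl()/brk() activity.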
If you allocate data structures, check that children are added and removed correctly; I had a similar bug in my code. Also, if you make big queries to the database, it may use more RAM. Try a memory leak detector to find out whether there is any leak.