Why do 'make test' and "./test/Test" give different output? - c++

I'm setting up a C++ test environment with CMake. I know what I want to do, but I'm confused by two different test output styles.
In my example below, what does 'make test' actually do? I expected the output of 'make test' and './test/Test' to be the same, but it isn't: 'make test' does not use the googletest output style. The test results themselves look the same, but I'm not satisfied with this output.
Output Differences
$ make test
Running tests...
Test project /path/to/sample/build
Start 1: MyTest
1/1 Test #1: MyTest ...........................***Failed 0.02 sec
0% tests passed, 1 tests failed out of 1
Total Test time (real) = 0.02 sec
The following tests FAILED:
1 - MyTest (Failed)
Errors while running CTest
make: *** [test] Error 8
$ ./test/Test
Running main() from gtest_main.cc
[==========] Running 2 tests from 1 test case.
[----------] Global test environment set-up.
[----------] 2 tests from MyLibTest
[ RUN ] MyLibTest.valCheck
/path/to/test/test.cc:10: Failure
Expected: sqr(1.0)
Which is: 1
To be equal to: 2.0
Which is: 2
[ FAILED ] MyLibTest.valCheck (0 ms)
[ RUN ] MyLibTest.negativeValCheck
[ OK ] MyLibTest.negativeValCheck (0 ms)
[----------] 2 tests from MyLibTest (0 ms total)
[----------] Global test environment tear-down
[==========] 2 tests from 1 test case ran. (0 ms total)
[ PASSED ] 1 test.
[ FAILED ] 1 test, listed below:
[ FAILED ] MyLibTest.valCheck
1 FAILED TEST
Commands
mkdir build
cd build
cmake ..
make test // NOT googletest output style
./test/Test // It looks googletest output
My Environment
root
  - CMakeLists.txt
  + src/
      - CMakeLists.txt
      - main.cc
      - sqr.cc
      - sqr.h
  + test/
      - CMakeLists.txt
      - test.cc
root/CMakeLists.txt
cmake_minimum_required(VERSION 2.8)
project (MYTEST)
add_subdirectory(src)
add_subdirectory(test)
enable_testing()
add_test(NAME MyTest COMMAND Test)
test/CMakeLists.txt
cmake_minimum_required(VERSION 2.8)
set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
set(GTEST_ROOT /path/to/googletest/googletest)
include_directories(${GTEST_ROOT}/include/)
link_directories(${GTEST_ROOT}/build/)
add_executable(Test ${CMAKE_CURRENT_SOURCE_DIR}/test.cc)
target_link_libraries(Test sqr gtest gtest_main pthread)
test/test.cc
#include "../src/sqr.h"
#include <gtest/gtest.h>
namespace {
class MyLibTest : public ::testing::Test{};
TEST_F(MyLibTest, valCheck) {
EXPECT_EQ(sqr(3.0), 9.0);
EXPECT_EQ(sqr(1.0), 2.0); // it fails!
}
TEST_F(MyLibTest, negativeValCheck) {
EXPECT_EQ(sqr(-3.0), 9.0);
}
}

You can modify the behaviour of ctest (which is what make test will ultimately execute) with environment variables.
For example:
CTEST_OUTPUT_ON_FAILURE=1 make test
This will print full output for test executables that had a failure.
Another one you may be interested in is CTEST_PARALLEL_LEVEL
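You can also call ctest directly from the build directory instead of going through make; the same behaviour is available as ordinary ctest command-line options:

cd build
ctest --output-on-failure   # full output only for tests that fail
ctest -V                    # full (googletest-style) output for every test
ctest -j 4                  # run up to 4 tests in parallel, like CTEST_PARALLEL_LEVEL=4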

Related

How do I accumulate code coverage with ctest and gtest_discover_tests?

I am using ctest with gtest_discover_tests to unit test my applications. It appears that ctest calls my test executable once for each individual test (see below for an example). I believe this results in the default.profraw code coverage file being overwritten for each test, so I only get code coverage for the last test that was executed.
As an example:
ctest --verbose -R 'Test1|Test2'
57: Test command: /home/user/project_dir/build_unit_test/project/project_test "--gtest_filter=TestUnit.Test1" "--gtest_also_run_disabled_tests"
57: Test timeout computed to be: 10000000
57: Running main() from gtest_main.cc
57: Note: Google Test filter = TestUnit.Test1
57: [==========] Running 1 test from 1 test case.
57: [----------] Global test environment set-up.
57: [----------] 1 test from TestUnit
57: [ RUN ] TestUnit.Test1
57: [ OK ] TestUnit.Test1 (6 ms)
57: [----------] 1 test from TestUnit (6 ms total)
57:
57: [----------] Global test environment tear-down
57: [==========] 1 test from 1 test case ran. (6 ms total)
57: [ PASSED ] 1 test.
9/10 Test #57: project.TestUnit.Test1 .............. Passed 0.05 sec
test 58
Start 58: project.TestUnit.Test2
58: Test command: /home/user/project_dir/build_unit_test/project/project_test "--gtest_filter=TestUnit.Test2" "--gtest_also_run_disabled_tests"
58: Test timeout computed to be: 10000000
58: Running main() from gtest_main.cc
58: Note: Google Test filter = TestUnit.Test2
58: [==========] Running 1 test from 1 test case.
58: [----------] Global test environment set-up.
58: [----------] 1 test from TestUnit
58: [ RUN ] TestUnit.Test2
58: [ OK ] TestUnit.Test2 (1 ms)
58: [----------] 1 test from TestUnit (1 ms total)
58:
58: [----------] Global test environment tear-down
58: [==========] 1 test from 1 test case ran. (2 ms total)
58: [ PASSED ] 1 test.
10/10 Test #58: project.TestUnit.Test2 .............. Passed 0.04 sec
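One way to keep those per-test runs from overwriting each other (a sketch, assuming clang's source-based coverage, where the instrumented binary honours LLVM_PROFILE_FILE and %p expands to the process ID) is to give every test process its own profile file and merge them afterwards:

# each ctest-spawned process writes coverage-<pid>.profraw instead of default.profraw
LLVM_PROFILE_FILE="coverage-%p.profraw" ctest
# merge the per-test profiles into a single file for reporting
llvm-profdata merge -sparse coverage-*.profraw -o coverage.profdata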

How do I make the `groovy` command exit non-zero for failing JUnit4 tests?

When running JUnit tests with the groovy command using the built-in automatic test runner, it exits 0 even when tests fail. I'd like the command to exit non-zero if there are test failures. Is there a way I can do this?
#!/usr/bin/env groovy
import org.junit.*

class BasicTest {
    @Test
    void test_failure() {
        assert false
    }
}
$ groovy --version
Groovy Version: 3.0.2 JVM: 13.0.2 Vendor: Oracle Corporation OS: Mac OS X
$ groovy basic_test.groovy
JUnit 4 Runner, Tests: 1, Failures: 1, Time: 8
Test Failure: test_failure(BasicTest)
Assertion failed:
assert false
at org.codehaus.groovy.runtime.InvokerHelper.assertFailed(InvokerHelper.java:434)
...
$ echo $?
0
Thanks folks!
I've not seen people running tests like that before (writing them as a *nix script)...
You can do this though, to catch a failure and exit with 1:
#!/usr/bin/env groovy
import org.junit.*
import org.junit.rules.*

class BasicTest {
    // A TestWatcher rule that exits the JVM with status 1 on the first failure
    @Rule
    public TestRule watchman = [
        failed: { e, d ->
            println d
            e.printStackTrace()
            System.exit(1)
        }
    ] as TestWatcher

    @Test
    void test_failure() {
        assert false
    }
}
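With the watcher in place, the exit status can be checked from the shell as usual (assuming the script is saved as basic_test.groovy, as in the question):

groovy basic_test.groovy; echo $?                # now prints 1 on failure instead of 0
groovy basic_test.groovy || echo "tests failed"  # the non-zero status can gate CI steps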

Regex: negative matcher ^(?!\\[functional\\]).+$ on javascript test names fails to exclude [functional]

In package.json I have 2 script commands:
"test:unit": "jest --watch --testNamePattern='^(?!\\[functional\\]).+$'",
"test:functional": "jest --watch --testNamePattern='\\[functional\\]'",
Copying ^(?!\\[functional\\]).+$ into https://regex101.com/, it does not match the test string below, taken from the first argument of describe():
describe("[functional] live tests", () => {
When changed to ([functional]).+$, the pattern does match. (I have to remove one \ from each pair, since the doubled backslashes are escapes for the .json file, I think.)
Here is what I see when running npm run test:unit in my project root:
// the functional test runs (not desired)
$ npm run test:unit
functions/src/classes/__tests__/Functional.test.ts:30:47 - error TS2339: Property 'submit' does not exist on type 'Element'.
30 await emailForm.evaluate(form => form.submit());
~~~~~~
RUNS ...s/__tests__/Functional.test.ts
Test Suites: 1 failed, 1 skipped, 3 passed, 4 of 5 total
Tests: 2 skipped, 16 passed, 18 total
Snapshots: 0 total
Time: 8.965s, estimated 27s
Ran all test suites with tests matching "^(?!\[functional\]).+$".
Active Filters: test name /^(?!\[functional\]).+$/
The functional tests are not built out yet, which explains the TypeScript error; it's not important here. The key issue is why those tests were not skipped.
I believe the problem has to do with the negative regex matcher. The positive matcher without the ! only matches tests that have, or are nested in a describe block with, [functional]:
$ npm run test:functional
Test Suites: 1 failed, 4 skipped, 1 of 5 total
Active Filters: test name /\[functional\]/
Anyone know why the negative regex pattern is failing during npm run test:unit?
As far as I can tell, --testNamePattern only filters individual tests after Jest has already loaded (and, with ts-jest, compiled) each test file, so a suite with a compile error still shows up as failed regardless of the name filter. Instead of a regex fix, I changed the flag on the unit testing script to an ignore pattern, copying the matching pattern for [functional]:
"test:unit": "jest --watch --testIgnorePattern='\\[functional\\]'",
"test:functional": "jest --watch --testNamePattern='\\[functional\\]'",

Bug with writing to file in linux /sys/class/gpio

I'm having the strangest bug I've ever seen with a Linux system right now, and there seem to be only a few possible explanations for it:
Either appending sudo makes file writes instant,
or appending sudo produces a short delay in executing statements,
or I've got no clue what's happening with my program.
Let me give you some background. I'm currently writing a C++ program for Raspberry Pi GPIO manipulation. There are no visible errors in the program as far as I know, and it works successfully with sudo, and with a delay too. So here's how the RPi's GPIOs work:
First you have to export one to reserve it for manipulation; this creates a new directory named gpio+number with several files in it.
echo 17 > /sys/class/gpio/export
Then set its direction (in means read, out means write):
echo "out" > /sys/class/gpio/gpio17/direction
Then write the value (0 or 1 for off and on):
echo 1 > /sys/class/gpio/gpio17/value
At the end, unexport it; the directory gets deleted:
echo 17 > /sys/class/gpio/unexport
It doesn't matter whether you do this through bash commands or through C/C++ or any other language's I/O, since on Unix these are just files and you only need to read from or write to them. Everything works fine so far: I've tested this manually and it works, so my manual test passes.
Now I have a simple test written for my program, which looks like this:
TEST(LEDWrites, LedDevice)
{
    Led led1(17, "MyLED");
    // auto b = sleep(1);
    EXPECT_EQ(true, led1.on());
}
The Led class constructor does the export part (echo 17 > /sys/class/gpio/export), while the .on() call sets the direction (echo "out" > /sys/class/gpio/gpio17/direction) and writes the value as well (echo 1 > /sys/class/gpio/gpio17/value). Forget about unexport here, since it is handled by the destructor and plays no role here.
If you're curious, these functions handle the I/O like this:
{
    const std::string direction = _dir ? "out" : "in";
    const std::string path = GPIO_PATH + "/gpio" + std::to_string(powerPin) + "/direction";
    std::ofstream dirStream(path.c_str(), std::ofstream::trunc);
    if (dirStream) {
        dirStream << direction;
    } else {
        // LOG error here
        return false;
    }
    return true;
}
i.e. basic C++ file I/O. Now let me explain the bug.
First, here are 3 runs of the same test:
Normal run FAILS
[isaac@alarmpi build]$ ./test/testexe
Running main() from gtest_main.cc
[==========] Running 2 tests from 2 test cases.
[----------] Global test environment set-up.
[----------] 1 test from LEDConstruction
[ RUN ] LEDConstruction.LedDevice
[ OK ] LEDConstruction.LedDevice (1 ms)
[----------] 1 test from LEDConstruction (1 ms total)
[----------] 1 test from LEDWrites
[ RUN ] LEDWrites.LedDevice
../test/test.cpp:20: Failure
Value of: led1.on()
Actual: false
Expected: true
[ FAILED ] LEDWrites.LedDevice (2 ms)
[----------] 1 test from LEDWrites (3 ms total)
[----------] Global test environment tear-down
[==========] 2 tests from 2 test cases ran. (6 ms total)
[ PASSED ] 1 test.
[ FAILED ] 1 test, listed below:
[ FAILED ] LEDWrites.LedDevice
1 FAILED TEST
run with sudo PASSES
[isaac@alarmpi build]$ sudo ./test/testexe
[sudo] password for isaac:
Running main() from gtest_main.cc
[==========] Running 2 tests from 2 test cases.
[----------] Global test environment set-up.
[----------] 1 test from LEDConstruction
[ RUN ] LEDConstruction.LedDevice
[ OK ] LEDConstruction.LedDevice (1 ms)
[----------] 1 test from LEDConstruction (2 ms total)
[----------] 1 test from LEDWrites
[ RUN ] LEDWrites.LedDevice
[ OK ] LEDWrites.LedDevice (2 ms)
[----------] 1 test from LEDWrites (2 ms total)
[----------] Global test environment tear-down
[==========] 2 tests from 2 test cases ran. (5 ms total)
[ PASSED ] 2 tests.
wtf delay run PASSES (with auto b = sleep(1); uncommented)
[isaac@alarmpi build]$ ./test/testexe
Running main() from gtest_main.cc
[==========] Running 2 tests from 2 test cases.
[----------] Global test environment set-up.
[----------] 1 test from LEDConstruction
[ RUN ] LEDConstruction.LedDevice
[ OK ] LEDConstruction.LedDevice (1 ms)
[----------] 1 test from LEDConstruction (2 ms total)
[----------] 1 test from LEDWrites
[ RUN ] LEDWrites.LedDevice
[ OK ] LEDWrites.LedDevice (1001 ms)
[----------] 1 test from LEDWrites (1003 ms total)
[----------] Global test environment tear-down
[==========] 2 tests from 2 test cases ran. (1005 ms total)
[ PASSED ] 2 tests.
The only difference between the delay run and the normal run is that single uncommented line, auto b = sleep(1);. Everything else is the same, including the device, directory structure, build configuration, everything. The only thing that could explain this is that Linux might create that file and its friends a bit later, or it takes some time, and I call .on() before that. Well, that could explain it...
But then why does the sudo invocation with no delay pass? Does it make those writes faster/instant, or does it insert the delay by itself? Is this caused by some kind of buffering? Please say no :/
If it matters, I'm using the following udev rules to get non-sudo access to the gpio directory:
SUBSYSTEM=="bcm2835-gpiomem", KERNEL=="gpiomem", GROUP="gpio", MODE="0660"
SUBSYSTEM=="gpio", KERNEL=="gpiochip*", ACTION=="add", PROGRAM="/bin/sh -c 'chown root:gpio /sys/class/gpio/export /sys/class/gpio/unexport ; chmod 220 /sys/class/gpio/export /sys/class/gpio/unexport'"
SUBSYSTEM=="gpio", KERNEL=="gpio*", ACTION=="add", PROGRAM="/bin/sh -c 'chown root:gpio /sys%p/active_low /sys%p/direction /sys%p/edge /sys%p/value ; chmod 660 /sys%p/active_low /sys%p/direction /sys%p/edge /sys%p/value'"
EDIT: As @charles mentioned, I added std::flush after every write in my I/O operations. Still failing.
Strace to the rescue
Let's trace the execution of the failing run:
open("/sys/class/gpio/export", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/unexport", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/export", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/gpio17/value", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = -1 EACCES (Permission denied)
open("/sys/class/gpio/gpio17/direction", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = -1 EACCES (Permission denied)
open("/sys/class/gpio/unexport", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
..., 0666) = -1 EACCES (Permission denied)
Okaaay, here's something that explains why it passes with sudo. But why does it pass with the delay? Let's check that too:
open("/sys/class/gpio/export", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/unexport", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/export", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/gpio17/value", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/gpio17/direction", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 4
open("/sys/class/gpio/unexport", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
No wait, wtf? This means the permission denied must be because the files aren't fully set up at that point. But how does using sudo solve that?
Here's the relevant output for sudo:
open("/sys/class/gpio/export", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/unexport", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/export", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/gpio17/value", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
open("/sys/class/gpio/gpio17/direction", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 4
open("/sys/class/gpio/unexport", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 3
There is a race between udev and your program. When you write to /sys/class/gpio/export, the write will not return until the GPIO is fully created. However, once it has been created, you have two processes that simultaneously take action on the new device:
A hotplug/uevent triggers udev to evaluate its rules. As part of these rules, it will change the ownership and permissions of /sys/class/gpio/gpio17/value.
Your program continues. It will immediately try to open /sys/class/gpio/gpio17/value.
So there is some chance that your program will open the value file before udev has changed its ownership and permissions. This is in fact very likely, because your udev handler does an execve of a shell which then execve's chown and chmod. But even without that, the scheduler will normally give priority to the task that was already running when returning from a syscall, so your program will usually open the value file before udev has even woken up.
By inserting a sleep, you allow udev to do its thing. So to make it robust, you could poll the file with access() before opening it.
It would also help to give udev a higher priority, e.g. chrt -f -p 3 $(pidof systemd-udevd). This gives udev real-time priority, which means it will always run before your program. It can also make your system unresponsive, so take care.
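The same polling idea can be tried from the shell first (a sketch using the pin number from the question and assuming the udev rules shown above are installed): export the pin, then wait until udev has actually made the value file writable before touching it.

echo 17 > /sys/class/gpio/export
# poll instead of sleeping blindly: wait (up to ~1 s) for udev to chown/chmod the new files
for i in $(seq 1 100); do
    [ -w /sys/class/gpio/gpio17/value ] && break
    sleep 0.01
done
echo out > /sys/class/gpio/gpio17/direction
echo 1 > /sys/class/gpio/gpio17/value
echo 17 > /sys/class/gpio/unexport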
From your strace output
open("/sys/class/gpio/gpio17/value", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = -1 EACCES (Permission denied)
open("/sys/class/gpio/gpio17/direction", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = -1 EACCES (Permission denied)
You are first writing value, then direction.
Of course, you should first set the proper direction before writing the value.
Also, you should probably end your output
if (dirStream) {
    dirStream << direction;
} else {
    // LOG error here
    return false;
}
with a newline.
The echo command also appends a newline.
if (dirStream) {
    dirStream << direction << std::endl;
} else {
    // LOG error here
    return false;
}
(In this case, I would explicitly use std::endl to flush. Of course just adding '\n' works as well, but making the flush explicit makes the code more robust. As it is, you are now relying on the fact that the stream gets closed immediately after writing—which it might not if you later decide to keep the stream open until the end of your program.)
The missing trailing newline might also explain why it works with a delay: after that delay, the driver might treat the data as if a newline had arrived and assume no more characters are waiting in the stream.
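To convince yourself that echo and the stream write different bytes, compare them from the shell (this assumes xxd is installed):

echo out | xxd     # 6f75 740a  -> "out" plus a trailing newline
printf out | xxd   # 6f75 74    -> "out" only, like the C++ stream write above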

Running only changed or failed tests with CMake/CTest?

I work on a large code base that has close to 400 test executables, with run times varying between 0.001 seconds and 1800 seconds. When some bit of code changes, CMake intelligently rebuilds only the targets that have changed, and the rebuild often takes less time than the tests themselves.
The only way I know around this is to manually filter to the tests you know you want to run. My intuition says I would want to re-run any test suite that does not have a successful run stored, either because it failed or because it was recompiled.
Is this possible? If so, how?
The ctest command accepts several parameters which affect the set of tests to run, e.g. "-R" (filter tests by name) and "-L" (filter tests by label). Using dashboard-related options, you can probably also choose which tests to run.
As for generating values for these options according to which executables have changed, you could write a program or script which checks the modification times of the executables and/or parses the last log file to find failed tests.
Another way to run only changed executables is to wrap the tests in an additional script. This script runs the executable only if some condition is satisfied.
On Linux, the wrapper script could be implemented as follows:
test_wrapper.sh:
# test_wrapper.sh <test_name> <executable> <params...>
#
# Runs the executable, given as the second argument, with the parameters
# given as further arguments.
#
# If the environment variable LAST_LOG_FILE is set,
# checks whether that file is older than the executable.
#
# If the environment variable LAST_LOG_FAILED_FILE is set,
# checks whether the test name is listed in that file.
#
# The test executable is run only if one of these checks succeeds,
# or if no check is performed at all.

check_succeed=
check_performed=

if [ -n "$LAST_LOG_FILE" ]; then
    check_performed=1
    executable=$2
    if [ ! -e "$LAST_LOG_FILE" ]; then
        check_succeed=1 # Log file is absent
    elif [ "$LAST_LOG_FILE" -ot "$executable" ]; then
        check_succeed=1 # Log file is older than the executable
    fi
fi

if [ -n "$LAST_LOG_FAILED_FILE" ]; then
    check_performed=1
    testname=$1
    if [ ! -e "$LAST_LOG_FAILED_FILE" ]; then
        : # No failed tests at all
    elif grep ":${testname}\$" "$LAST_LOG_FAILED_FILE" > /dev/null; then
        check_succeed=1 # Test failed previously
    fi
fi

if [ -n "$check_performed" -a -z "$check_succeed" ]; then
    echo "No need to run test."
    exit 0
fi

shift 1 # remove the <test_name> argument
exec "$@" # run the real test executable, preserving its exit status
CMake function for adding a wrapped test:
CMakeLists.txt:
# Similar to add_test(), but the test is executed with our wrapper.
function(add_wrapped_test name command)
    if(name STREQUAL "NAME")
        # Keyword add_test() flow: NAME <name> COMMAND <command> ...
        set(other_params ${ARGN})
        list(REMOVE_AT other_params 0) # COMMAND keyword
        # Actual `command` argument
        list(GET other_params 0 real_command)
        list(REMOVE_AT other_params 0)
        # If `real_command` is a target, translate it to the path of its executable.
        if(TARGET ${real_command})
            # A generator expression is perfectly OK here.
            set(real_command "$<TARGET_FILE:${real_command}>")
        endif()
        # `command` is actually the value of the 'NAME' parameter.
        add_test("NAME" ${command} "COMMAND" /bin/sh <...>/test_wrapper.sh
            ${command} ${real_command} ${other_params}
        )
    else() # Plain add_test() flow: <name> <command> <arg>...
        add_test(${name} /bin/sh <...>/test_wrapper.sh
            ${name} ${command} ${ARGN}
        )
    endif()
endfunction(add_wrapped_test)
When you want to run only those tests whose executables have changed since the last run, or which failed last time, use:
LAST_LOG_FILE=<build-dir>/Testing/Temporary/LastTest.log \
LAST_LOG_FAILED_FILE=<build-dir>/Testing/Temporary/LastTestsFailed.log \
ctest
All other tests will automatically be reported as passed.
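For the "failed last time" part specifically, newer CTest versions can also replay the failures recorded in LastTestsFailed.log without any wrapper:

cd <build-dir>
ctest --rerun-failed --output-on-failure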