I have a small CLI program in Go that implements one command-line flag (-config=<config file>) using flag.String().
When I call go test, it errors out because my program says flag provided but not defined: -test.timeout.
go test -v -x -args -config=./config.yml gives me:
/usr/lib/go/pkg/tool/linux_amd64/link -o $WORK/b001/... .../b001/myprogram.test -test.timeout=10m0s -test.v=true -config=./config.yml
In other words, it passes the test arguments on to myprogram, even though I expected the -args flag to prevent this (shouldn't it only pass the arguments given after -args?).
Is this a bug, or is there a way to stop go test from passing any flags to the tested program other than the ones I specifically tell it to pass?
EDIT:
The relevant code snippet:
package main

import (
    "context"
    "encoding/json"
    "flag"
    // ...
)

// ...

var (
    configpath = flag.String("config", "config.yml", "Config file to use, if omitted defaults to config.yml")
    // ...
)

func init() {
    var err error
    flag.Parse()
    _, err = configGlobal.getConfig(*configpath)
    if err != nil {
        panic("No config loaded")
    }
    // ...
}
Normally, running the program without arguments or with the argument -config ./config.yml works; obviously, passing other flags doesn't.
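For what it's worth, here is the usual way out, sketched (it is not from this thread): go test always passes the -test.* flags to the compiled test binary, and -args only controls which extra arguments are appended after them. The error comes from calling flag.Parse() in init(), which runs before the testing framework has registered its -test.* flags. Moving the parse (and the config load) into main() avoids the clash, because the generated test binary never runs your main():

package main

import "flag"

var configpath = flag.String("config", "config.yml", "Config file to use, if omitted defaults to config.yml")

func main() {
    // Parse flags here, not in init(): under `go test` the test binary
    // registers and parses the -test.* flags itself, and an early
    // flag.Parse() in init() runs before those flags exist.
    flag.Parse()
    // ... load the config here (configGlobal.getConfig in the original snippet) ...
}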
I am using monkey patching in Go. When I debug the following code in VSCode, it shows that the function proc.Signal returns the programmed error:
func TestCheckProcessRunning(t *testing.T) {
    monkey.Patch((*os.Process).Signal, func(p *os.Process, sig os.Signal) error {
        return errors.New("Signal failed")
    })
    proc := &os.Process{}
    sig_e := proc.Signal(syscall.Signal(0))
    fmt.Printf("%s\n", sig_e)
}
Signal failed
But when I try to run the test using go test ., the patch is no longer applied and I get a different error:
os: process not initialized
Any idea of what I am doing wrong?
It seems the flag -gcflags=-l is needed; it disables compiler inlining, without which the call to proc.Signal can be inlined and the monkey patch never takes effect:
go test -gcflags=-l .
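One caveat worth adding (not from the original answer): since Go 1.10, -gcflags without a package pattern applies only to the packages named on the command line, so if the inlined call sites live in other packages as well, disable inlining across the whole build with the all= pattern:

go test -gcflags=all=-l .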
Is there any way to have a required argument in a Crystal program?
For example
./myprog ~/Music -r
Instead of
./myprog -d ~/Music -r
So my program won't run if there's no [directory] argument. Right now I am using "option_parser", which only handles -flag style arguments.
There is no way to declare required arguments with option_parser, but you can parse the arguments and print an error or exit if an argument you expect was not passed:
require "option_parser"
directory = nil
parser = OptionParser.new
parser.on("-d DIR", "Directory [required]") do |d|
directory = d
end
parser.parse ARGV
if directory.nil?
# directory argument was not set
# print help and exit
puts parser
exit 1
else
# ...
end
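Since the actual goal is a positional argument (./myprog ~/Music -r rather than -d ~/Music), here is a minimal sketch building on the above; it relies on OptionParser removing the options it handles from the array it parses, so whatever remains in ARGV afterwards can be treated as the required positional directory:

require "option_parser"

recursive = false
parser = OptionParser.new
parser.on("-r", "Recurse into subdirectories") do
  recursive = true
end
parser.parse ARGV

# The flags have been consumed; anything left in ARGV is positional.
if ARGV.empty?
  # no [directory] argument: print help and exit
  puts parser
  exit 1
end
directory = ARGV.first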
get_ue_supported_srvcc([]) ->
    ?SRVCC_3GPP_NONE_SUPPORT;
get_ue_supported_srvcc([#sip_contactV{extensionsP = EP} | T]) ->
    case b2bLib:support_tags_to_value(EP) of
        ?SRVCC_3GPP_NONE_SUPPORT ->
            get_ue_supported_srvcc(T);
        Flag ->
            Flag
    end.
I want to create a unit test for this function. Here is my unit test case:
get_ue_supported_srvcc_test() ->
    Contact =
        [#sip_contactV{extensionsP =
                           [{"+sip.instance",
                             {quoted_string,"<urn:gsma:imei:35502406-005233-0>"}},
                            {"+g.3gpp.icsi-ref",
                             {quoted_string,"urn%3Aurn-7%3A3gpp-service.ims.icsi.mmtel"}},
                            "+g.3gpp.mid-call",
                            "+g.3gpp.srvcc-alerting",
                            "+g.3gpp.ps2cs-srvcc-orig-pre-alerting",
                            "video"]}],
    ?assertEqual(7, b2bAtcfLib:get_ue_supported_srvcc(Contact)).
But when I run it, I get this error:
======================== EUnit ========================
module 'b2bAtcfLib'
b2bAtcfLib_tests: get_ue_supported_srvcc_test (module 'b2bAtcfLib_tests')...*failed*
in function b2bLib:support_tags_to_value/1
called as support_tags_to_value([{"+sip.instance",{quoted_string,"<urn:gsma:imei:35502406-005233-0>"}},
{"+g.3gpp.icsi-ref",
{quoted_string,"urn%3Aurn-7%3A3gpp-service.ims.icsi.mmtel"}},
"+g.3gpp.mid-call","+g.3gpp.srvcc-alerting",
"+g.3gpp.ps2cs-srvcc-orig-pre-alerting","video"])
in call from b2bAtcfLib:get_ue_supported_srvcc/1 (src/b2bAtcfLib.erl, line 1735)
in call from b2bAtcfLib_tests:'-get_ue_supported_srvcc_test/0-fun-0-'/1 (test/unit/b2bAtcfLib_tests.erl, line 49)
in call from b2bAtcfLib_tests:get_ue_supported_srvcc_test/0
**error:undef
output:<<"">>
[done in 0.008 s]
=======================================================
The error means that b2bLib:support_tags_to_value/1 is undef. The definition of b2bLib:support_tags_to_value is:
support_tags_to_value(FieldStr) ->
    lists:sum([Val || {Tag, Val} <- ?TAGLIST, lists:member(Tag, FieldStr)]).
The error is:
**error:undef
That means that the test is calling a function that's not defined. Either the module couldn't be found, or the module in question doesn't define a function with that name and arity.
The whole error message is a bit confusing. Now that we know that we got a "function undefined" error, we should be looking at this line:
in function b2bLib:support_tags_to_value/1
Even though it says that the error occurred "in" this function, this is the function that's undefined.
So either the test is run in such a way that it doesn't find the b2bLib module, or that module doesn't define a function called support_tags_to_value taking one argument. If it's the former, add -pa path/to/ebin to the Erlang command line in order to add the right directory to the code path.
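A quick way to tell those two cases apart (an extra diagnostic, not part of the original answer) is to ask the code server from the Erlang shell; code:which/1 returns non_existing when a module cannot be found on the code path:

%% In the Erlang shell:
1> code:which(b2bLib).
non_existing
2> code:ensure_loaded(b2bLib).
{error,nofile}
%% If the module does load, check the function name and arity instead:
3> erlang:function_exported(b2bLib, support_tags_to_value, 1).
false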
Developing a script in PowerShell, I need to call an external executable file (.exe). I am developing this script with a TDD approach, so I need to mock the call to this .exe file.
I tried this:
Describe "Create-NewObject" {
Context "Create-Object" {
It "Runs" {
Mock '& "c:\temp\my.exe"' {return {$true}}
Create-Object| Should Be $true
}
}
}
I got this response:
Describing Create-NewObject
Context Create-Object
[-] Runs 574ms
CommandNotFoundException: Could not find Command & "C:\temp\my.exe"
at Validate-Command, C:\Program Files\WindowsPowerShell\Modules\Pester\Functions\Mock.ps1: line 801
at Mock, C:\Program Files\WindowsPowerShell\Modules\Pester\Functions\Mock.ps1: line 168
at <ScriptBlock>, C:\T\Create-NewObject.tests.ps1: line 13
Tests completed in 574ms
Passed: 0 Failed: 1 Skipped: 0 Pending: 0 Inconclusive: 0
Is there a way to mock these kinds of calls without encapsulating them inside a function?
I found a way to mock the call to the executable file:
function Create-Object
{
    $exp = '& "C:\temp\my.exe"'
    Invoke-Expression -Command $exp
}
And the test with the mock then looks like this:
Describe "Create-NewObject" {
Context "Create-Object" {
It "Runs" {
Mock Invoke-Expression {return {$true}} -ParameterFilter {($Command -eq '& "C:\temp\my.exe"')
Create-Object| Should Be $true
}
}
}
Yes, unfortunately, as of Pester 4.8.1:
you cannot mock external executables by their full paths (e.g., C:\Windows\System32\cmd.exe)
you can mock them by file name only (e.g., cmd), but beware that in older Pester versions the mock is only called for invocations that explicitly use the .exe extension (e.g., cmd.exe) - see this (obsolete) GitHub issue
Your own workaround is effective, but it relies on Invoke-Expression, which is awkward and should generally be avoided.
Here's a workaround that uses a helper function, Invoke-External, which wraps the invocation of external programs and, as a function, can itself be mocked, using a -ParameterFilter to filter by executable path:
In your code, define the Invoke-External function and then use it to make your call to c:\temp\my.exe:
# Helper function for invoking an external utility (executable).
# The raison d'ĂȘtre for this function is to allow
# calls to external executables via their *full paths* to be mocked in Pester.
function Invoke-External {
    param(
        [Parameter(Mandatory=$true)]
        [string] $LiteralPath,
        [Parameter(ValueFromRemainingArguments=$true)]
        $PassThruArgs
    )
    & $LiteralPath $PassThruArgs
}

# Call c:\temp\my.exe via Invoke-External.
# Note that you may pass arguments to the executable as usual (none here):
Invoke-External c:\temp\my.exe
Then, to mock the call to c:\temp\my.exe in your Pester tests:
Mock Invoke-External -ParameterFilter { $LiteralPath -eq 'c:\temp\my.exe' } `
-MockWith { $true }
Note: If you only have one call to an external executable in your code, you can omit the -ParameterFilter argument.
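For instance, a test along these lines (a sketch; it assumes Create-Object calls Invoke-External c:\temp\my.exe and returns its output) can verify both the result and that the mock was actually used:

Describe "Create-Object" {
    It "runs without invoking the real executable" {
        Mock Invoke-External -ParameterFilter { $LiteralPath -eq 'c:\temp\my.exe' } `
             -MockWith { $true }
        Create-Object | Should Be $true
        # Confirm the external call went through the mock exactly once.
        Assert-MockCalled Invoke-External -Times 1 -Exactly
    }
}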
I tried something like this and it seemed to work; since PowerShell resolves functions before external programs, a function whose name is the literal path apparently shadows the executable:
$PathToExe = 'C:\Windows\System32\wevtutil.exe'
New-Item -Path function: -Name $PathToExe -Value { ... }
Mock $PathToExe { ... }
I work on a large code base that has close to 400 test executables, with run times varying between 0.001 seconds and 1800 seconds. When some bit of code changes, CMake will intelligently rebuild only the targets that have changed, which often takes less time than the actual test run.
The only way I know around this is to manually filter for the tests I know I want to run. My intuition says that I would want to re-run any test suite that does not have a successful run stored - either because it failed, or because it was recompiled.
Is this possible? If so, how?
The ctest command accepts several parameters that affect the set of tests to run: e.g., -R filters tests by name and -L filters tests by label. Using the dashboard-related options, you may also be able to choose which tests to run.
As for generating values for these options according to the changed executables, you could write a program or script that checks the modification times of the executables and/or parses the last log file to find failed tests.
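Worth noting (an addition beyond the original answer): recent CTest versions have a built-in ctest --rerun-failed option that replays only the tests that failed in the previous run. For older versions, a minimal shell sketch, assuming the default log location under the build directory, can turn LastTestsFailed.log into a -R filter:

#!/bin/sh
# Re-run only the tests recorded as failed in the last CTest run.
# Lines in LastTestsFailed.log look like "<number>:<testname>".
log=Testing/Temporary/LastTestsFailed.log
if [ -e "$log" ]; then
    regex=$(cut -d: -f2- "$log" | paste -sd'|' -)
    ctest -R "^($regex)$"
fi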
Another way to run only the changed executables is to wrap each test in an additional script that runs the executable only if some condition is satisfied.
On Linux, the wrapper script could be implemented as follows:
test_wrapper.sh:
#!/bin/sh
# test_wrapper.sh <test_name> <executable> <params..>
# Run the executable, given as the second argument, with the parameters given as further arguments.
#
# If the environment variable `LAST_LOG_FILE` is set,
# check that this file is older than the executable.
#
# If the environment variable LAST_LOG_FAILED_FILE is set,
# check that the test name is listed in this file.
#
# The test executable is run only if one of these checks succeeds, or if no check is performed.
check_succeed=
check_performed=
if [ -n "$LAST_LOG_FILE" ]; then
    check_performed=1
    executable=$2
    if [ ! -e "$LAST_LOG_FILE" ]; then
        check_succeed=1 # Log file is absent
    elif [ "$LAST_LOG_FILE" -ot "$executable" ]; then
        check_succeed=1 # Log file is older than the executable
    fi
fi
if [ -n "$LAST_LOG_FAILED_FILE" ]; then
    check_performed=1
    testname=$1
    if [ ! -e "$LAST_LOG_FAILED_FILE" ]; then
        : # No failed tests at all, nothing to match
    elif grep ":${testname}\$" "$LAST_LOG_FAILED_FILE" > /dev/null; then
        check_succeed=1 # Test failed previously
    fi
fi
if [ -n "$check_performed" ] && [ -z "$check_succeed" ]; then
    echo "No need to run the test."
    exit 0
fi
shift 1 # remove `testname` argument
exec "$@" # run the executable with its arguments, preserving quoting
A CMake function for adding a wrapped test:
CMakeLists.txt:
# Similar to add_test(), but the test is executed via our wrapper.
function(add_wrapped_test name command)
    if(name STREQUAL "NAME")
        # Extended add_test() signature: NAME <name> COMMAND <command> ...
        set(other_params ${ARGN})
        list(REMOVE_AT other_params 0) # COMMAND keyword
        # Actual `command` argument
        list(GET other_params 0 real_command)
        list(REMOVE_AT other_params 0)
        # If `real_command` is a target, translate it to the path of the executable.
        if(TARGET ${real_command})
            # A generator expression is perfectly OK here.
            set(real_command "$<TARGET_FILE:${real_command}>")
        endif()
        # `command` actually holds the value of the NAME parameter
        add_test("NAME" ${command} "COMMAND" /bin/sh <...>/test_wrapper.sh
            ${command} ${real_command} ${other_params}
        )
    else() # Simple add_test() signature
        add_test(${name} /bin/sh <...>/test_wrapper.sh
            ${name} ${command} ${ARGN}
        )
    endif()
endfunction(add_wrapped_test)
When you want to run only those tests whose executables have changed since the last run, or which failed last time, use:
LAST_LOG_FILE=<build-dir>/Testing/Temporary/LastTest.log \
LAST_LOG_FAILED_FILE=<build-dir>/Testing/Temporary/LastTestsFailed.log \
ctest
All other tests will automatically pass (the wrapper exits with 0 without running them).