vector storing lambdas -- CLANG vs the other two - c++

Here is the code.
// .
#include <stdio.h>
#include <memory>
#include <vector>
// Two-step paste so that __LINE__ is macro-expanded before ## is applied.
#define CAT_(a, b) a##b
#define CAT(a, b) CAT_(a, b)
// Registers x at namespace scope under a line-number-unique variable name.
#define REG(x) inline auto CAT(unused_name_, __LINE__ ) = tu_register(x)
// Prints the current source line number (no trailing newline).
#define LNE printf("%d", __LINE__ )
// Signature required of every registered callback: no arguments, no result.
typedef void (*fp_required)( ) ;
using fvec_type = std::vector< fp_required > ;
// Registry of callbacks, populated during static initialization via REG.
static fvec_type fvec{};

// Appends `fp` to the registry and logs its address.
// Returns true so the call can initialize a namespace-scope variable (REG).
inline bool tu_register( fp_required const & fp ){
    // FIX: printf's %p requires a void*; passing a function pointer
    // directly is undefined behavior.  The cast below is not strictly
    // portable either, but is the conventional fix on mainstream ABIs.
    printf("\nRegistered: %p" , reinterpret_cast<void*>( fp ) ) ;
    fvec.push_back( fp ) ;
    return true ;
}
// Each REG expands to "inline auto unused_name_<line> = tu_register(<lambda>)",
// so these three stateless lambdas (each convertible to a plain function
// pointer) are pushed into fvec during static initialization.
REG([]{ LNE; }) ; // 3 stateless lambdas
REG([]{ LNE; }) ;
REG([]{ LNE; }) ;
// Invokes every callback registered before main() ran.
int main()
{
    // FIX: fvec.size() is a size_t; %zu is the correct conversion ("%lu"
    // breaks where size_t is not unsigned long, e.g. 64-bit Windows).
    printf("\n\nRegistered %zu lambdas. %s\n", fvec.size(), (fvec.size() > 0 ? "Now calling them": ""));
    for ( auto & fun : fvec ) {
        // FIX: cast for %p (see tu_register); also fixed "rezult" typo.
        printf("\nCalling lambda %p , result: " , reinterpret_cast<void*>(fun));
        fun() ;
    }
    return 0;
}
Clang compiles this with no warnings, yet ends up placing nothing in the vector — why?
https://wandbox.org/permlink/nkYjgqvr5QOprKEn
G++ compiles with no warnings, and works as expected.
https://wandbox.org/permlink/a6HB6xzavE8FOyOi
MSVC (the latest VS2019, all up to date) too, compiles and works no problems.
Who is right?

That code should work, and in Clang 8.0.0, it does. Looks like it was broken after that, and doesn't work in 9.0.0 nor 10.0.0 on Wandbox.

Related

Why is my LLVM JIT implementation segfaulting?

I'm trying to implement a simple JIT compiler using LLVM, following along with the tutorial (http://releases.llvm.org/4.0.1/docs/tutorial/BuildingAJIT1.html), and I'm running into a segfault. I've rewritten my code in the form a minimal (albeit still kinda long) example. The example loops through the integers 0 through 9 and for each one attempts to compile a function that prints that integer, add it to a module, execute the function, and then remove the module from the JIT. This is to simulate an interactive session in which a user inputs commands such as print 0, print 1, etc.
#include <array>
#include <cstdint>
#include <iostream>
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/ExecutionEngine/JITSymbol.h>
#include <llvm/ExecutionEngine/Orc/CompileUtils.h>
#include <llvm/ExecutionEngine/Orc/IRCompileLayer.h>
#include <llvm/ExecutionEngine/Orc/LambdaResolver.h>
#include <llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h>
#include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include <llvm/ExecutionEngine/RuntimeDyld.h>
#include <llvm/IR/BasicBlock.h>
#include <llvm/IR/Constants.h>
#include <llvm/IR/DerivedTypes.h>
#include <llvm/IR/Function.h>
#include <llvm/IR/GlobalValue.h>
#include <llvm/IR/GlobalVariable.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/LegacyPassManager.h>
#include <llvm/IR/Mangler.h>
#include <llvm/IR/Module.h>
#include <llvm/IR/Type.h>
#include <llvm/IR/Value.h>
#include <llvm/IR/Verifier.h>
#include <llvm/Support/DynamicLibrary.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/Support/raw_ostream.h>
#include <llvm/Target/TargetMachine.h>
#include <llvm/Transforms/Scalar.h>
#include <llvm/Transforms/Scalar/GVN.h>
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>
// Minimal ORC JIT demo (LLVM 4 API): for i in [0, 10), build a module
// containing "void func() { printf(\"%i\\n\", i); }", JIT it, call it,
// and remove it again -- simulating an interactive session.
int main() {
    llvm::InitializeNativeTarget();
    llvm::InitializeNativeTargetAsmPrinter();
    llvm::InitializeNativeTargetAsmParser();
    auto machine = llvm::EngineBuilder().selectTarget();
    llvm::orc::ObjectLinkingLayer<> linking_layer;
    llvm::orc::IRCompileLayer<llvm::orc::ObjectLinkingLayer<>> compile_layer(
        linking_layer, llvm::orc::SimpleCompiler(*machine)
    );
    llvm::LLVMContext context;
    llvm::IRBuilder<> builder(context);
    auto layout = machine->createDataLayout();
    llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
    auto index = llvm::ConstantInt::get(context, llvm::APInt(8, 0));
    std::vector<llvm::Constant*> indices = {index, index};
    std::string func_name = "func";
    for (auto i = 0; i < 10; ++i) {
        // FIX: create a fresh module (and its pass manager) on EVERY
        // iteration.  The old code created the module once before the
        // loop and std::move()d it into addModuleSet below, so on the
        // second iteration `module` held nullptr and the GlobalVariable
        // constructor dereferenced it -> the reported segfault.
        auto module = std::make_unique<llvm::Module>("module", context);
        module->setDataLayout(layout);
        auto manager = std::make_unique<llvm::legacy::FunctionPassManager>(
            module.get()
        );
        for (
            auto p : {
                llvm::createInstructionCombiningPass(),
                llvm::createReassociatePass(), llvm::createGVNPass(),
                llvm::createCFGSimplificationPass()
            }
        ) manager->add(p);
        // Private constant holding the printf format string "%i\n".
        auto format_str = new llvm::GlobalVariable(
            *module, llvm::ArrayType::get(llvm::Type::getInt8Ty(context), 4),
            true, llvm::GlobalValue::PrivateLinkage,
            llvm::ConstantDataArray::getString(context, "%i\n"), "format_str"
        );
        format_str->setAlignment(1);
        auto function = llvm::Function::Create(
            llvm::FunctionType::get(
                llvm::Type::getVoidTy(context), std::vector<llvm::Type*>{},
                false
            ), llvm::Function::ExternalLinkage, func_name, module.get()
        );
        builder.SetInsertPoint(
            llvm::BasicBlock::Create(context, "entry", function)
        );
        // Emit: call printf(format_str, i)
        builder.CreateCall(
            module->getOrInsertFunction(
                "printf", llvm::FunctionType::get(
                    llvm::IntegerType::getInt32Ty(context),
                    llvm::PointerType::get(llvm::Type::getInt8Ty(context), 0),
                    true
                )
            ), std::vector<llvm::Value*>{
                llvm::ConstantExpr::getGetElementPtr(
                    nullptr, format_str, indices
                ), llvm::ConstantInt::get(context, llvm::APInt(32, i))
            }, "call"
        );
        builder.CreateRetVoid();
        std::string message;
        llvm::raw_string_ostream message_stream(message);
        if (llvm::verifyFunction(*function, &message_stream))
            throw std::runtime_error(message_stream.str());
        // Hand the module to the JIT; ownership moves into the layer here.
        auto handle = compile_layer.addModuleSet(
            std::array<std::unique_ptr<llvm::Module>, 1>{std::move(module)},
            std::make_unique<llvm::SectionMemoryManager>(),
            llvm::orc::createLambdaResolver(
                [&](const std::string& name) {
                    auto symbol = compile_layer.findSymbol(name, false);
                    return symbol ? symbol : llvm::JITSymbol(nullptr);
                }, [](const std::string& name) {
                    auto address = llvm::RTDyldMemoryManager::
                        getSymbolAddressInProcess(name);
                    return address ? llvm::JITSymbol(
                        address, llvm::JITSymbolFlags::Exported
                    ) : llvm::JITSymbol(nullptr);
                }
            )
        );
        // Mangle "func" for the host ABI, look it up, and call it.
        std::string mangled_name;
        llvm::raw_string_ostream mangled_name_stream(mangled_name);
        llvm::Mangler::getNameWithPrefix(
            mangled_name_stream, func_name, layout
        );
        (
            reinterpret_cast <void(*)()> (
                static_cast <intptr_t> (
                    compile_layer.findSymbol(
                        mangled_name_stream.str(), true
                    ).getAddress()
                )
            )
        )();
        compile_layer.removeModuleSet(handle);
    }
}
The expected output is as follows.
0
1
2
3
4
5
6
7
8
9
Instead I get this.
0
Segmentation fault (core dumped)
According to GDB, the segfault is occurring during the call to llvm::GlobalVariable::GlobalVariable. Here's the backtrace.
#0 0x00007ffcdb8b6541 in llvm::GlobalVariable::GlobalVariable(llvm::Module&, llvm::Type*, bool, llvm::GlobalValue::LinkageTypes, llvm::Constant*, llvm::Twine const&, llvm::GlobalVariable*, llvm::GlobalValue::ThreadLocalMode, unsigned int, bool) () from /usr/lib/libLLVM-4.0.so
#1 0x000000010000698a in main () at main.cc:83
I'm using LLVM version 4.0.1 and GCC version 7.1.1 and compiling with the following command.
g++ -std=c++17 main.cc -o main -O0 -Wall -Wextra -Wno-unused-function \
-Wno-unused-value -Wno-unused-parameter -Werror -ggdb \
`llvm-config --system-libs --libs core`
I'm hoping some LLVM veteran can find my mistake. Thanks, guys!
module is initialized before the for loop:
auto module = std::make_unique<llvm::Module>("module", context);
then in the for loop:
for(...)
{
auto format_str = new llvm::GlobalVariable(*module, ...);
^~~~~~~
...
std::array<std::unique_ptr<llvm::Module>, 1>{std::move(module)},
^~~~~~~~~~~~~~~~~
}
At first iteration you access the object owned by module (ok) and then move from it. This will transfer the ownership of the managed object away from module.
At the second iteration you access the object managed by module -> crash (because it doesn't have a managed object anymore)

Error compiling in Assembly Code for cmpxchg16b

I am working on a project that requires a double-width-compare-and-swap operation (cmpxchg16b). I found the following code by luke h, however when I compile it with "g++-4.7 -g -DDEBUG=1 -std=c++0x dwcas2.c -o dwcas2.o" I get the following error:
Error:
g++-4.7 -g -DDEBUG=1 -m64 -std=c++0x dwcas2.c -o dwcas2.o
dwcas2.c: Assembler messages:
dwcas2.c:29: Error: junk `ptr ' after expression
Any ideas why? I feel like it is something small and easy to fix; I just cannot see it.
Computer Specs:
64-core ThinkMate RAX QS5-4410 server running Ubuntu 12.04 LTS. It is a NUMA system with four AMD Opteron 6272 CPUs (16 cores per chip #2.1 GHz) and 314 GB of shared memory.
Code:
#include <stdint.h>
namespace types
{
    // 128-bit value as two 64-bit halves; 16-byte alignment is mandatory
    // for cmpxchg16b.
    struct uint128_t
    {
        uint64_t lo;
        uint64_t hi;
    }
    __attribute__ (( __aligned__( 16 ) ));
}

// Generic double-width CAS; only the uint128_t specialization is defined.
template< class T > inline bool cas( volatile T * src, T cmp, T with );

// Atomically: if *src == cmp, store `with` into *src and return true;
// otherwise return false.  cmp arrives in rdx:rax and with in rcx:rbx,
// exactly as cmpxchg16b requires.
template<> inline bool cas( volatile types::uint128_t * src, types::uint128_t cmp, types::uint128_t with )
{
    bool result;
    __asm__ __volatile__
    (
        // FIX: GCC emits AT&T-syntax assembly by default, so the Intel
        // "oword ptr" size qualifier was rejected ("junk `ptr' after
        // expression").  The %1 memory operand already carries the size.
        "lock cmpxchg16b %1\n\t"
        "setz %0"
        : "=q" ( result )
        , "+m" ( *src )
        , "+d" ( cmp.hi )
        , "+a" ( cmp.lo )
        : "c" ( with.hi )
        , "b" ( with.lo )
        // FIX: "memory" added -- this is a synchronization point, and the
        // compiler must not cache memory values across it.
        : "cc", "memory"
    );
    return result;
}
int main()
{
using namespace types;
uint128_t test = { 0xdecafbad, 0xfeedbeef };
uint128_t cmp = test;
uint128_t with = { 0x55555555, 0xaaaaaaaa };
return ! cas( & test, cmp, with );
}
On x86 GCC defaults to using AT&T syntax assembly, but your source is in Intel syntax. You probably also need "memory" in the clobber list.

C++ macro concatenation not working under gcc

#include <iostream>

// Target function selected through the LOG_* macro chain.
void LOG_TRACE() { std::cout << "reach here"; }

#define LOG_LL_TRACE LOG_TRACE
// LL_TRACE deliberately carries a value (see the question's constraint),
// so the LOG macro below must paste its NAME, not its expansion.
#define LL_TRACE 0
// NOTE: identifiers containing "__" are reserved; kept only because the
// original interface exposes this name.
#define __LOG(level) LOG_##level()
// FIX: the old definition was __LOG(##level), which pastes "(" with the
// argument -- not a valid preprocessing token (gcc's error).  Pasting
// directly here also prevents LL_TRACE from macro-expanding to 0 first,
// so LOG(LL_TRACE) -> LOG_LL_TRACE() -> LOG_TRACE().
#define LOG(level) LOG_##level()
int main()
{
    LOG(LL_TRACE);
    return 0;
}
The code works under Visual Studio, but under gcc it reports: test.cpp:13:1: error: pasting "(" and "LL_TRACE" does not give a valid preprocessing token.
How can I fix it?
ps: The macro expansion is supposed to be LOG(LL_TRACE) --> __LOG(LL_TRACE) --> LOG_LL_TRACE().
ps: suppose LL_TRACE must have a 0 value, do not remove it.
Two things make this code not compile on g++:
First, the error you're quoting is because you want to have this:
#define LOG(level) __LOG(level)
Notice no ##. Those hashmarks mean concatenate, but you're not concatenating anything. Just forwarding an argument.
The second error is that you have to remove
#define LL_TRACE 0
This line means you end up calling LOG(0) which expands into LOG_0 which isn't defined.
Shouldn't it be :
#define LOG(level) __LOG(level)
That works:
// Corrected version.  Note it works because "#define LL_TRACE 0" is
// absent here, so LOG(LL_TRACE) forwards the bare token LL_TRACE into
// __LOG, where the ## paste produces LOG_LL_TRACE().
#include <iostream>
void LOG_TRACE() { std::cout << "reach here"; }
#define LOG_LL_TRACE LOG_TRACE
#define __LOG( level ) LOG_##level()
// No "##" here: just forward the argument; __LOG does the pasting.
#define LOG(level) __LOG(level)
int main()
{
LOG( LL_TRACE );
return 0;
}

How to profile each call to a function?

I want to profile my execution in a non-standard way. Using gprof, Valgrind, Oprofile... for a given function, I only get the mean of its execution time. What I would like is to obtain the standard deviation of this execution time.
Example:
void a()
sleep ( rand() % 10 + 10 )
void b()
sleep ( rand() % 14 + 2 )
main
for (1 .. 100)
a()
b()
With standard tools, a and b functions will have similar behaviour. Do you know any tool which could give me this result, with an automatic approach.
I already tested with TAU, but until now, it is not really relevant. I think there is a solution in this way, but I am not enough confident with TAU. If anyone is Tau expert, I try to keep all the function execution times, and do the math at the end. But I don't know how to specify it in Tau.
I want to profile C / C++ code, but if you have any lead in other programing language, I'm open.
A profiling tool is not magic, and you can roll your own for whatever purpose in a few lines.
Something like this, perhaps:
// code profile.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
// RAII scope profiler (Windows-specific: QueryPerformanceCounter; assumes
// `using namespace std` and boost::accumulators names are in scope --
// the includes are not shown in this snippet).
// Usage: construct a named cProfile at the top of a scope; its destructor
// records the elapsed ticks.  A default-constructed cProfile prints the
// accumulated report instead.
class cProfile
{
public:
// Construct profiler for a particular scope.
// Call at beginning of scope to be timed; pass a unique name for the scope.
cProfile( const char* name )
{
myName = string( name );
// Record the starting tick count for this instance.
QueryPerformanceCounter( (LARGE_INTEGER *)&myTimeStart );
}
// destructor - automatically called when scope ends; accumulates elapsed time
~cProfile();
// constructor - produces report when called without parameters
cProfile();
private:
// Accumulator tracking count / mean / variance of elapsed ticks per scope.
typedef accumulator_set<__int64, stats<tag::variance(lazy)> > acc_t;
// Shared registry: scope name -> its accumulator.
static map < string, acc_t > myMap;
string myName;        // name of the scope this instance times
__int64 myTimeStart;  // QPC tick count captured at construction
};
// Definition of the static registry (accumulator type spelled out in full).
map < string, accumulator_set<__int64, stats<tag::variance(lazy)> > > cProfile::myMap;
// Scope exit: compute elapsed ticks and feed them into this scope's accumulator.
cProfile::~cProfile()
{
__int64 t=0;
QueryPerformanceCounter( (LARGE_INTEGER *)&t );
t -= myTimeStart;
map < string, acc_t >::iterator p = myMap.find( myName );
if( p == myMap.end() ) {
// this is the first time this scope has run
acc_t acc;
pair<string,acc_t > pr(myName,acc);
p = myMap.insert( pr ).first;
}
// add the time of running to the accumulator for this scope
(p->second)( t );
}
// Generate profile report
// Converts accumulated tick statistics to seconds using the QPC frequency
// and prints one line per profiled scope: call count, mean, std deviation.
cProfile::cProfile()
{
__int64 f;
QueryPerformanceFrequency( (LARGE_INTEGER *)&f );
printf("%20s Calls\tMean (secs)\tStdDev\n","Scope");
for( map < string, accumulator_set<__int64, stats<tag::variance(lazy)> > >::iterator p = myMap.begin();
p != myMap.end(); p++ )
{
// mean/variance are in ticks; divide by the tick frequency for seconds.
float av = mean(p->second) / f;
float stdev = sqrt( ((double) variance(p->second)) ) / f;
printf("%20s %d\t%f\t%f\n",p->first.c_str(),
boost::accumulators::count(p->second), av, stdev);
}
}
// Demo scope: sleeps a random 10-19 ms while timed under the "a" profiler.
void a()
{
    cProfile scopeTimer( "a" );
    int const delayMs = rand() % 10 + 10;
    Sleep ( delayMs );
}
// Demo scope: sleeps a random 5-24 ms while timed under the "b" profiler.
void b()
{
    cProfile scopeTimer( "b" );
    int const delayMs = rand() % 20 + 5;
    Sleep ( delayMs );
}
// Run both demo scopes 100 times, then emit the profiling report via a
// default-constructed cProfile.
int _tmain(int argc, _TCHAR* argv[])
{
    int iterations = 100;
    while (iterations-- > 0) {
        a();
        b();
    }
    cProfile profile_report;  // default ctor prints the report
    return 0;
}
Which produces
Scope Calls Mean (secs) StdDev
a 100 0.014928 0.002827
b 100 0.015254 0.005671
Maybe does not apply, as it's gcc-specific, but I had found this save me a couple of
times at least. If you compile the code with "-finstrument-functions" flag, then every entry and exit point of a function in the module that was compiled with this flag, will be stubbed with the calls to instrument functions. All you have to do is to have an inline that would read some high-precision counter (e.g. rdtsc on x86, though see this discussion) and a large array of records: [ func_addr, is_enter, timer_value ] to which you would continually write in the instrument functions. Upon exit, dump this array to file and analyze offline.
Quite far from "automated" approach, that you probably were looking for - but hope this is of use. The sample below shows the behaviour of gcc if it is compiled with -finstrument-functions. If you do not include the flag, it will work "as normal".
#include <stdio.h>
#include <stdlib.h>
// The profiling hooks themselves must not be instrumented, or they would
// recurse; no_instrument_function exempts them from -finstrument-functions.
void __cyg_profile_func_enter(void *fn, void *call)
__attribute__ ((no_instrument_function));
void __cyg_profile_func_exit(void *fn, void *call)
__attribute__ ((no_instrument_function));
// Called on entry to every instrumented function (-finstrument-functions).
// fn = address of the entered function, call = address of the call site.
void __cyg_profile_func_enter(void *fn, void *call) {
    // FIX: %x with a void* argument is undefined behavior (and truncates
    // on 64-bit); %p is the pointer conversion specifier.
    printf("Enter %p,%p\n", fn, call);
}
// Called on exit from every instrumented function; same FIX as the enter
// hook: %p instead of %x for pointer arguments.
void __cyg_profile_func_exit(void *fn, void *call) {
    printf("Exit %p,%p\n", fn, call);
}
// Demo function to be instrumented.  FIX: it is declared int but had no
// return statement (UB if the caller used the value); it now returns 0.
// The parameter is kept for interface compatibility.
int foo(int i) {
    (void)i;  // unused in the demo
    printf("inside foo\n");
    return 0;
}
// Demo driver: when built with -finstrument-functions, Enter/Exit hook
// lines interleave with these printf messages around the call to foo.
int main(int argc, char *argv[]) {
printf("inside main 1\n");
foo(123);
printf("inside main 2\n");
// exit(0) rather than return, per the original sample.
exit(0);
}
I think Apple's Shark profiling tool can generate the mean for each function. Of course, that only helps you on a Mac.
Actually Oprofile can profile function from a call graph view, which means same callee routine with different caller will be profiled in different stats.
Try opreport command for the report.

How do you create a debug only function that takes a variable argument list? Like printf()

I'd like to make a debug logging function with the same parameters as printf. But one that can be removed by the pre-processor during optimized builds.
For example:
Debug_Print("Warning: value %d > 3!\n", value);
I've looked at variadic macros but those aren't available on all platforms. gcc supports them, msvc does not.
I still do it the old way, by defining a macro (XTRACE, below) which correlates to either a no-op or a function call with a variable argument list. Internally, call vsnprintf so you can keep the printf syntax:
#include <stdio.h>
// Writes a fixed (already formatted) string to the debugger output (Win32).
void XTrace0(LPCTSTR lpszText)
{
::OutputDebugString(lpszText);
}
// printf-style trace to the debugger output window (Win32).
// NOTE(review): output longer than 511 TCHARs is silently truncated.
void XTrace(LPCTSTR lpszFormat, ...)
{
    va_list args;
    va_start(args, lpszFormat);
    TCHAR szBuffer[512]; // get rid of this hard-coded buffer
    int nBuf = _vsnprintf(szBuffer, 511, lpszFormat, args);
    // FIX: _vsnprintf does not null-terminate when the output fills the
    // given count (it returns -1 / the count); terminate explicitly so
    // OutputDebugString never reads past the buffer.
    (void)nBuf;
    szBuffer[511] = 0;
    ::OutputDebugString(szBuffer);
    va_end(args);
}
Then a typical #ifdef switch:
// Route XTRACE to the real function only in debug builds.
#ifdef _DEBUG
#define XTRACE XTrace
#else
// NOTE(review): the empty expansion leaves "(args...);" behind, which
// still evaluates the arguments -- see the "0 && XTrace" refinement
// discussed later in this thread.
#define XTRACE
#endif
Well that can be cleaned up quite a bit but it's the basic idea.
This is how I do debug print outs in C++. Define 'dout' (debug out) like this:
// Debug stream: in DEBUG builds 'dout' is cout; otherwise it expands to
// "0 && cout" -- && short-circuits (and binds looser than <<), so the
// whole statement evaluates to 0 and the compiler emits no code for it.
#ifdef DEBUG
#define dout cout
#else
#define dout 0 && cout
#endif
In the code I use 'dout' just like 'cout'.
dout << "in foobar with x= " << x << " and y= " << y << '\n';
If the preprocessor replaces 'dout' with '0 && cout' note that << has higher precedence than && and short-circuit evaluation of && makes the whole line evaluate to 0. Since the 0 is not used the compiler generates no code at all for that line.
Here's something that I do in C/C++. First off, you write a function that uses the varargs stuff (see the link in Stu's posting). Then do something like this:
// Variadic debug printf without variadic macros: the double-parenthesis
// calling convention makes the whole argument list a single macro argument.
int debug_printf( const char *fmt, ... );
#if defined( DEBUG )
// DEBUG_PRINTF(("fmt", a, b)) expands to debug_printf ("fmt", a, b)
#define DEBUG_PRINTF(x) debug_printf x
#else
// Non-debug: the entire statement disappears (arguments not evaluated).
#define DEBUG_PRINTF(x)
#endif
DEBUG_PRINTF(( "Format string that takes %s %s\n", "any number", "of args" ));
All you have to remember is to use double-parens when calling the debug function, and the whole line will get removed in non-DEBUG code.
Ah, vsprintf() was the thing I was missing. I can use this to pass the variable argument list directly to printf():
#include <stdarg.h>
#include <stdio.h>
void DBG_PrintImpl(char * format, ...)
{
char buffer[256];
va_list args;
va_start(args, format);
vsprintf(buffer, format, args);
printf("%s", buffer);
va_end(args);
}
Then wrap the whole thing in a macro.
Another fun way to stub out variadic functions is:
#define function sizeof
#CodingTheWheel:
There is one slight problem with your approach. Consider a call such as
XTRACE("x=%d", x);
This works fine in the debug build, but in the release build it will expand to:
("x=%d", x);
Which is perfectly legitimate C and will compile and usually run without side-effects but generates unnecessary code. The approach I usually use to eliminate that problem is:
Make the XTrace function return an int (just return 0, the return value doesn't matter)
Change the #define in the #else clause to:
0 && XTrace
Now the release version will expand to:
0 && XTrace("x=%d", x);
and any decent optimizer will throw away the whole thing since short-circuit evaluation would have prevented anything after the && from ever being executed.
Of course, just as I wrote that last sentence, I realized that perhaps the original form might be optimized away too and in the case of side effects, such as function calls passed as parameters to XTrace, it might be a better solution since it will make sure that debug and release versions will behave the same.
In C++ you can use the streaming operator to simplify things:
// Stream-style trace facility: a real singleton in debug builds, nothing
// at all in release builds.
#if defined _DEBUG
class Trace
{
public:
// Meyers singleton: one Trace instance shared program-wide.
static Trace &GetTrace () { static Trace trace; return trace; }
Trace &operator << (int value) { /* output int */ return *this; }
Trace &operator << (short value) { /* output short */ return *this; }
// Accepts manipulators (e.g. Trace::Endl) and applies them to the stream.
Trace &operator << (Trace &(*function)(Trace &trace)) { return function (*this); }
static Trace &Endl (Trace &trace) { /* write newline and flush output */ return trace; }
// and so on
};
// Each TRACE(...) becomes one chained << expression ending in newline+flush.
#define TRACE(message) Trace::GetTrace () << message << Trace::Endl
#else
// Release build: TRACE statements vanish entirely (arguments not evaluated).
#define TRACE(message)
#endif
and use it like:
// Example: TRACE pastes its argument into a << chain, so this mixed
// string/value syntax streams exactly like std::cout usage would.
void Function (int param1, short param2)
{
TRACE ("param1 = " << param1 << ", param2 = " << param2);
}
You can then implement customised trace output for classes in much the same way you would do it for outputting to std::cout.
What platforms are they not available on? stdarg is part of the standard library:
http://www.opengroup.org/onlinepubs/009695399/basedefs/stdarg.h.html
Any platform not providing it is not a standard C implementation (or very, very old). For those, you will have to use varargs:
http://opengroup.org/onlinepubs/007908775/xsh/varargs.h.html
Part of the problem with this kind of functionality is that often it requires
variadic macros. These were standardized fairly recently(C99), and lots of
old C compilers do not support the standard, or have their own special work
around.
Below is a debug header I wrote that has several cool features:
Supports C99 and C89 syntax for debug macros
Enable/Disable output based on function argument
Output to file descriptor(file io)
Note: For some reason I had some slight code formatting problems.
// Debug logging header: printf-style PRINT/DEBUG/DEBUGPRINT macros with a
// per-call enable flag and optional redirection to a file (debug_fd).
// Falls back from C99 variadic macros to real functions on C89 compilers.
#ifndef _DEBUG_H_
#define _DEBUG_H_
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include "stdarg.h"
#include "stdio.h"
// Values for the first (enable) argument of each macro.
#define ENABLE 1
#define DISABLE 0
// When non-NULL (set via debug_file_init), output goes here instead of
// stdout/stderr.
extern FILE* debug_fd;
int debug_file_init(char *file);
int debug_file_close(void);
#if HAVE_C99
// Unconditional print (independent of _DEBUG).  ##__VA_ARGS__ swallows
// the trailing comma when no varargs are given (gcc extension).
#define PRINT(x, format, ...) \
if ( x ) { \
if ( debug_fd != NULL ) { \
fprintf(debug_fd, format, ##__VA_ARGS__); \
} \
else { \
fprintf(stdout, format, ##__VA_ARGS__); \
} \
}
#else
// C89 fallback: a real variadic function instead of a macro.
void PRINT(int enable, char *fmt, ...);
#endif
#if _DEBUG
#if HAVE_C99
// Debug print prefixed with "file : line"; enabled per call site via x.
#define DEBUG(x, format, ...) \
if ( x ) { \
if ( debug_fd != NULL ) { \
fprintf(debug_fd, "%s : %d " format, __FILE__, __LINE__, ##__VA_ARGS__); \
} \
else { \
fprintf(stderr, "%s : %d " format, __FILE__, __LINE__, ##__VA_ARGS__); \
} \
}
// Same as DEBUG but without the file/line prefix.
#define DEBUGPRINT(x, format, ...) \
if ( x ) { \
if ( debug_fd != NULL ) { \
fprintf(debug_fd, format, ##__VA_ARGS__); \
} \
else { \
fprintf(stderr, format, ##__VA_ARGS__); \
} \
}
#else /* HAVE_C99 */
void DEBUG(int enable, char *fmt, ...);
void DEBUGPRINT(int enable, char *fmt, ...);
#endif /* HAVE_C99 */
#else /* _DEBUG */
// Non-debug build: DEBUG/DEBUGPRINT statements compile to nothing.
#define DEBUG(x, format, ...)
#define DEBUGPRINT(x, format, ...)
#endif /* _DEBUG */
#endif /* _DEBUG_H_ */
Have a look at this thread:
How to make a variadic macro (variable number of arguments)
It should answer your question.
This is what I use:
// Level-filtered debug print: writes to stdout when (debugPrint & level)
// is set; compiles to an empty function unless _DEBUG_LOG is defined.
// FIX: the format parameter is now const char* -- passing a string
// literal to a plain char* parameter is ill-formed in modern C++.
inline void DPRINTF(int level, const char *format, ...)
{
# ifdef _DEBUG_LOG
    va_list args;
    va_start(args, format);
    // debugPrint is an externally defined bitmask of enabled levels.
    if(debugPrint & level) {
        vfprintf(stdout, format, args);
    }
    va_end(args);
# endif /* _DEBUG_LOG */
}
which costs absolutely nothing at run-time when the _DEBUG_LOG flag is turned off.
This is a TCHAR version of user's answer, so it will work as ASCII (normal), or Unicode mode (more or less).
// TCHAR-aware debug macro: wraps the caller's format string in TEXT() so
// the same call site builds in ANSI and Unicode configurations.
// NOTE(review): "TEXT(##fmt)" pastes "(" with the argument -- MSVC's
// permissive preprocessor accepts this, gcc/clang will not; verify before
// porting.
#define DEBUG_OUT( fmt, ...) DEBUG_OUT_TCHAR( \
TEXT(##fmt), ##__VA_ARGS__ )
// Prefixes every message with "[DEBUG]".
// NOTE(review): "#fmt" stringizes the already-TEXT-wrapped argument here,
// which looks unintended (plain fmt would be expected); confirm on MSVC.
#define DEBUG_OUT_TCHAR( fmt, ...) \
Trace( TEXT("[DEBUG]") #fmt, \
##__VA_ARGS__ )
// printf-style trace to the debugger output (TCHAR build-aware).
// Formats into a 4096-TCHAR heap buffer, sends it to OutputDebugString,
// then frees the buffer.  Output beyond 4094 TCHARs is truncated by
// _vstprintf_s.
void Trace(LPCTSTR format, ...)
{
    LPTSTR OutputBuf;
    OutputBuf = (LPTSTR)LocalAlloc(LMEM_ZEROINIT,
        (size_t)(4096 * sizeof(TCHAR)));
    // FIX: LocalAlloc can fail; the old code passed NULL straight into
    // _vstprintf_s and crashed.  Best-effort tracing: just bail out.
    if (OutputBuf == NULL)
        return;
    va_list args;
    va_start(args, format);
    _vstprintf_s(OutputBuf, 4095, format, args);
    ::OutputDebugString(OutputBuf);
    va_end(args);
    LocalFree(OutputBuf); // tyvm #sam shaw
}
I say, "more or less", because it won't automatically convert ASCII string arguments to WCHAR, but it should get you out of most Unicode scrapes without having to worry about wrapping the format string in TEXT() or preceding it with L.
Largely derived from MSDN: Retrieving the Last-Error Code
Not exactly what's asked in the question, but this code will be helpful for debugging purposes: it will print each variable's value along with its name. It is completely type-independent and supports a variable number of arguments.
And can even display values of STL's nicely , given that you overload output operator for them
// show(a,b,c) prints "a = <a> | b = <b> | c = <c> " to std::clog by
// stringizing the argument list and walking it in parallel with the
// values.  (#args / args... is a GNU variadic-macro spelling, accepted
// by gcc and clang.)
#define show(args...) describe(#args,args);
// Base case: one variable left; prints "name = value " (no separator).
template<typename T>
void describe(std::string var_name, T value)
{
    std::clog << var_name << " = " << value << " ";
}
// Recursive case: split the first name off the comma-separated list,
// print "name = value | ", then recurse on the remaining names/values.
// FIX: names are now std::-qualified -- the original relied on an
// unshown "using namespace std;" and did not compile standalone.
template<typename T, typename... Args>
void describe(std::string var_names, T value, Args... args)
{
    std::string::size_type pos = var_names.find(',');
    std::string name = var_names.substr(0, pos);
    var_names = var_names.substr(pos + 1);
    std::clog << name << " = " << value << " | ";
    describe(var_names, args...);
}
Sample Use :
// Example: show() prints each variable's name alongside its value.
// Expected output: a = string here | b = 7 | c = 3.14
int main()
{
string a;
int b;
double c;
a="string here";
b = 7;
c= 3.14;
show(a,b,c);
}
Output :
a = string here | b = 7 | c = 3.14
Having come across the problem today, my solution is the following macro:
// Shared scratch buffer for DLog.  NOTE(review): not thread-safe --
// concurrent DLog calls race on this buffer.
static TCHAR __DEBUG_BUF[1024];
// printf-style debug output via OutputDebugString.
// FIX 1: the two statements are wrapped in do { } while (0) so that
// "if (cond) DLog(...);" runs both of them (the old one-line form ran
// OutputDebugString unconditionally).
// FIX 2: swprintf is given the buffer size (ISO form) so long messages
// are truncated instead of overflowing __DEBUG_BUF.
#define DLog(fmt, ...) do { swprintf(__DEBUG_BUF, 1024, fmt, ##__VA_ARGS__); OutputDebugString(__DEBUG_BUF); } while (0)
You can then call the function like this:
int value = 42;
DLog(L"The answer is: %d\n", value);