Sometimes I have to parse text files with various encodings, and I wonder if the upcoming standard will bring some tools for this, because I'm not very happy with my current solution.
I'm not even sure if this is the right approach, but I define a functor template to extract a character from a stream:
#include <string>
#include <istream> // 'std::istream'
/////////////////////////////////////////////////////////////////////////////
// Generic implementation (couldn't resist to put one)
template<bool LE,typename T> class ReadChar
{
public:
std::istream& operator()(T& c, std::istream& in)
{
in.read(reinterpret_cast<char*>(buf), bufsiz);
//const std::streamsize n_read = in ? bufsiz : in.gcount();
if(!in)
{// Could not read all bytes
c = std::char_traits<T>::eof();
}
else if constexpr (LE)
{// Little endian
c = buf[0];
for(int i=1; i<bufsiz; ++i) c |= buf[i] << (8*i);
}
else
{// Big endian
const std::size_t imax = bufsiz-1;
c = 0; // start from zero before OR-ing in the bytes
for(std::size_t i=0; i<imax; ++i) c |= buf[i] << (8*(imax-i));
c |= buf[imax];
}
return in;
}
private:
static constexpr std::size_t bufsiz = sizeof(T);
unsigned char buf[bufsiz];
};
/////////////////////////////////////////////////////////////////////////////
// Partial specialization for 32bit chars
template<bool LE> class ReadChar<LE,char32_t>
{
public:
std::istream& operator()(char32_t& c, std::istream& in)
{
in.read(reinterpret_cast<char*>(buf), 4);
if constexpr (LE) c = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); // Little endian
else c = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; // Big endian
return in;
}
private:
unsigned char buf[4]; // unsigned so the shifts don't sign-extend
};
/////////////////////////////////////////////////////////////////////////////
// Partial specialization for 16bit chars
template<bool LE> class ReadChar<LE,char16_t>
{
public:
std::istream& operator()(char16_t& c, std::istream& in)
{
in.read(reinterpret_cast<char*>(buf), 2);
if constexpr (LE) c = buf[0] | (buf[1] << 8); // Little endian
else c = (buf[0] << 8) | buf[1]; // Big endian
return in;
}
private:
unsigned char buf[2]; // unsigned so the shifts don't sign-extend
};
/////////////////////////////////////////////////////////////////////////////
// Specialization for 8bit chars
template<> class ReadChar<false,char>
{
public:
std::istream& operator()(char& c, std::istream& in)
{
return in.get(c);
}
};
I use ReadChar to implement the parsing function:
template<typename T,bool LE=false> void parse(std::istream& fin)
{
ReadChar<LE,T> get;
T c;
while( get(c,fin) )
{
if(c==static_cast<T>('a')) {/* ... */} // Ugly comparison of T with a char literal
}
}
The ugly part is the static_cast when I need to compare with a char literal.
Then I use parse with this ugly boilerplate code:
#include <fstream> // 'std::ifstream'
std::ifstream fin("/path/to/file", std::ios::binary);
auto bom = check_bom(fin); // 'check_bom' function is quite trivial
if( bom.is_empty() ) parse<char>(fin);
else if( bom.is_utf8() ) parse<char>(fin); // In my case there's no need to handle multi-byte chars
else if( bom.is_utf16le() ) parse<char16_t,true>(fin);
else if( bom.is_utf16be() ) parse<char16_t,false>(fin);
else if( bom.is_utf32le() ) parse<char32_t,true>(fin);
else if( bom.is_utf32be() ) parse<char32_t,false>(fin);
else throw std::runtime_error("Unrecognized BOM");
Now, this solution has some quirks (I can't directly use string literals in parse); my question is whether there are alternative approaches to this problem, maybe using existing or upcoming standard facilities that I'm not aware of.
In C++17 we gained type-safe unions (std::variant). These can be used to map between runtime and compile-time state together with std::visit.
template<auto x>
using constant_t = std::integral_constant<std::decay_t<decltype(x)>, x>;
template<auto x>
constexpr constant_t<x> constant = {};
template<auto...Xs>
using variant_enum_t = std::variant< constant_t<Xs>... >;
enum class EBom {
None,
utf8,
utf16le,
utf16be,
utf32le,
utf32be,
count,
};
// you could use the existence of EBom::count and the
// assumption of contiguous indexes to automate this as well:
using VEBom = variant_enum_t< EBom::None, EBom::utf8, EBom::utf16le, EBom::utf16be, EBom::utf32le, EBom::utf32be >;
template<std::size_t...Is>
constexpr VEBom make_ve_bom( EBom bom, std::index_sequence<Is...> ) {
constexpr VEBom retvals[] = {
constant<static_cast<EBom>(Is)>...
};
return retvals[ static_cast<std::size_t>(bom) ];
}
constexpr VEBom make_ve_bom( EBom bom ) {
return make_ve_bom( bom, std::make_index_sequence< static_cast<std::size_t>(EBom::count) >{} );
}
And now, with a runtime EBom value, we can produce a VEBom.
With that VEBom we can get at the type at compile time. Suppose you have traits, like:
template<EBom>
constexpr bool bom_is_bigendian_v = ???;
template<EBom>
using bom_chartype_t = ???;
you can now write code like:
std::visit( [&](auto bom) {
bom_chartype_t<bom> next = ???;
if constexpr (bom_is_bigendian_v<bom>) {
// swizzle
}
}, vebom );
etc.
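One way those traits could be filled in, just as a sketch of my own (the mapping of BOM-less and UTF-8 files to plain char mirrors your boilerplate; none of this is standard machinery):
// Sketch: possible trait definitions, not from any library
template<EBom> constexpr bool bom_is_bigendian_v = false;
template<> constexpr bool bom_is_bigendian_v<EBom::utf16be> = true;
template<> constexpr bool bom_is_bigendian_v<EBom::utf32be> = true;
template<EBom> struct bom_chartype { using type = char; }; // None and utf8
template<> struct bom_chartype<EBom::utf16le> { using type = char16_t; };
template<> struct bom_chartype<EBom::utf16be> { using type = char16_t; };
template<> struct bom_chartype<EBom::utf32le> { using type = char32_t; };
template<> struct bom_chartype<EBom::utf32be> { using type = char32_t; };
template<EBom B> using bom_chartype_t = typename bom_chartype<B>::type;
Inside the visitor the lambda parameter is an integral_constant object, so the compile-time enum value is spelled decltype(bom)::value when you feed it to traits like these.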
Your non-DRY code
template<bool LE, class char_t> class ReadChar {
public:
std::istream& operator()(char_t& c, std::istream& in)
{
in.read(buf,sizeof(char_t));
c = 0; // assemble assuming little endian, whatever sizeof(char_t) is
for(std::size_t i = 0; i < sizeof(char_t); ++i)
c |= char_t(static_cast<unsigned char>(buf[i])) << (8*i);
if constexpr(!LE)
reverse_bytes(&c);
return in;
}
private:
char buf[sizeof(char_t)];
};
becomes DRY with a simple rewrite.
Your boilerplate becomes:
std::ifstream fin("/path/to/file", std::ios::binary);
auto bom = check_bom(fin); // 'check_bom' function is quite trivial
if (bom.invalid())
throw std::runtime_error("Unrecognized BOM");
auto vebom = make_ve_bom( bom.getEnum() );
std::visit( [&]( auto ebom ) {
parse<bom_chartype_t<ebom>, !bom_is_bigendian_v<ebom>>( fin );
}, vebom );
and the magic is done elsewhere.
The magic here is that the std::variant holds a bunch of integral_constants, each of which is both stateless and knows (in its type) what its value is.
So the only state in the std::variant is which of the stateless enum values it contains.
std::visit proceeds to call the passed in lambda with whichever stateless std::integral_constant that is in the std::variant. Within that lambda, we can use its value as a compile time constant, like we would with any other std::integral_constant.
The runtime state of the std::variant is actually the value of the EBom because of how we set it up, so converting an EBom to a VEBom is literally copying the number over (so, free). The magic is in std::visit, which automates writing the switch statement and injecting the compile time (integral constant) value for each of the possibilities into your code.
None of this is C++23. Most of it is C++17; I may have used a C++20 feature in there as well.
The above code is not compiled, it is just written. It probably contains typos, but the technique is sound.
--
We can automate the making of the variant type:
template<class Enum, std::size_t...Is, class VEnum=variant_enum_t<
static_cast<Enum>(Is)...
>>
constexpr VEnum make_venum( Enum e, std::index_sequence<Is...> ) {
constexpr VEnum retvals[] = {
constant<static_cast<Enum>(Is)>...
};
return retvals[ static_cast<std::size_t>(e) ];
}
template<class Enum>
constexpr auto make_venum( Enum e ) {
return make_venum( e, std::make_index_sequence< static_cast<std::size_t>(Enum::count) >{} );
}
template<class Enum>
using venum_t = decltype(make_venum( static_cast<Enum>(0) ));
now our VEBom is just:
using VEBom = venum_t<EBom>;
Anyhow, a live example with typos fixed.
I have a class that looks like this: Foo is my class; FooBar is a bunch of different types of classes from a library that each have independent names.
Foo.h
class Foo {
public:
Foo() = default;
// There are many types, and many of these types have multiple constructors
// All of the appropriate overloads are available here.
template<class Type>
FooBar<Type>& getFooBarByFullName( ... ) {
// construct & return FooBar<Type>(...);
}
// Then I have a handful of overloaded function template declarations
// with a generic name to call the appropriate functions from above.
// Each FooBar has different parameters, and FooBar is a class template.
template<class Type, template<typename> class FooBar>
FooBar<Type>& getFooBar(...);
};
// Outside of any class I have a generic function template
template<class Other, class Type, template<typename> class FooBar, class... FP>
Type doSomething( some param A, some param B, some param C, FP... params ) {
// Code here to work with Other using A, B & C
FooBar<Type> fooBar = getFooBar<Type, FooBar>( params... );
// do something to fooBar
return value generated from fooBar;
}
Foo.cpp
#include "Foo.h"
template<class Type, template<typename> class FooBar>
FooBar<Type>& getFooBar(...) {
return {};
}
template<>
FooBar<int>& Foo::getFooBar( ... ) {
return getFooBarByFullName( ... );
}
template<>
FooBar<short>& Foo::getFooBar( ... ) {
return getFooBarByFullName( ... );
}
// etc...
One of the implementations that I'm working on has, as one of its template parameters, a class unary_op.
I do not want to define any such class. I need to be able to pass either a function object, function pointer, lambda, or std::function to these functions as the unary_op class.
The problem I'm running into is if my declaration(s) in my header looks like this:
template<class IntType = int, class UnaryOp>
FooBar<IntType>& getFooBarByFullName( std::size_t count, double xmin, double xmax, UnaryOp fw ) {
// Constructors last parameter is defined as a class UnaryOp;
// but can be any of a function object, function pointer, lambda, std::function<...> etc.
FooBar<IntType> fooBar( count, xmin, xmax, fw );
}
// Then I can declare the appropriate generic declaration overload here
template<class Type, template<typename> class FooBar, class FuncOp>
FooBar<Type>& getFooBar( std::size_t count, double xmin, double xmax, FuncOp fw ); // Declaration only
However, when I go to the cpp file to write the definitions as explicit specializations of the appropriate overloaded declarations, avoiding ambiguity is where I get into trouble.
template<>
FooBar<int>& Foo::getFooBar( std::size_t count, double xmin, double xmax, ? ) {
return getFooBarByFullName<int>( count, xmin, xmax, ? );
}
template<>
FooBar<short>& Foo::getFooBar( std::size_t count, double xmin, double xmax, ? ) {
return getFooBarByFullName<short>( count, xmin, xmax, ? );
}
As you can see, I don't know how to define the last parameter of type class UnaryOp. I would also like the caller to be able to pass any of the types I mentioned above (function object, function pointer, lambda, std::function<>) as the last parameter for the UnaryOp. I don't know where to go from here...
Edit - I forgot to mention that in my actual code the two classes above have deleted default constructors, and all the class methods are static.
It's unclear what you're actually asking, but it appears that your problem is to create an instantiatable yet generic function in your .cpp file. I think there are two options to solve this problem:
Abandon your plan: make these methods templates living only in the .hpp file and taking UnaryOp as a (deducible) template parameter.
.hpp:
template<typename Type, typename UnaryOp>
Type Qoo(Type const&x, UnaryOp&&func)
{
// some simple/short code calling func()
}
Implement a function overload for UnaryOp = std::function in your .cpp file and implement the general UnaryOp (lambda, functor, function pointer etc) as template in your .hpp file, calling the former using a std::function object created from whatever UnaryOp is.
.hpp:
template<typename Type>
Type Qoo(Type const&, std::function<Type(Type)>&&);
template<typename Type, typename UnaryOp>
Type Qoo(Type const&x, UnaryOp&&func)
{
return Qoo(x, std::function<Type(Type)>{func});
}
.cpp
template<typename Type>
Type Qoo(Type const&t, std::function<Type(Type)>&&func)
{
// some lengthy code calling func()
}
// explicit instantiations
template int Qoo(int const&, std::function<int(int)>&&);
template short Qoo(short const&, std::function<short(short)>&&);
...
The second version allows pre-compilation, but generates overheads in case UnaryOp≠std::function<>.
The first solution avoids such overheads but exposes the full implementation to the .hpp file and does not offer the benefit of pre-compilation.
In similar situations, I tend to use the second version if the code implemented is substantial, such that the overhead of the std::function object can be tolerated, and the first version only for small code, which generally should be inline anyway.
Finally, note that in the .cpp file you don't need to define all the specialisations; you just provide the template definition and the explicit instantiations.
Okay, scrap that whole idea above: I went and entirely rewrote my classes into a single class. The class itself is now a class template, and it looks like this:
#ifndef GENERATOR_H
#define GENERATOR_H
#include <limits>
#include <chrono>
#include <random>
#include <type_traits>
enum SeedType { USE_CHRONO_CLOCK, USE_RANDOM_DEVICE, USE_SEED_VALUE, USE_SEED_SEQ };
template<class Engine, class Type, template<typename> class Distribution>
class Generator {
public:
using Clock = std::conditional_t<std::chrono::high_resolution_clock::is_steady,
std::chrono::high_resolution_clock,
std::chrono::steady_clock>;
private:
Engine _engine;
Distribution<Type> _distribution;
Type _value;
public:
template<class... Params>
explicit Generator( Engine engine, Params... params ) : _engine( engine ) {
_distribution = Distribution<Type>( params... );
}
void seed( SeedType type = USE_RANDOM_DEVICE, std::size_t seedValue = 0, std::initializer_list<std::size_t> list = {} ) {
switch( type ) {
case USE_CHRONO_CLOCK: { _engine.seed( getTimeNow() ); break; }
case USE_RANDOM_DEVICE: { std::random_device device{};
_engine.seed( device() ); break; }
case USE_SEED_VALUE: { _engine.seed( seedValue ); break; }
case USE_SEED_SEQ: { std::seed_seq seq( list );
_engine.seed( seq ); break; }
}
}
void generate() {
_value = _distribution( _engine );
}
Type getGeneratedValue() const {
return _value;
}
Distribution<Type> getDistribution() const {
return _distribution;
}
std::size_t getTimeNow() {
std::size_t now = static_cast<std::size_t>(Clock::now().time_since_epoch().count());
return now;
}
};
#endif // !GENERATOR_H
And to use it is as simple as:
#include <iostream>
#include <iomanip>
#include <vector>
#include "generator.h"
int main() {
// Engine, Seeding Type, & Distribution Combo 1
std::mt19937 engine1;
Generator<std::mt19937, short, std::uniform_int_distribution> g1( engine1, 1, 100 );
g1.seed( USE_RANDOM_DEVICE );
std::vector<short> vals1;
for( unsigned int i = 0; i < 200; i++ ) {
g1.generate();
auto v = g1.getGeneratedValue();
vals1.push_back( v );
}
int i = 0;
for( auto& v : vals1 ) {
if( (i % 10) != 0 ) {
std::cout << std::setw( 3 ) << v << " ";
} else {
std::cout << '\n' << std::setw( 3 ) << v << " ";
}
i++;
}
std::cout << "\n\n";
// Engine, Seeding Type, & Distribution Combo 2
std::ranlux48 engine2;
std::initializer_list<std::size_t> list2{ 3, 7, 13, 17, 27, 31, 43 };
Generator<std::ranlux48, unsigned, std::binomial_distribution> g2( engine2, 50, 0.75 );
g2.seed( USE_SEED_SEQ, std::size_t(7), list2 );
std::vector<unsigned> vals2;
for( int i = 0; i < 200; i++ ) {
g2.generate();
auto v = g2.getGeneratedValue();
vals2.push_back( v );
}
for( auto& v : vals2 ) {
if( (i % 10) != 0 ) {
std::cout << std::setw( 3 ) << v << " ";
} else {
std::cout << '\n' << std::setw( 3 ) << v << " ";
}
i++;
}
std::cout << "\n\n";
// Engine, Seeding Type, & Distribution Combo 3
std::minstd_rand engine3;
Generator<std::minstd_rand, float, std::gamma_distribution> g3( engine3, 0.22222f, 0.7959753f );
g3.seed( USE_CHRONO_CLOCK );
std::vector<float> vals3;
for( int i = 0; i < 200; i++ ) {
g3.generate();
auto v = g3.getGeneratedValue();
vals3.push_back( v );
}
for( auto& v : vals3 ) {
if( (i % 5 ) != 0 ) {
std::cout << std::setw( 12 ) << v << " ";
} else {
std::cout << '\n' << std::setw( 12 ) << v << " ";
}
i++;
}
std::cout << "\n\n";
std::cout << "\nPress any key and enter to quit.\n";
std::cin.get();
return 0;
}
Treating enums as flags works nicely in C# via the [Flags] attribute, but what's the best way to do this in C++?
For example, I'd like to write:
enum AnimalFlags
{
HasClaws = 1,
CanFly =2,
EatsFish = 4,
Endangered = 8
};
seahawk.flags = CanFly | EatsFish | Endangered;
However, I get compiler errors regarding int/enum conversions. Is there a nicer way to express this than just blunt casting? Preferably, I don't want to rely on constructs from 3rd party libraries such as boost or Qt.
EDIT: As indicated in the answers, I can avoid the compiler error by declaring seahawk.flags as int. However, I'd like to have some mechanism to enforce type safety, so someone can't write seahawk.flags = HasMaximizeButton.
The "correct" way is to define bit operators for the enum, as:
enum AnimalFlags
{
HasClaws = 1,
CanFly = 2,
EatsFish = 4,
Endangered = 8
};
inline AnimalFlags operator|(AnimalFlags a, AnimalFlags b)
{
return static_cast<AnimalFlags>(static_cast<int>(a) | static_cast<int>(b));
}
And so on for the rest of the bit operators (see the sketch just below). Modify as needed if the enum range exceeds the int range.
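A sketch of those remaining operators, following exactly the same pattern (not in the original answer, but mechanical to derive):
// Sketch: same casting pattern as operator| above
inline AnimalFlags operator&(AnimalFlags a, AnimalFlags b)
{
return static_cast<AnimalFlags>(static_cast<int>(a) & static_cast<int>(b));
}
inline AnimalFlags operator~(AnimalFlags a)
{
return static_cast<AnimalFlags>(~static_cast<int>(a));
}
inline AnimalFlags& operator|=(AnimalFlags& a, AnimalFlags b)
{
return a = a | b;
}
inline AnimalFlags& operator&=(AnimalFlags& a, AnimalFlags b)
{
return a = a & b;
}
operator^ and operator^= follow the same shape.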
Note (also a bit off topic): Another way to make unique flags can be done using a bit shift. I, myself, find this easier to read.
enum Flags
{
A = 1 << 0, // binary 0001
B = 1 << 1, // binary 0010
C = 1 << 2, // binary 0100
D = 1 << 3 // binary 1000
};
An enum can hold values up to the size of an int, so most of the time that gives 32 flags, which is clearly reflected in the shift amount.
Note if you are working in Windows environment, there is a DEFINE_ENUM_FLAG_OPERATORS macro defined in winnt.h that does the job for you. So in this case, you can do this:
enum AnimalFlags
{
HasClaws = 1,
CanFly =2,
EatsFish = 4,
Endangered = 8
};
DEFINE_ENUM_FLAG_OPERATORS(AnimalFlags)
seahawk.flags = CanFly | EatsFish | Endangered;
For lazy people like me, here is a templated solution to copy & paste:
template<class T> inline T operator~ (T a) { return (T)~(int)a; }
template<class T> inline T operator| (T a, T b) { return (T)((int)a | (int)b); }
template<class T> inline T operator& (T a, T b) { return (T)((int)a & (int)b); }
template<class T> inline T operator^ (T a, T b) { return (T)((int)a ^ (int)b); }
template<class T> inline T& operator|= (T& a, T b) { return (T&)((int&)a |= (int)b); }
template<class T> inline T& operator&= (T& a, T b) { return (T&)((int&)a &= (int)b); }
template<class T> inline T& operator^= (T& a, T b) { return (T&)((int&)a ^= (int)b); }
What type is the seahawk.flags variable?
In standard C++, enumerations are not type-safe. They are effectively integers.
AnimalFlags should NOT be the type of your variable. Your variable should be int and the error will go away.
Putting hexadecimal values like some other people suggested is not needed. It makes no difference.
The enum values ARE of type int by default. So you can surely combine them with bitwise OR and store the result in an int.
The enum type is a restricted subset of int whose value is one of its enumerated values. Hence, when you make some new value outside of that range, you can't assign it to a variable of your enum type without casting.
You can also change the enum value types if you'd like, but there is no point for this question.
EDIT: The poster said they were concerned with type safety and they don't want a value that should not exist inside the int type.
But it would be type unsafe to put a value outside of AnimalFlags's range inside a variable of type AnimalFlags.
There is a safe way to check for out-of-range values inside the int type, though...
int iFlags = HasClaws | CanFly;
//InvalidAnimalFlagMaxValue-1 gives you a value with all the bits
// smaller than itself set to 1
//This check makes sure that no other bits are set.
assert((iFlags & ~(InvalidAnimalFlagMaxValue-1)) == 0);
enum AnimalFlags {
HasClaws = 1,
CanFly =2,
EatsFish = 4,
Endangered = 8,
// put new enum values above here
InvalidAnimalFlagMaxValue = 16
};
The above doesn't stop you from putting an invalid flag from a different enum that has the value 1,2,4, or 8 though.
If you want absolute type safety then you could simply create a std::set and store each flag inside there. It is not space efficient, but it is type safe and gives you the same ability as a bitflag int does.
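For illustration, a minimal sketch of that std::set idea (my example, using the AnimalFlags enum from above):
#include <set>
void example()
{
std::set<AnimalFlags> seahawkFlags; // type safe: only AnimalFlags values go in
seahawkFlags.insert(CanFly);
seahawkFlags.insert(EatsFish);
bool canFly = seahawkFlags.count(CanFly) > 0;
(void)canFly;
}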
C++0x note: Strongly typed enums
In C++0x you can finally have type safe enum values....
enum class AnimalFlags {
CanFly = 2,
HasClaws = 4
};
if(AnimalFlags::CanFly == 2) { } // Compile error
I find the currently accepted answer by eidolon too dangerous. The compiler's optimizer might make assumptions about possible values in the enum and you might get garbage back with invalid values. And usually nobody wants to define all possible permutations in flags enums.
As Brian R. Bondy states below, if you're using C++11 (which everyone should, it's that good) you can now do this more easily with enum class:
enum class ObjectType : uint32_t
{
ANIMAL = (1 << 0),
VEGETABLE = (1 << 1),
MINERAL = (1 << 2)
};
constexpr enum ObjectType operator |( const enum ObjectType selfValue, const enum ObjectType inValue )
{
return (enum ObjectType)(uint32_t(selfValue) | uint32_t(inValue));
}
// ... add more operators here.
This ensures a stable size and value range by specifying a type for the enum, inhibits implicit conversion of the enums to ints etc. by using enum class, and uses constexpr to ensure the code for the operators gets inlined and is thus just as fast as regular numbers.
For people stuck with pre-11 C++ dialects
If I was stuck with a compiler that doesn't support C++11, I'd go with wrapping an int-type in a class that then permits only use of bitwise operators and the types from that enum to set its values:
template<class ENUM,class UNDERLYING=typename std::underlying_type<ENUM>::type>
class SafeEnum
{
public:
SafeEnum() : mFlags(0) {}
SafeEnum( ENUM singleFlag ) : mFlags(singleFlag) {}
SafeEnum( const SafeEnum& original ) : mFlags(original.mFlags) {}
SafeEnum& operator |=( ENUM addValue ) { mFlags |= addValue; return *this; }
SafeEnum operator |( ENUM addValue ) { SafeEnum result(*this); result |= addValue; return result; }
SafeEnum& operator &=( ENUM maskValue ) { mFlags &= maskValue; return *this; }
SafeEnum operator &( ENUM maskValue ) { SafeEnum result(*this); result &= maskValue; return result; }
SafeEnum operator ~() { SafeEnum result(*this); result.mFlags = ~result.mFlags; return result; }
explicit operator bool() { return mFlags != 0; }
protected:
UNDERLYING mFlags;
};
You can define this pretty much like a regular enum + typedef:
enum TFlags_
{
EFlagsNone = 0,
EFlagOne = (1 << 0),
EFlagTwo = (1 << 1),
EFlagThree = (1 << 2),
EFlagFour = (1 << 3)
};
typedef SafeEnum<enum TFlags_> TFlags;
And usage is similar as well:
TFlags myFlags;
myFlags |= EFlagTwo;
myFlags |= EFlagThree;
if( myFlags & EFlagTwo )
std::cout << "flag 2 is set" << std::endl;
if( (myFlags & EFlagFour) == EFlagsNone )
std::cout << "flag 4 is not set" << std::endl;
And you can also override the underlying type for binary-stable enums (like C++11's enum foo : type) using the second template parameter, i.e. typedef SafeEnum<enum TFlags_,uint8_t> TFlags;.
I marked the operator bool override with C++11's explicit keyword to prevent it from resulting in int conversions, as those could cause sets of flags to end up collapsed into 0 or 1 when writing them out. If you can't use C++11, leave that overload out and rewrite the first conditional in the example usage as (myFlags & EFlagTwo) == EFlagTwo.
The easiest way to do this is as shown here, using the standard library class std::bitset.
To emulate the C# feature in a type-safe way, you'd have to write a template wrapper around the bitset, replacing the int arguments with an enum given as a type parameter to the template. Something like:
#include <bitset>
template <class T, int N>
class FlagSet
{
std::bitset<N> bits;
public:
FlagSet(T enumVal)
{
bits.set(enumVal);
}
// etc.
};
enum MyFlags
{
FLAG_ONE,
FLAG_TWO
};
FlagSet<MyFlags, 2> myFlag;
In my opinion none of the answers so far are ideal. To be ideal I would expect the solution to:
1. Support the ==, !=, =, &, &=, |, |= and ~ operators in the conventional sense (i.e. a & b)
2. Be type safe, i.e. not permit non-enumerated values such as literals or integer types to be assigned (except for bitwise combinations of enumerated values), or allow an enum variable to be assigned to an integer type
3. Permit expressions such as if (a & b)...
4. Not require evil macros, implementation specific features or other hacks
Most of the solutions thus far fall over on points 2 or 3. WebDancer's is the closest in my opinion but fails at point 3 and needs to be repeated for every enum.
My proposed solution is a generalized version of WebDancer's that also addresses point 3:
#include <cstdint>
#include <type_traits>
template<typename T, typename = typename std::enable_if<std::is_enum<T>::value, T>::type>
class auto_bool
{
T val_;
public:
constexpr auto_bool(T val) : val_(val) {}
constexpr operator T() const { return val_; }
constexpr explicit operator bool() const
{
return static_cast<std::underlying_type_t<T>>(val_) != 0;
}
};
template <typename T, typename = typename std::enable_if<std::is_enum<T>::value, T>::type>
constexpr auto_bool<T> operator&(T lhs, T rhs)
{
return static_cast<T>(
static_cast<typename std::underlying_type<T>::type>(lhs) &
static_cast<typename std::underlying_type<T>::type>(rhs));
}
template <typename T, typename = typename std::enable_if<std::is_enum<T>::value, T>::type>
constexpr T operator|(T lhs, T rhs)
{
return static_cast<T>(
static_cast<typename std::underlying_type<T>::type>(lhs) |
static_cast<typename std::underlying_type<T>::type>(rhs));
}
enum class AnimalFlags : uint8_t
{
HasClaws = 1,
CanFly = 2,
EatsFish = 4,
Endangered = 8
};
enum class PlantFlags : uint8_t
{
HasLeaves = 1,
HasFlowers = 2,
HasFruit = 4,
HasThorns = 8
};
int main()
{
AnimalFlags seahawk = AnimalFlags::CanFly; // Compiles, as expected
AnimalFlags lion = AnimalFlags::HasClaws; // Compiles, as expected
PlantFlags rose = PlantFlags::HasFlowers; // Compiles, as expected
// rose = 1; // Won't compile, as expected
if (seahawk != lion) {} // Compiles, as expected
// if (seahawk == rose) {} // Won't compile, as expected
// seahawk = PlantFlags::HasThorns; // Won't compile, as expected
seahawk = seahawk | AnimalFlags::EatsFish; // Compiles, as expected
lion = AnimalFlags::HasClaws | // Compiles, as expected
AnimalFlags::Endangered;
// int eagle = AnimalFlags::CanFly | // Won't compile, as expected
// AnimalFlags::HasClaws;
// int has_claws = seahawk & AnimalFlags::CanFly; // Won't compile, as expected
if (seahawk & AnimalFlags::CanFly) {} // Compiles, as expected
seahawk = seahawk & AnimalFlags::CanFly; // Compiles, as expected
return 0;
}
This creates overloads of the necessary operators but uses SFINAE to limit them to enumerated types. Note that in the interest of brevity I haven't defined all of the operators, but the only one that is any different is the &. The operators are currently global (i.e. apply to all enumerated types), but this could be narrowed either by placing the overloads in a namespace (what I do), or by adding additional SFINAE conditions (perhaps using particular underlying types, or specially created type aliases). underlying_type_t is a C++14 feature, but it seems to be well supported and is easy to emulate for C++11 with a simple template<typename T> using underlying_type_t = typename std::underlying_type<T>::type;
Edit: I incorporated the change suggested by Vladimir Afinello. Tested with GCC 10, CLANG 13 and Visual Studio 2022.
Only syntactic sugar. No additional metadata.
namespace UserRole // groups
{
constexpr uint8_t dea = 1;
constexpr uint8_t red = 2;
constexpr uint8_t stu = 4;
constexpr uint8_t kie = 8;
constexpr uint8_t adm = 16;
constexpr uint8_t mas = 32;
}
Flag operators on an integral type just work.
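A quick usage sketch (my example; the variable names are made up):
#include <cstdint>
void example()
{
std::uint8_t roles = UserRole::red | UserRole::adm; // plain integer bit operations
bool isAdmin = (roles & UserRole::adm) != 0;
(void)isAdmin;
}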
The C++ standard explicitly talks about this, see section "17.5.2.1.3 Bitmask types":
http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3485.pdf
Given this "template" you get:
enum AnimalFlags : unsigned int
{
HasClaws = 1,
CanFly = 2,
EatsFish = 4,
Endangered = 8
};
constexpr AnimalFlags operator|(AnimalFlags X, AnimalFlags Y) {
return static_cast<AnimalFlags>(
static_cast<unsigned int>(X) | static_cast<unsigned int>(Y));
}
AnimalFlags& operator|=(AnimalFlags& X, AnimalFlags Y) {
X = X | Y; return X;
}
And similar for the other operators.
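For example, operator& and operator&= would look like this (my sketch, following the same pattern as above):
constexpr AnimalFlags operator&(AnimalFlags X, AnimalFlags Y) {
return static_cast<AnimalFlags>(
static_cast<unsigned int>(X) & static_cast<unsigned int>(Y));
}
AnimalFlags& operator&=(AnimalFlags& X, AnimalFlags Y) {
X = X & Y; return X;
}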
Also note the "constexpr", it is needed if you want the compiler to be able to execute the operators compile time.
If you are using C++/CLI and want to able assign to enum members of ref classes you need to use tracking references instead:
AnimalFlags% operator|=(AnimalFlags% X, AnimalFlags Y) {
X = X | Y; return X;
}
NOTE: This sample is not complete, see section "17.5.2.1.3 Bitmask types" for a complete set of operators.
I use the following macro:
#define ENUM_FLAG_OPERATORS(T) \
inline T operator~ (T a) { return static_cast<T>( ~static_cast<std::underlying_type<T>::type>(a) ); } \
inline T operator| (T a, T b) { return static_cast<T>( static_cast<std::underlying_type<T>::type>(a) | static_cast<std::underlying_type<T>::type>(b) ); } \
inline T operator& (T a, T b) { return static_cast<T>( static_cast<std::underlying_type<T>::type>(a) & static_cast<std::underlying_type<T>::type>(b) ); } \
inline T operator^ (T a, T b) { return static_cast<T>( static_cast<std::underlying_type<T>::type>(a) ^ static_cast<std::underlying_type<T>::type>(b) ); } \
inline T& operator|= (T& a, T b) { return reinterpret_cast<T&>( reinterpret_cast<std::underlying_type<T>::type&>(a) |= static_cast<std::underlying_type<T>::type>(b) ); } \
inline T& operator&= (T& a, T b) { return reinterpret_cast<T&>( reinterpret_cast<std::underlying_type<T>::type&>(a) &= static_cast<std::underlying_type<T>::type>(b) ); } \
inline T& operator^= (T& a, T b) { return reinterpret_cast<T&>( reinterpret_cast<std::underlying_type<T>::type&>(a) ^= static_cast<std::underlying_type<T>::type>(b) ); }
It is similar to the ones mentioned above but has several improvements:
It is type safe (it does not assume that the underlying type is an int)
It does not require you to manually specify the underlying type (as opposed to LunarEclipse's answer)
It does need to include <type_traits>:
#include <type_traits>
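Usage could then look like this (a sketch, reusing the question's enum as a scoped enum):
#include <type_traits>
enum class AnimalFlags : unsigned
{
HasClaws = 1,
CanFly = 2,
EatsFish = 4,
Endangered = 8
};
ENUM_FLAG_OPERATORS(AnimalFlags)
void example()
{
AnimalFlags f = AnimalFlags::CanFly | AnimalFlags::EatsFish;
f |= AnimalFlags::Endangered;
bool fishy = (f & AnimalFlags::EatsFish) == AnimalFlags::EatsFish;
(void)fishy;
}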
I found myself asking the same question and came up with a generic C++11 based solution, similar to soru's:
#include <bitset>
#include <limits>
#include <type_traits>
template <typename TENUM>
class FlagSet {
private:
using TUNDER = typename std::underlying_type<TENUM>::type;
std::bitset<std::numeric_limits<TUNDER>::max()> m_flags;
public:
FlagSet() = default;
template <typename... ARGS>
FlagSet(TENUM f, ARGS... args) : FlagSet(args...)
{
set(f);
}
FlagSet& set(TENUM f)
{
m_flags.set(static_cast<TUNDER>(f));
return *this;
}
bool test(TENUM f)
{
return m_flags.test(static_cast<TUNDER>(f));
}
FlagSet& operator|=(TENUM f)
{
return set(f);
}
};
The interface can be improved to taste. Then it can be used like so:
FlagSet<Flags> flags{Flags::FLAG_A, Flags::FLAG_C};
flags |= Flags::FLAG_D;
If your compiler doesn't support strongly typed enums yet, you can take a look at the following article from The C++ Source:
From the abstract:
This article presents a solution to the problem of constraining bit operations to allow only safe and legitimate ones, and turn all invalid bit manipulations into compile-time errors. Best of all, the syntax of bit operations remains unchanged, and the code working with bits does not need to be modified, except possibly to fix errors that had as yet remained undetected.
Here's an option for bitmasks if you don't actually have a use for the individual enum values (e.g. you don't need to switch on them)... and if you aren't worried about maintaining binary compatibility, i.e. you don't care where your bits live... which you probably are. Also you'd better not be too concerned with scoping and access control. Hmmm, enums have some nice properties for bit-fields... wonder if anyone has ever tried that :)
#include <iostream>
using std::cout;
struct AnimalProperties
{
bool HasClaws : 1;
bool CanFly : 1;
bool EatsFish : 1;
bool Endangered : 1;
};
union AnimalDescription
{
AnimalProperties Properties;
int Flags;
};
void TestUnionFlags()
{
AnimalDescription propertiesA;
propertiesA.Properties.CanFly = true;
AnimalDescription propertiesB = propertiesA;
propertiesB.Properties.EatsFish = true;
if( propertiesA.Flags == propertiesB.Flags )
{
cout << "Life is terrible :(";
}
else
{
cout << "Life is great!";
}
AnimalDescription propertiesC = propertiesA;
if( propertiesA.Flags == propertiesC.Flags )
{
cout << "Life is great!";
}
else
{
cout << "Life is terrible :(";
}
}
We can see that life is great, we have our discrete values, and we have a nice int to & and | to our hearts content, which still has context of what its bits mean. Everything is consistent and predictable... for me... as long as I keep using Microsoft's VC++ compiler w/ Update 3 on Win10 x64 and don't touch my compiler flags :)
Even though everything is great... we have some context as to the meaning of the flags now, since it's in a union with the bitfield. But in the terrible real world, where your program may be responsible for more than a single discrete task, you could still accidentally (quite easily) smash two flags fields of different unions together (say, AnimalProperties and ObjectProperties, since they're both ints), mixing up all your bits, which is a horrible bug to trace down... and how I know many people on this post don't work with bitmasks very often, since building them is easy and maintaining them is hard.
class AnimalDefinition {
public:
static AnimalDefinition *GetAnimalDefinition( AnimalFlags flags ); //A little too obvious for my taste... NEXT!
static AnimalDefinition *GetAnimalDefinition( AnimalProperties properties ); //Oh I see how to use this! BORING, NEXT!
static AnimalDefinition *GetAnimalDefinition( int flags ); //hmm, wish I could see how to construct a valid "flags" int without CrossFingers+Ctrl+Shift+F("Animal*"). Maybe just hard-code 16 or something?
AnimalFlags animalFlags; //Well this is *way* too hard to break unintentionally, screw this!
int flags; //PERFECT! Nothing will ever go wrong here...
//wait, what values are used for this particular flags field? Is this AnimalFlags or ObjectFlags? Or is it RuntimePlatformFlags? Does it matter? Where's the documentation?
//Well luckily anyone in the code base can get confused and destroy the whole program! At least I don't need to static_cast anymore, phew!
private:
AnimalDescription m_description; //Oh I know what this is. All of the mystery and excitement of life has been stolen away :(
};
So then you make your union declaration private to prevent direct access to "Flags", and have to add getters/setters and operator overloads, then make a macro for all that, and you're basically right back where you started when you tried to do this with an Enum.
Unfortunately if you want your code to be portable, I don't think there is any way to either A) guarantee the bit layout or B) determine the bit layout at compile time (so you can track it and at least correct for changes across versions/platforms etc)
Offset in a struct with bit fields
At runtime you can play tricks with setting the fields and XORing the flags to see which bits changed; sounds pretty crappy to me, though, versus having a 100% consistent, platform independent, and completely deterministic solution, i.e.: an ENUM.
TL;DR:
Don't listen to the haters. C++ is not English. Just because the literal definition of an abbreviated keyword inherited from C might not fit your usage doesn't mean you shouldn't use it when the C and C++ definition of the keyword absolutely includes your use case. You can also use structs to model things other than structures, and classes for things other than school and social caste. You may use float for values which are grounded. You may use char for variables which are neither un-burnt nor a person in a novel, play, or movie. Any programmer who goes to the dictionary to determine the meaning of a keyword before the language spec is a... well I'll hold my tongue there.
If you do want your code modeled after spoken language you'd be best off writing in Objective-C, which incidentally also uses enums heavily for bitfields.
I'd like to elaborate on Uliwitness' answer, fixing his code for C++98 and using the Safe Bool idiom, for lack of the std::underlying_type<> template and the explicit keyword in C++ versions below C++11.
I also modified it so that the enum values can be sequential without any explicit assignment, so you can have
enum AnimalFlags_
{
HasClaws,
CanFly,
EatsFish,
Endangered
};
typedef FlagsEnum<AnimalFlags_> AnimalFlags;
seahawk.flags = AnimalFlags() | CanFly | EatsFish | Endangered;
You can then get the raw flags value with
seahawk.flags.value();
Here's the code.
template <typename EnumType, typename Underlying = int>
class FlagsEnum
{
typedef Underlying FlagsEnum::* RestrictedBool;
public:
FlagsEnum() : m_flags(Underlying()) {}
FlagsEnum(EnumType singleFlag):
m_flags(1 << singleFlag)
{}
FlagsEnum(const FlagsEnum& original):
m_flags(original.m_flags)
{}
FlagsEnum& operator |=(const FlagsEnum& f) {
m_flags |= f.m_flags;
return *this;
}
FlagsEnum& operator &=(const FlagsEnum& f) {
m_flags &= f.m_flags;
return *this;
}
friend FlagsEnum operator |(const FlagsEnum& f1, const FlagsEnum& f2) {
return FlagsEnum(f1) |= f2;
}
friend FlagsEnum operator &(const FlagsEnum& f1, const FlagsEnum& f2) {
return FlagsEnum(f1) &= f2;
}
FlagsEnum operator ~() const {
FlagsEnum result(*this);
result.m_flags = ~result.m_flags;
return result;
}
operator RestrictedBool() const {
return m_flags ? &FlagsEnum::m_flags : 0;
}
Underlying value() const {
return m_flags;
}
protected:
Underlying m_flags;
};
Currently there is no language support for enum flags; metaclasses might inherently add this feature if they ever become part of the C++ standard.
My solution would be to create enum-only instantiated function templates that add support for type-safe bitwise operations on an enum class, using its underlying type:
File: EnumClassBitwise.h
#pragma once
#ifndef _ENUM_CLASS_BITWISE_H_
#define _ENUM_CLASS_BITWISE_H_
#include <type_traits>
//unary ~ operator
template <typename Enum, typename std::enable_if_t<std::is_enum<Enum>::value, int> = 0>
constexpr inline Enum operator~ (Enum val)
{
return static_cast<Enum>(~static_cast<std::underlying_type_t<Enum>>(val));
}
// & operator
template <typename Enum, typename std::enable_if_t<std::is_enum<Enum>::value, int> = 0>
constexpr inline Enum operator& (Enum lhs, Enum rhs)
{
return static_cast<Enum>(static_cast<std::underlying_type_t<Enum>>(lhs) & static_cast<std::underlying_type_t<Enum>>(rhs));
}
// &= operator
template <typename Enum, typename std::enable_if_t<std::is_enum<Enum>::value, int> = 0>
constexpr inline Enum& operator&= (Enum& lhs, Enum rhs)
{
lhs = static_cast<Enum>(static_cast<std::underlying_type_t<Enum>>(lhs) & static_cast<std::underlying_type_t<Enum>>(rhs));
return lhs;
}
//| operator
template <typename Enum, typename std::enable_if_t<std::is_enum<Enum>::value, int> = 0>
constexpr inline Enum operator| (Enum lhs, Enum rhs)
{
return static_cast<Enum>(static_cast<std::underlying_type_t<Enum>>(lhs) | static_cast<std::underlying_type_t<Enum>>(rhs));
}
//|= operator
template <typename Enum, typename std::enable_if_t<std::is_enum<Enum>::value, int> = 0>
constexpr inline Enum& operator|= (Enum& lhs, Enum rhs)
{
lhs = static_cast<Enum>(static_cast<std::underlying_type_t<Enum>>(lhs) | static_cast<std::underlying_type_t<Enum>>(rhs));
return lhs;
}
#endif // _ENUM_CLASS_BITWISE_H_
For convenience and for reducing mistakes, you might want to wrap your bit flags operations for enums and for integers as well:
File: BitFlags.h
#pragma once
#ifndef _BIT_FLAGS_H_
#define _BIT_FLAGS_H_
#include "EnumClassBitwise.h"
template<typename T>
class BitFlags
{
public:
constexpr inline BitFlags() = default;
constexpr inline BitFlags(T value) { mValue = value; }
constexpr inline BitFlags operator| (T rhs) const { return mValue | rhs; }
constexpr inline BitFlags operator& (T rhs) const { return mValue & rhs; }
constexpr inline BitFlags operator~ () const { return ~mValue; }
constexpr inline operator T() const { return mValue; }
constexpr inline BitFlags& operator|=(T rhs) { mValue |= rhs; return *this; }
constexpr inline BitFlags& operator&=(T rhs) { mValue &= rhs; return *this; }
constexpr inline bool test(T rhs) const { return (mValue & rhs) == rhs; }
constexpr inline void set(T rhs) { mValue |= rhs; }
constexpr inline void clear(T rhs) { mValue &= ~rhs; }
private:
T mValue;
};
#endif //#define _BIT_FLAGS_H_
Possible usage:
#include <cstdint>
#include "BitFlags.h"
int main()
{
enum class Options : uint32_t
{
NoOption = 0 << 0
, Option1 = 1 << 0
, Option2 = 1 << 1
, Option3 = 1 << 2
, Option4 = 1 << 3
};
const uint32_t Option1 = 1 << 0;
const uint32_t Option2 = 1 << 1;
const uint32_t Option3 = 1 << 2;
const uint32_t Option4 = 1 << 3;
//Enum BitFlags
BitFlags<Options> optionsEnum(Options::NoOption);
optionsEnum.set(Options::Option1 | Options::Option3);
//Standard integer BitFlags
BitFlags<uint32_t> optionsUint32(0);
optionsUint32.set(Option1 | Option3);
return 0;
}
Xaqq has provided a really nice type-safe way to use enum flags here, with a flag_set class.
I published the code in GitHub, usage is as follows:
#include "flag_set.hpp"
enum class AnimalFlags : uint8_t {
HAS_CLAWS,
CAN_FLY,
EATS_FISH,
ENDANGERED,
_
};
int main()
{
flag_set<AnimalFlags> seahawkFlags(AnimalFlags::HAS_CLAWS
| AnimalFlags::EATS_FISH
| AnimalFlags::ENDANGERED);
if (seahawkFlags & AnimalFlags::ENDANGERED)
cout << "Seahawk is endangered";
}
Another macro solution, but unlike the existing answers this does not use reinterpret_cast (or a C-cast) to cast between Enum& and Int&, which is forbidden in standard C++ (see this post).
#define MAKE_FLAGS_ENUM(TEnum, TUnder) \
TEnum operator~ ( TEnum a ) { return static_cast<TEnum> (~static_cast<TUnder> (a) ); } \
TEnum operator| ( TEnum a, TEnum b ) { return static_cast<TEnum> ( static_cast<TUnder> (a) | static_cast<TUnder>(b) ); } \
TEnum operator& ( TEnum a, TEnum b ) { return static_cast<TEnum> ( static_cast<TUnder> (a) & static_cast<TUnder>(b) ); } \
TEnum operator^ ( TEnum a, TEnum b ) { return static_cast<TEnum> ( static_cast<TUnder> (a) ^ static_cast<TUnder>(b) ); } \
TEnum& operator|= ( TEnum& a, TEnum b ) { a = static_cast<TEnum>(static_cast<TUnder>(a) | static_cast<TUnder>(b) ); return a; } \
TEnum& operator&= ( TEnum& a, TEnum b ) { a = static_cast<TEnum>(static_cast<TUnder>(a) & static_cast<TUnder>(b) ); return a; } \
TEnum& operator^= ( TEnum& a, TEnum b ) { a = static_cast<TEnum>(static_cast<TUnder>(a) ^ static_cast<TUnder>(b) ); return a; }
Losing the reinterpret_cast means we can't implement x |= y by operating directly on a reference to the underlying type any more, but by expanding it into the x = x | y form we no longer need to.
Note: You can use std::underlying_type to obtain TUnder, I've not included it for brevity.
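Usage might look like this (my sketch, reusing the question's enum):
enum class AnimalFlags : unsigned
{
HasClaws = 1,
CanFly = 2,
EatsFish = 4,
Endangered = 8
};
MAKE_FLAGS_ENUM(AnimalFlags, unsigned)
void example()
{
AnimalFlags f = AnimalFlags::CanFly | AnimalFlags::EatsFish;
f |= AnimalFlags::Endangered; // operator|= is implemented as a = a | b, no reinterpret_cast needed
(void)f;
}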
You are confusing objects and collections of objects. Specifically, you are confusing binary flags with sets of binary flags. A proper solution would look like this:
// These are individual flags
enum AnimalFlag // Flag, not Flags
{
HasClaws = 0,
CanFly,
EatsFish,
Endangered
};
class AnimalFlagSet
{
int m_Flags;
public:
AnimalFlagSet() : m_Flags(0) { }
void Set( AnimalFlag flag ) { m_Flags |= (1 << flag); }
void Clear( AnimalFlag flag ) { m_Flags &= ~ (1 << flag); }
bool Get( AnimalFlag flag ) const { return (m_Flags >> flag) & 1; }
};
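Usage would then look something like this (my example):
void example()
{
AnimalFlagSet seahawk;
seahawk.Set( CanFly );
seahawk.Set( EatsFish );
if( seahawk.Get( CanFly ) && !seahawk.Get( HasClaws ) )
{
// a clawless flyer that eats fish
}
seahawk.Clear( EatsFish );
}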
Here is my solution without needing any bunch of overloading or casting:
namespace EFoobar
{
enum
{
FB_A = 0x1,
FB_B = 0x2,
FB_C = 0x4,
};
typedef long Flags;
}
void Foobar(EFoobar::Flags flags)
{
if (flags & EFoobar::FB_A)
// do sth
;
if (flags & EFoobar::FB_B)
// do sth
;
}
void ExampleUsage()
{
Foobar(EFoobar::FB_A | EFoobar::FB_B);
EFoobar::Flags otherflags = 0;
otherflags|= EFoobar::FB_B;
otherflags&= ~EFoobar::FB_B;
Foobar(otherflags);
}
I think it's OK, because we identify (non strongly typed) enums with ints anyway.
Just as a (longer) side note: if you want to use strongly typed enums, don't need heavy bit fiddling with your flags, and performance is not an issue, I would come up with this:
#include <set>
enum class EFoobarFlags
{
FB_A = 1,
FB_B,
FB_C,
};
void Foobar(const std::set<EFoobarFlags>& flags)
{
if (flags.find(EFoobarFlags::FB_A) != flags.end())
// do sth
;
if (flags.find(EFoobarFlags::FB_B) != flags.end())
// do sth
;
}
void ExampleUsage()
{
Foobar({EFoobarFlags::FB_A, EFoobarFlags::FB_B});
std::set<EFoobarFlags> otherflags{};
otherflags.insert(EFoobarFlags::FB_B);
otherflags.erase(EFoobarFlags::FB_B);
Foobar(otherflags);
}
using C++11 initializer lists and enum class.
Copy-pasteable "evil" macro based on some of the other answers in this thread:
#include <type_traits>
/*
* Macro to allow enum values to be combined and evaluated as flags.
* * Based on:
* - DEFINE_ENUM_FLAG_OPERATORS from <winnt.h>
* - https://stackoverflow.com/a/63031334/1624459
*/
#define MAKE_ENUM_FLAGS(TEnum) \
inline TEnum operator~(TEnum a) { \
using TUnder = typename std::underlying_type_t<TEnum>; \
return static_cast<TEnum>(~static_cast<TUnder>(a)); \
} \
inline TEnum operator|(TEnum a, TEnum b) { \
using TUnder = typename std::underlying_type_t<TEnum>; \
return static_cast<TEnum>(static_cast<TUnder>(a) | static_cast<TUnder>(b)); \
} \
inline TEnum operator&(TEnum a, TEnum b) { \
using TUnder = typename std::underlying_type_t<TEnum>; \
return static_cast<TEnum>(static_cast<TUnder>(a) & static_cast<TUnder>(b)); \
} \
inline TEnum operator^(TEnum a, TEnum b) { \
using TUnder = typename std::underlying_type_t<TEnum>; \
return static_cast<TEnum>(static_cast<TUnder>(a) ^ static_cast<TUnder>(b)); \
} \
inline TEnum& operator|=(TEnum& a, TEnum b) { \
using TUnder = typename std::underlying_type_t<TEnum>; \
a = static_cast<TEnum>(static_cast<TUnder>(a) | static_cast<TUnder>(b)); \
return a; \
} \
inline TEnum& operator&=(TEnum& a, TEnum b) { \
using TUnder = typename std::underlying_type_t<TEnum>; \
a = static_cast<TEnum>(static_cast<TUnder>(a) & static_cast<TUnder>(b)); \
return a; \
} \
inline TEnum& operator^=(TEnum& a, TEnum b) { \
using TUnder = typename std::underlying_type_t<TEnum>; \
a = static_cast<TEnum>(static_cast<TUnder>(a) ^ static_cast<TUnder>(b)); \
return a; \
}
Usage
#include <cstdint>
enum class Passability : std::uint8_t {
Clear = 0,
GroundUnit = 1 << 0,
FlyingUnit = 1 << 1,
Building = 1 << 2,
Tree = 1 << 3,
Mountain = 1 << 4,
Blocked = 1 << 5,
Water = 1 << 6,
Coastline = 1 << 7
};
MAKE_ENUM_FLAGS(Passability)
Advantages
Only applies to chosen enums when used explicitly.
No use of illegal reinterpret_cast.
No need to specify the underlying type.
Notes
Replace std::underlying_type_t<TEnum> with std::underlying_type<TEnum>::type if using C++ <14.
Here's a lazy C++11 solution that doesn't change the default behavior of enums. It also works for enum struct and enum class, and is constexpr.
#include <type_traits>
template<class T = void> struct enum_traits {};
template<> struct enum_traits<void> {
struct _allow_bitops {
static constexpr bool allow_bitops = true;
};
using allow_bitops = _allow_bitops;
template<class T, class R = T>
using t = typename std::enable_if<std::is_enum<T>::value and
enum_traits<T>::allow_bitops, R>::type;
template<class T>
using u = typename std::underlying_type<T>::type;
};
template<class T>
constexpr enum_traits<>::t<T> operator~(T a) {
return static_cast<T>(~static_cast<enum_traits<>::u<T>>(a));
}
template<class T>
constexpr enum_traits<>::t<T> operator|(T a, T b) {
return static_cast<T>(
static_cast<enum_traits<>::u<T>>(a) |
static_cast<enum_traits<>::u<T>>(b));
}
template<class T>
constexpr enum_traits<>::t<T> operator&(T a, T b) {
return static_cast<T>(
static_cast<enum_traits<>::u<T>>(a) &
static_cast<enum_traits<>::u<T>>(b));
}
template<class T>
constexpr enum_traits<>::t<T> operator^(T a, T b) {
return static_cast<T>(
static_cast<enum_traits<>::u<T>>(a) ^
static_cast<enum_traits<>::u<T>>(b));
}
template<class T>
constexpr enum_traits<>::t<T, T&> operator|=(T& a, T b) {
a = a | b;
return a;
}
template<class T>
constexpr enum_traits<>::t<T, T&> operator&=(T& a, T b) {
a = a & b;
return a;
}
template<class T>
constexpr enum_traits<>::t<T, T&> operator^=(T& a, T b) {
a = a ^ b;
return a;
}
To enable bitwise operators for an enum:
enum class my_enum {
Flag1 = 1 << 0,
Flag2 = 1 << 1,
Flag3 = 1 << 2,
// ...
};
// The magic happens here
template<> struct enum_traits<my_enum> :
enum_traits<>::allow_bitops {};
constexpr my_enum foo = my_enum::Flag1 | my_enum::Flag2 | my_enum::Flag3;
As above (Kai's answer), or do the following. Really, enums are "enumerations"; what you want to do is have a set, therefore you should really use std::set:
enum AnimalFlags
{
HasClaws = 1,
CanFly =2,
EatsFish = 4,
Endangered = 8
};
int main(void)
{
AnimalFlags seahawk;
//seahawk= CanFly | EatsFish | Endangered;
seahawk= static_cast<AnimalFlags>(CanFly | EatsFish | Endangered);
}
Maybe like NS_OPTIONS of Objective-C.
#define ENUM(T1, T2) \
enum class T1 : T2; \
inline T1 operator~ (T1 a) { return (T1)~(int)a; } \
inline T1 operator| (T1 a, T1 b) { return static_cast<T1>((static_cast<T2>(a) | static_cast<T2>(b))); } \
inline T1 operator& (T1 a, T1 b) { return static_cast<T1>((static_cast<T2>(a) & static_cast<T2>(b))); } \
inline T1 operator^ (T1 a, T1 b) { return static_cast<T1>((static_cast<T2>(a) ^ static_cast<T2>(b))); } \
inline T1& operator|= (T1& a, T1 b) { return reinterpret_cast<T1&>((reinterpret_cast<T2&>(a) |= static_cast<T2>(b))); } \
inline T1& operator&= (T1& a, T1 b) { return reinterpret_cast<T1&>((reinterpret_cast<T2&>(a) &= static_cast<T2>(b))); } \
inline T1& operator^= (T1& a, T1 b) { return reinterpret_cast<T1&>((reinterpret_cast<T2&>(a) ^= static_cast<T2>(b))); } \
enum class T1 : T2
ENUM(Options, short) {
FIRST = 1 << 0,
SECOND = 1 << 1,
THIRD = 1 << 2,
FOURTH = 1 << 3
};
#include <iostream>
using std::cout;
using std::endl;
int main()
{
auto options = Options::FIRST | Options::SECOND;
options |= Options::THIRD;
if ((options & Options::SECOND) == Options::SECOND)
cout << "Contains second option." << endl;
if ((options & Options::THIRD) == Options::THIRD)
cout << "Contains third option." << endl;
return 0;
}
// Output:
// Contains second option.
// Contains third option.
C++20 Type-Safe Enum Operators
TL;DR
#include <concepts>
#include <type_traits>
template<typename T>
requires std::is_enum_v<T> and
requires (std::underlying_type_t<T> x) {
{ x | x } -> std::same_as<std::underlying_type_t<T>>;
T(x);
}
T operator|(T left, T right)
{
using U = std::underlying_type_t<T>;
return T( U(left) | U(right) );
}
template<typename T>
requires std::is_enum_v<T> and
requires (std::underlying_type_t<T> x) {
{ x & x } -> std::same_as<std::underlying_type_t<T>>;
T(x);
}
T operator&(T left, T right)
{
using U = std::underlying_type_t<T>;
return T( U(left) & U(right) );
}
template<typename T>
requires std::is_enum_v<T> and requires (T x) { { x | x } -> std::same_as<T>; }
T & operator|=(T &left, T right)
{
return left = left | right;
}
template<typename T>
requires std::is_enum_v<T> and requires (T x) { { x & x } -> std::same_as<T>; }
T & operator&=(T &left, T right)
{
return left = left & right;
}
Rationale
With type trait std::is_enum we can test some type T for whether it is an enumeration type.
This includes both unscoped and scoped enums (i.e. enum and enum class).
With type trait std::underlying_type we can get the underlying type of an enum.
With C++20 concepts and constraints it is quite easy to then provide overloads for bitwise operations.
Scoped vs. Unscoped
If the operations should only be overloaded for either scoped or unscoped enums, std::is_scoped_enum (C++23) can be used to extend the template constraints accordingly.
C++23
With C++23 we get std::to_underlying to convert an enum value to its underlying type more easily.
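For illustration, operator| could then be written roughly like this (a sketch, not part of the answer above):
#include <type_traits>
#include <utility> // std::to_underlying, C++23
template<typename T>
requires std::is_enum_v<T>
T operator|(T left, T right)
{
return T( std::to_underlying(left) | std::to_underlying(right) );
}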
Move Semantics & Perfect Forwarding
Should you get into the bizarre situation that your underlying type has different semantics for copy vs. move, or does not provide a copy constructor, then you should perfect-forward the operands with std::forward.
You can use struct as follow:
struct UiFlags2 {
static const int
FULLSCREEN = 0x00000004, //api 16
HIDE_NAVIGATION = 0x00000002, //api 14
LAYOUT_HIDE_NAVIGATION = 0x00000200, //api 16
LAYOUT_FULLSCREEN = 0x00000400, //api 16
LAYOUT_STABLE = 0x00000100, //api 16
IMMERSIVE_STICKY = 0x00001000; //api 19
};
and use as this:
int flags = UiFlags2::FULLSCREEN | UiFlags2::HIDE_NAVIGATION;
So you don't need any int casting and it is directly usable.
Also it is scope-separated, like an enum class.
I prefer using magic_enum, as it helps automate converting strings to enums and vice versa.
It is a header-only library written in the C++17 standard.
magic_enum already has template functions for enum bitwise operators.
See documentation.
Usage:
#include <magic_enum.hpp>
enum Flag { ... };
Flag flag{};
Flag value{};
using namespace magic_enum::bitwise_operators;
flag |= value;
Is there a generic way to cast int to enum in C++?
If int falls in range of an enum it should return an enum value, otherwise throw an exception. Is there a way to write it generically? More than one enum type should be supported.
Background: I have an external enum type and no control over the source code. I'd like to store this value in a database and retrieve it.
The obvious thing is to annotate your enum:
// generic code
#include <algorithm>
template <typename T>
struct enum_traits {};
template<typename T, size_t N>
T *endof(T (&ra)[N]) {
return ra + N;
}
template<typename T, typename ValType>
T check(ValType v) {
typedef enum_traits<T> traits;
const T *first = traits::enumerators;
const T *last = endof(traits::enumerators);
if (traits::sorted) { // probably premature optimization
if (std::binary_search(first, last, v)) return T(v);
} else if (std::find(first, last, v) != last) {
return T(v);
}
throw "exception";
}
// "enhanced" definition of enum
enum e {
x = 1,
y = 4,
z = 10,
};
template<>
struct enum_traits<e> {
static const e enumerators[];
static const bool sorted = true;
};
// must appear in only one TU,
// so if the above is in a header then it will need the array size
const e enum_traits<e>::enumerators[] = {x, y, z};
// usage
int main() {
e good = check<e>(1);
e bad = check<e>(2);
}
You need the array to be kept up to date with e, which is a nuisance if you're not the author of e. As Sjoerd says, it can probably be automated with any decent build system.
In any case, you're up against 7.2/6:
For an enumeration where emin is the smallest enumerator and emax is the largest, the values of the enumeration are the values of the underlying type in the range bmin to bmax, where bmin and bmax are, respectively, the smallest and largest values of the smallest bit-field that can store emin and emax. It is possible to define an enumeration that has values not defined by any of its enumerators.
So if you aren't the author of e, you may or may not have a guarantee that valid values of e actually appear in its definition.
Ugly.
enum MyEnum { one = 1, two = 2 };
MyEnum to_enum(int n)
{
switch( n )
{
case 1 : return one;
case 2 : return two;
}
throw something();
}
Now for the real question. Why do you need this? The code is ugly, not easy to write, not easy to maintain, and not easy to incorporate into your code. The code is telling you that it's wrong. Why fight it?
EDIT:
Alternatively, given that enums are integral types in C++:
MyEnum my_enum_val = static_cast<MyEnum>(my_int_val);
but this is even uglier than the above, much more prone to errors, and it won't throw as you desire.
If, as you describe, the values are in a database, why not write a code generator that reads this table and creates a .h and .cpp file with both the enum and a to_enum(int) function?
Advantages:
Easy to add a to_string(my_enum) function.
Little maintenance required
Database and code are in synch
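For instance, the generated header could look roughly like this (purely illustrative; the names and values are made up):
// my_enum_generated.h -- produced by the generator from the database table
enum MyEnum { one = 1, two = 2, three = 3 };
MyEnum to_enum(int n); // throws if n is not a valid MyEnum value
const char* to_string(MyEnum e);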
No, there's no introspection in C++, nor is there any built-in "domain check" facility.
What do you think about this one?
#include <iostream>
#include <stdexcept>
#include <set>
#include <string>
using namespace std;
template<typename T>
class Enum
{
public:
static void insert(int value)
{
_set.insert(value);
}
static T buildFrom(int value)
{
if (_set.find(value) != _set.end()) {
T retval;
retval.assign(value);
return retval;
}
throw std::runtime_error("unexpected value");
}
operator int() const { return _value; }
private:
void assign(int value)
{
_value = value;
}
int _value;
static std::set<int> _set;
};
template<typename T> std::set<int> Enum<T>::_set;
class Apples: public Enum<Apples> {};
class Oranges: public Enum<Oranges> {};
class Proxy
{
public:
Proxy(int value): _value(value) {}
template<typename T>
operator T()
{
T theEnum;
return theEnum.buildFrom(_value);
}
int _value;
};
Proxy convert(int value)
{
return Proxy(value);
}
int main()
{
Apples::insert(4);
Apples::insert(8);
Apples a = convert(4); // works
std::cout << a << std::endl; // prints 4
try {
Apples b = convert(9); // throws
}
catch (std::exception const& e) {
std::cout << e.what() << std::endl; // prints "unexpected value"
}
try {
Oranges b = convert(4); // also throws
}
catch (std::exception const& e) {
std::cout << e.what() << std::endl; // prints "unexpected value"
}
}
You could then use code I posted here to switch on values.
You should not want something like what you describe to exist; I fear there are problems in your code design.
Also, you assume that enums come in a range, but that's not always the case:
enum Flags { one = 1, two = 2, four = 4, eigh = 8, big = 2000000000 };
This is not in a range: even if it was possible, are you supposed to check every integer from 0 to 2^n to see if they match some enum's value?
If you are prepared to list your enum values as template parameters you can do this in C++11 with variadic templates. You can look at this as a good thing, allowing you to accept subsets of the valid enum values in different contexts; this is often useful when parsing codes from external sources.
Perhaps not quite as generic as you'd like, but the checking code itself is generalised, you just need to specify the set of values. This approach handles gaps, arbitrary values, etc.
#include <cstdio>
#include <stdexcept>
#include <type_traits>
template<typename EnumType, EnumType... Values> class EnumCheck;
template<typename EnumType> class EnumCheck<EnumType>
{
public:
template<typename IntType>
static bool constexpr is_value(IntType) { return false; }
};
template<typename EnumType, EnumType V, EnumType... Next>
class EnumCheck<EnumType, V, Next...> : private EnumCheck<EnumType, Next...>
{
using super = EnumCheck<EnumType, Next...>;
public:
template<typename IntType>
static bool constexpr is_value(IntType v)
{
return v == static_cast<typename std::underlying_type<EnumType>::type>(V) || super::is_value(v);
}
template<typename IntType>
static EnumType convert(IntType v)
{
if (!is_value(v)) throw std::runtime_error("Enum value out of range");
return static_cast<EnumType>(v);
}
};
enum class Test {
A = 1,
C = 3,
E = 5
};
using TestCheck = EnumCheck<Test, Test::A, Test::C, Test::E>;
void check_value(int v)
{
if (TestCheck::is_value(v))
printf("%d is OK\n", v);
else
printf("%d is not OK\n", v);
}
int main()
{
for (int i = 0; i < 10; ++i)
check_value(i);
}
C++0x alternative to the "ugly" version, allows for multiple enums. Uses initializer lists rather than switches, a bit cleaner IMO. Unfortunately, this doesn't work around the need to hard-code the enum values.
#include <cassert> // assert
#include <stdexcept> // std::runtime_error
namespace // unnamed namespace
{
enum class e1 { value_1 = 1, value_2 = 2 };
enum class e2 { value_3 = 3, value_4 = 4 };
template <typename T>
int valid_enum( const int val, const T& vec )
{
for ( const auto item : vec )
if ( static_cast<int>( item ) == val ) return val;
throw std::runtime_error( "invalid enum value!" ); // throw something useful here
} // valid_enum
} // ns
int main()
{
// generate list of valid values
const auto e1_valid_values = { e1::value_1, e1::value_2 };
const auto e2_valid_values = { e2::value_3, e2::value_4 };
auto result1 = static_cast<e1>( valid_enum( 1, e1_valid_values ) );
assert( result1 == e1::value_1 );
auto result2 = static_cast<e2>( valid_enum( 3, e2_valid_values ) );
assert( result2 == e2::value_3 );
// test throw on invalid value
try
{
auto result3 = static_cast<e1>( valid_enum( 9999999, e1_valid_values ) );
assert( false );
}
catch ( ... )
{
assert( true );
}
}