Move semantics appear to add overhead in the simplest possible* example - C++

* I believe this is the simplest possible example, but if I'm incorrect please let me know.
https://godbolt.org/z/neaTse
I'm attempting to learn and understand move semantics and some of their intricacies, but I've hit a bit of a snag. When comparing the following two code snippets, the code using move semantics ends up with 8 additional lines of assembly and 2 additional mov instructions (15 for move, 13 for no-move).
Move:
#include <utility>

template<class T>
void swap(T& a, T& b)
{
    T tmp(std::move(a));
    a = std::move(b);
    b = std::move(tmp);
}

int main() {
    int a, b;
    swap(a, b);
}
No-Move:
template<class T>
void swap(T& a, T& b)
{
    T tmp(a);
    a = b;
    b = tmp;
}

int main() {
    int a, b;
    swap(a, b);
}
Move generated assembly:
main:
pushq %rbp
movq %rsp, %rbp
subq $16, %rsp
leaq -8(%rbp), %rdx
leaq -4(%rbp), %rax
movq %rdx, %rsi
movq %rax, %rdi
call void swap<int>(int&, int&)
movl $0, %eax
leave
ret
void swap<int>(int&, int&):
pushq %rbp
movq %rsp, %rbp
subq $32, %rsp
movq %rdi, -24(%rbp)
movq %rsi, -32(%rbp)
movq -24(%rbp), %rax
movq %rax, %rdi
call std::remove_reference<int&>::type&& std::move<int&>(int&)
movl (%rax), %eax
movl %eax, -4(%rbp)
movq -32(%rbp), %rax
movq %rax, %rdi
call std::remove_reference<int&>::type&& std::move<int&>(int&)
movl (%rax), %edx
movq -24(%rbp), %rax
movl %edx, (%rax)
leaq -4(%rbp), %rax
movq %rax, %rdi
call std::remove_reference<int&>::type&& std::move<int&>(int&)
movl (%rax), %edx
movq -32(%rbp), %rax
movl %edx, (%rax)
nop
leave
ret
No-move generated assembly:
main:
pushq %rbp
movq %rsp, %rbp
subq $16, %rsp
leaq -8(%rbp), %rdx
leaq -4(%rbp), %rax
movq %rdx, %rsi
movq %rax, %rdi
call void swap<int>(int&, int&)
movl $0, %eax
leave
ret
void swap<int>(int&, int&):
pushq %rbp
movq %rsp, %rbp
movq %rdi, -24(%rbp)
movq %rsi, -32(%rbp)
movq -24(%rbp), %rax
movl (%rax), %eax
movl %eax, -4(%rbp)
movq -32(%rbp), %rax
movl (%rax), %edx
movq -24(%rbp), %rax
movl %edx, (%rax)
movq -32(%rbp), %rax
movl -4(%rbp), %edx
movl %edx, (%rax)
nop
popq %rbp
ret
I think the way I've internalized or abstracted move semantics for myself is that they "enable 'newly available' optimizations through the removal of costly temporary copies".
Have I just internalized this incorrectly?
-Or-
Is this just failing because I'm using a primitive type?
-Or-
Have I just missed the mark entirely?

OK, the problem is inlining and effective optimization: with optimization enabled, the two versions compile to identical code for int, so to keep the individual calls visible I've annotated everything with __attribute__((noinline)).
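Those call std::move<int&>(int&) lines in your listing are real function calls only because nothing is inlined at -O0; std::move itself does no work at runtime. A simplified sketch of a typical libstdc++-style definition (my illustration, named my_move to avoid clashing with the real one):

#include <type_traits>

// std::move is only a cast to rvalue reference; it generates no code
// once inlined, but costs a call/ret pair per use when not optimized.
template<class T>
constexpr typename std::remove_reference<T>::type&& my_move(T&& t) noexcept {
    return static_cast<typename std::remove_reference<T>::type&&>(t);
}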
I've made a class to exercise the move operations:
class mover {
public:
    int *ptr { nullptr };

    __attribute__((noinline)) mover() : ptr(new int(42)) { }

    // Copy constructor: allocates a fresh int and copies the value.
    __attribute__((noinline)) mover(mover& other) {
        delete ptr; // ptr is still nullptr here, so this is harmless
        ptr = new int(*other.ptr);
    }

    // Move assignment: steals the pointer, no allocation.
    __attribute__((noinline)) mover& operator=(mover&& other) {
        delete ptr;
        ptr = other.ptr;
        other.ptr = nullptr;
        return *this;
    }

    // Copy assignment: allocates and copies the value.
    __attribute__((noinline)) mover& operator=(mover& other) {
        delete ptr;
        ptr = new int(*other.ptr);
        return *this;
    }

    // Move constructor: steals the pointer, no allocation.
    __attribute__((noinline)) mover(mover&& other) {
        ptr = other.ptr;
        other.ptr = nullptr;
    }

    __attribute__((noinline)) ~mover() {
        delete ptr;
    }
};
It deliberately doesn't use smart pointers, so you can see exactly what goes on.
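The driver is along these lines (my reconstruction; the original doesn't show it):

int main() {
    mover a, b;  // each constructor allocates new int(42)
    swap(a, b);  // instantiates the swap<mover> shown below
}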
The move swap now looks like this, calling the correct constructors and operators:
void swap<mover>(mover&, mover&):
pushq %r12
movq %rdi, %r12
pushq %rbp
movq %rsi, %rbp
movq %rdi, %rsi
subq $24, %rsp
leaq 8(%rsp), %rdi
call mover::mover(mover&&)
movq %rbp, %rsi
movq %r12, %rdi
call mover::operator=(mover&&) [clone .isra.0]
leaq 8(%rsp), %rsi
movq %rbp, %rdi
call mover::operator=(mover&&) [clone .isra.0]
leaq 8(%rsp), %rdi
call mover::~mover() [complete object destructor]
addq $24, %rsp
popq %rbp
popq %r12
ret
And the copy swap looks like this, calling the copy constructor and copy assignment:
void swap<mover>(mover&, mover&):
pushq %r12
movq %rdi, %r12
pushq %rbp
movq %rsi, %rbp
movq %rdi, %rsi
subq $24, %rsp
leaq 8(%rsp), %rdi
call mover::mover(mover&)
movq %rbp, %rsi
movq %r12, %rdi
call mover::operator=(mover&) [clone .isra.0]
leaq 8(%rsp), %rsi
movq %rbp, %rdi
call mover::operator=(mover&) [clone .isra.0]
leaq 8(%rsp), %rdi
call mover::~mover() [complete object destructor]
addq $24, %rsp
popq %rbp
popq %r12
ret
The biggest effect comes from the difference between the move constructor
mover::mover(mover&&):
movq (%rsi), %rax
movq $0, (%rsi)
movq %rax, (%rdi)
ret
and the copy constructor:
mover::mover(mover&):
pushq %rbp
movq %rsi, %rbp
pushq %rbx
movq %rdi, %rbx
subq $8, %rsp
movq $0, (%rdi)
movl $4, %edi
call operator new(unsigned long) // <---- new
movq 0(%rbp), %rdx
movq %rax, (%rbx)
movl (%rdx), %edx
movl %edx, (%rax)
addq $8, %rsp
popq %rbx
popq %rbp
ret
Note the call to operator new in the latter.
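The same contrast shows up with standard library types; a minimal sketch (assuming a typical std::string implementation) of why moving wins for resource-owning types, while for int a move is just a copy:

#include <string>
#include <utility>

void demo() {
    std::string a(1000, 'x');
    std::string copied(a);           // allocates and copies 1000 chars, like mover::mover(mover&)
    std::string moved(std::move(a)); // typically just steals a's buffer, like mover::mover(mover&&)
}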

Related

Behaviour of assert

I'm trying to understand how assert behaves in the case of a false expression. It calls __assert_fail, which in turn calls std::abort(), according to the documentation. So I want to know what is going on in the assembly code of __assert_fail:
endbr64
pushq %r13
movl %edx, %r13d
movl $0x5, %edx
pushq %r12
movq %rsi, %r12
leaq 0x18467e(%rip), %rsi
pushq %rbp
movq %rdi, %rbp
leaq 0x1804f0(%rip), %rdi ; _libc_intl_domainname
pushq %rbx
movq %rcx, %rbx
subq $0x8, %rsp
callq 0x37980 ; __dcgettext
movq %rbx, %r8
movl %r13d, %ecx
movq %r12, %rdx
movq %rax, %rdi
movq %rbp, %rsi
callq 0x36d70
What do %r13 and %r12 stand for in this code? Where is the call to std::abort(), and what is going on before and after it?
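For context, with glibc the assert macro expands to something roughly like the sketch below (simplified, not the verbatim glibc source); __assert_fail formats the message (translating it via dcgettext, which is the __dcgettext call in the listing above) and then aborts, so it never returns:

// Declaration matching glibc's assertion-failure handler:
extern "C" void __assert_fail(const char* assertion, const char* file,
                              unsigned int line, const char* function);

// Roughly what assert(expr) becomes when NDEBUG is not defined:
#define my_assert(expr) \
    ((expr) ? (void)0 : __assert_fail(#expr, __FILE__, __LINE__, __func__))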

What do these instructions in the disassembly phase indicate?

Hello. When I run C++ code in the CLion IDE debugger, after main() returns the debugger steps into a file called "disassembly", which contains what looks like assembly code. What are those instructions? What do they do? Should I care? As I'm new to C++, I'm familiarizing myself with the language, the IDE, and anything else of relevance.
start:
nop
movl %eax, %edi
callq 0x2e82e ; symbol stub for: exit
hlt
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
exit:
jmpq *0x268c241c(%rip)
exit:
pushq %rbp
movq %rsp, %rbp
pushq %rbx
pushq %rax
movl %edi, %ebx
cmpl $0xad, %edi
jne 0x5a404 ; <+41>
leaq 0x2683a31e(%rip), %rcx
movq (%rcx), %rax
testq %rax, %rax
je 0x5a404 ; <+41>
xorl %eax, %eax
xchgq %rax, (%rcx)
testq %rax, %rax
jne 0x5a427 ; <+76>
xorl %eax, %eax
callq 0x8017c ; symbol stub for: _tlv_exit
xorl %edi, %edi
callq 0x5a196 ; __cxa_finalize
movq 0x268354f7(%rip), %rax
testq %rax, %rax
je 0x5a420 ; <+69>
callq *%rax
movl %ebx, %edi
callq 0x8000e ; symbol stub for: __exit
callq *%rax
ud2
There is also this
_tlv_exit:
pushq %rbp
movq %rsp, %rbp
pushq %rbx
pushq %rax
movq 0x268db5e9(%rip), %rdi
callq 0x2e92a ; symbol stub for: pthread_getspecific
testq %rax, %rax
je 0x18e20 ; <+54>
movq %rax, %rbx
movq 0x268db5d5(%rip), %rdi
xorl %esi, %esi
callq 0x2e942 ; symbol stub for: pthread_setspecific
movq %rbx, %rdi
addq $0x8, %rsp
popq %rbx
popq %rbp
jmp 0x1983e ; tlv_finalize_list
addq $0x8, %rsp
popq %rbx
popq %rbp
retq
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
pthread_getspecific:
jmpq *0x268c2470(%rip)
__cxa_finalize_ranges:
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18, %rsp
movl %esi, -0x2c(%rbp)
movq %rdi, -0x38(%rbp)
leaq 0x26834d24(%rip), %rdi
callq 0x804d6 ; symbol stub for: pthread_mutex_lock
movq 0x26834ca0(%rip), %r13
testq %r13, %r13
je 0x5a17c ; <+383>
movl -0x2c(%rbp), %ebx
addq $0x8, -0x38(%rbp)
movslq 0x8(%r13), %r15
testq %r15, %r15
jle 0x5a16f ; <+370>
decq %r15
movq %r15, %r14
shlq $0x5, %r14
movl 0x10(%r13,%r14), %r12d
testl %r12d, %r12d
je 0x5a03d ; <+64>
cmpl $0x0, -0x2c(%rbp)
je 0x5a102 ; <+261>
cmpl $0x1, %r12d
je 0x5a0a4 ; <+167>
cmpl $0x3, %r12d
je 0x5a0d1 ; <+212>
cmpl $0x2, %r12d
jne 0x5a102 ; <+261>
movq 0x28(%r13,%r14), %rax
movq -0x38(%rbp), %rcx
xorl %edx, %edx
movq -0x8(%rcx), %rsi
cmpq %rax, %rsi
ja 0x5a096 ; <+153>
addq (%rcx), %rsi
cmpq %rax, %rsi
ja 0x5a102 ; <+261>
incq %rdx
addq $0x10, %rcx
cmpq %rbx, %rdx
jb 0x5a085 ; <+136>
jmp 0x5a03d ; <+64>
movq 0x18(%r13,%r14), %rax
movq -0x38(%rbp), %rcx
xorl %edx, %edx
movq -0x8(%rcx), %rsi
cmpq %rax, %rsi
ja 0x5a0c0 ; <+195>
addq (%rcx), %rsi
cmpq %rax, %rsi
ja 0x5a102 ; <+261>
incq %rdx
addq $0x10, %rcx
cmpq %rbx, %rdx
jb 0x5a0af ; <+178>
jmp 0x5a03d ; <+64>
movq 0x18(%r13,%r14), %rax
movq 0x10(%rax), %rax
movq -0x38(%rbp), %rcx
xorl %edx, %edx
movq -0x8(%rcx), %rsi
cmpq %rax, %rsi
ja 0x5a0f1 ; <+244>
addq (%rcx), %rsi
cmpq %rax, %rsi
ja 0x5a102 ; <+261>
incq %rdx
addq $0x10, %rcx
cmpq %rbx, %rdx
jb 0x5a0e0 ; <+227>
jmp 0x5a03d ; <+64>
leaq 0x10(%r13,%r14), %rax
movl $0x0, (%rax)
movb $0x0, 0x26834b94(%rip)
leaq 0x26834c25(%rip), %rdi
callq 0x804e2 ; symbol stub for: pthread_mutex_unlock
cmpl $0x1, %r12d
je 0x5a13e ; <+321>
cmpl $0x3, %r12d
je 0x5a145 ; <+328>
cmpl $0x2, %r12d
jne 0x5a14d ; <+336>
movq 0x20(%r13,%r14), %rdi
callq *0x18(%r13,%r14)
jmp 0x5a14d ; <+336>
callq *0x18(%r13,%r14)
jmp 0x5a14d ; <+336>
movq 0x18(%r13,%r14), %rdi
callq *0x10(%rdi)
leaq 0x26834bec(%rip), %rdi
callq 0x804d6 ; symbol stub for: pthread_mutex_lock
cmpb $0x0, 0x26834b48(%rip)
je 0x5a03d ; <+64>
movq 0x26834b5b(%rip), %r13
jmp 0x5a173 ; <+374>
movq (%r13), %r13
testq %r13, %r13
jne 0x5a039 ; <+60>
leaq 0x26834bbd(%rip), %rdi
addq $0x18, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x804e2 ; symbol stub for: pthread_mutex_unlock
__cxa_finalize:
testq %rdi, %rdi
je 0x5a1c5 ; <+47>
pushq %rbp
movq %rsp, %rbp
subq $0x10, %rsp
leaq -0x10(%rbp), %rax
movq %rdi, (%rax)
movq $0x1, 0x8(%rax)
movq %rax, %rdi
movl $0x1, %esi
callq 0x59ffd ; __cxa_finalize_ranges
addq $0x10, %rsp
popq %rbp
retq
xorl %edi, %edi
xorl %esi, %esi
jmp 0x59ffd ; __cxa_finalize_ranges
_tlv_exit:
jmpq *0x2680cbd6(%rip)
pthread_getspecific:
movq %gs:(,%rdi,8), %rax
retq
Assembly output is just a dump of the executable code the compiler generated, in a human-readable form [1]. It isn't actually used by the compiler; it's just an artifact of the compilation process, kept for reference.
Remember, the compiled executable can be converted into assembly code at any time; tools like IDA Pro and Ghidra excel at doing this on any executable. But the compiler can add in contextual information that would otherwise be lost in the final compilation phase, in the form of comments or useful labels for things.
The compiler often emits debug hints for your compiled executable so it can turn a stack trace into something that maps back to your original source code. These artifacts are much more useful, as they allow you to step through C++ code instead of assembly code. If you ever have to debug in a library you don't have the source for, you'll be stuck stepping through an assembly view of the executable code.
[1] Presuming you can read assembly code.
The code you posted is support code from your libc runtime. The runtime is responsible for, among other things:
implementing atexit hooks;
setting up your IO streams (cin, cout);
running constructors of any global static variables.
This answer has a more complete overview. You can search for articles about __libc_start_main and related functions to learn more.
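For instance, all three of those responsibilities show up in a tiny program like this (my illustration, unrelated to the dump above):

#include <cstdlib>
#include <iostream>

struct Global {
    Global()  { std::cout << "constructed before main\n"; }  // global ctor, run by the runtime's init code
    ~Global() { std::cout << "destroyed after main\n"; }     // run via the __cxa_finalize machinery
} g;

int main() {
    std::atexit([] { std::cout << "atexit hook\n"; });  // hook runs inside exit()
    return 0;  // returning from main calls exit(), which triggers the above
}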

Expression template code not optimized fully

I have the following linear algebra function call (vector-vector addition) in C++.
int m = 4;
blasfeo_dvec one, two, three;
blasfeo_allocate_dvec(m, &one);
blasfeo_allocate_dvec(m, &two);
blasfeo_allocate_dvec(m, &three);
// initialize vectors ... (omitted)
blasfeo_daxpy(m, 1.0, &one, 0, &two, 0, &three, 0);
Using expression templates (ETs), we can wrap it as follows:
three = one + two;
where the vector struct looks like
struct blasfeo_dvec {
    int m;       // length
    int pm;      // packed length
    double *pa;  // pointer to a pm array of doubles, the first is aligned to cache line size
    int memsize; // size of needed memory

    void operator=(const vec_expression_sum<blasfeo_dvec, blasfeo_dvec> expr) {
        blasfeo_daxpy(m, 1.0, (blasfeo_dvec *) &expr.vec_a, 0, (blasfeo_dvec *) &expr.vec_b, 0, this, 0);
    }
};
The cast to non-const is necessary because blasfeo_daxpy takes non-const pointers. The ET code is simply
template<typename Ta, typename Tb>
struct vec_expression_sum {
    const Ta vec_a;
    const Tb vec_b;
    vec_expression_sum(const Ta va, const Tb vb) : vec_a {va}, vec_b {vb} {}
};

template<typename Ta, typename Tb>
auto operator+(const Ta a, const Tb b) {
    return vec_expression_sum<Ta, Tb>(a, b);
}
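Note what three = one + two amounts to with these definitions; since operator+ takes its operands by value and vec_expression_sum stores values, every blasfeo_dvec struct is copied several times before blasfeo_daxpy ever runs (my annotation of the code above):

auto tmp = one + two;  // operator+ copies one and two by value, then
                       // vec_expression_sum stores further copies as members
three = tmp;           // tmp is copied once more into operator='s by-value parameter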
The 'native' call, i.e. blasfeo_daxpy(...), generates the following assembly:
; allocation and initialization omitted ...
movl $0, (%rsp)
movl $4, %edi
xorl %edx, %edx
xorl %r8d, %r8d
movsd LCPI0_0(%rip), %xmm0 ## xmm0 = mem[0],zero
movq %r14, %rsi
movq %rbx, %rcx
movq %r15, %r9
callq _blasfeo_daxpy
...
which is exactly what you would expect. The ET code is quite a bit longer:
; allocation :
leaq -120(%rbp), %rbx
movl $4, %edi
movq %rbx, %rsi
callq _blasfeo_allocate_dvec
leaq -96(%rbp), %r15
movl $4, %edi
movq %r15, %rsi
callq _blasfeo_allocate_dvec
leaq -192(%rbp), %r14
movl $4, %edi
movq %r14, %rsi
callq _blasfeo_allocate_dvec
; initialization code omitted
; operator+ :
movq -104(%rbp), %rax
movq %rax, -56(%rbp)
movq -120(%rbp), %rax
movq -112(%rbp), %rcx
movq %rcx, -64(%rbp)
movq %rax, -72(%rbp)
; vec_expression_sum :
movq -80(%rbp), %rax
movq %rax, -32(%rbp)
movq -96(%rbp), %rax
movq -88(%rbp), %rcx
movq %rcx, -40(%rbp)
movq %rax, -48(%rbp)
movq -32(%rbp), %rax
movq %rax, -128(%rbp)
movq -40(%rbp), %rax
movq %rax, -136(%rbp)
movq -48(%rbp), %rax
movq %rax, -144(%rbp)
movq -56(%rbp), %rax
movq %rax, -152(%rbp)
movq -72(%rbp), %rax
movq -64(%rbp), %rcx
movq %rcx, -160(%rbp)
movq %rax, -168(%rbp)
leaq -144(%rbp), %rcx
; blasfeo_daxpy :
movl -192(%rbp), %edi
movl $0, (%rsp)
leaq -168(%rbp), %rsi
xorl %edx, %edx
xorl %r8d, %r8d
movsd LCPI0_0(%rip), %xmm0 ## xmm0 = mem[0],zero
movq %r14, %r9
callq _blasfeo_daxpy
...
It involves quite a bit of copying, namely the fields of blasfeo_dvec. I (naively, maybe) hoped that the ET code would generate the exact same code as the native call, given that everything is fixed at compile time and const, but it doesn't.
The question is: why the extra loads? And is there a way of getting fully 'optimized' code? (edit: I use Apple LLVM version 8.1.0 (clang-802.0.42) with -std=c++14 -O3)
Note: I read and understood this and this post on a similar topic, but they unfortunately do not contain an answer to my question.
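For reference, the usual way to eliminate that member-wise copying in expression templates is to make the node store references and take its operands by reference; a sketch only, not verified against this clang version:

template<typename Ta, typename Tb>
struct vec_expression_sum {
    const Ta& vec_a;  // references: no blasfeo_dvec fields are copied
    const Tb& vec_b;
    vec_expression_sum(const Ta& va, const Tb& vb) : vec_a {va}, vec_b {vb} {}
};

template<typename Ta, typename Tb>
auto operator+(const Ta& a, const Tb& b) {
    return vec_expression_sum<Ta, Tb>(a, b);
}

The usual caveat applies: a reference-holding node must not outlive its operands, so an expression like auto e = f() + g(); would dangle.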

Vectorization of sin and cos

I was playing around with Compiler Explorer and ran into an anomaly (I think). If I want to make the compiler vectorize a sin calculation using libmvec, I would write:
#include <cmath>

#define NN 512
typedef float T;
typedef T __attribute__((aligned(NN))) AT;

inline T s(const T x)
{
    return sinf(x);
}

void func(AT* __restrict x, AT* __restrict y, int length)
{
    if (length & NN-1) __builtin_unreachable();
    for (int i = 0; i < length; i++)
    {
        y[i] = s(x[i]);
    }
}
Compile with gcc 6.2 and -O3 -march=native -ffast-math, and you get:
func(float*, float*, int):
testl %edx, %edx
jle .L10
leaq 8(%rsp), %r10
andq $-32, %rsp
pushq -8(%r10)
pushq %rbp
movq %rsp, %rbp
pushq %r14
xorl %r14d, %r14d
pushq %r13
leal -8(%rdx), %r13d
pushq %r12
shrl $3, %r13d
movq %rsi, %r12
pushq %r10
addl $1, %r13d
pushq %rbx
movq %rdi, %rbx
subq $8, %rsp
.L4:
vmovaps (%rbx), %ymm0
addl $1, %r14d
addq $32, %r12
addq $32, %rbx
call _ZGVcN8v_sinf // YAY! Vectorized trig!
vmovaps %ymm0, -32(%r12)
cmpl %r13d, %r14d
jb .L4
vzeroupper
addq $8, %rsp
popq %rbx
popq %r10
popq %r12
popq %r13
popq %r14
popq %rbp
leaq -8(%r10), %rsp
.L10:
ret
But when I add a cosine to the function, there is no vectorization:
#include <cmath>

#define NN 512
typedef float T;
typedef T __attribute__((aligned(NN))) AT;

inline T f(const T x)
{
    return cosf(x)+sinf(x);
}

void func(AT* __restrict x, AT* __restrict y, int length)
{
    if (length & NN-1) __builtin_unreachable();
    for (int i = 0; i < length; i++)
    {
        y[i] = f(x[i]);
    }
}
which gives:
func(float*, float*, int):
testl %edx, %edx
jle .L10
pushq %r12
leal -1(%rdx), %eax
pushq %rbp
leaq 4(%rdi,%rax,4), %r12
movq %rsi, %rbp
pushq %rbx
movq %rdi, %rbx
subq $16, %rsp
.L4:
vmovss (%rbx), %xmm0
leaq 8(%rsp), %rsi
addq $4, %rbx
addq $4, %rbp
leaq 12(%rsp), %rdi
call sincosf // No vectorization
vmovss 12(%rsp), %xmm0
vaddss 8(%rsp), %xmm0, %xmm0
vmovss %xmm0, -4(%rbp)
cmpq %rbx, %r12
jne .L4
addq $16, %rsp
popq %rbx
popq %rbp
popq %r12
.L10:
ret
I see two good alternatives: either call a vectorized version of sincosf, or call the vectorized sin and cos sequentially. I tried adding -fno-builtin-sincos to no avail. -fopt-info-vec-missed complains about complex float, of which there is none.
Is this a known issue with gcc? Either way, is there a way I can convince gcc to vectorize the latter example?
(As an aside, is there any way to get gcc < 6 to vectorize trigonometric functions automatically?)
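For what it's worth, the asker's second alternative (sequential vectorized sin and cos) can be expressed by splitting the loop so each body contains a single libmvec-mappable call; a sketch only, not verified against gcc 6.2:

#include <cmath>

#define NN 512
typedef float T;
typedef T __attribute__((aligned(NN))) AT;

void func2(AT* __restrict x, AT* __restrict y, int length)
{
    if (length & NN-1) __builtin_unreachable();
    // Each loop now contains one trigonometric call, which the
    // vectorizer can replace with _ZGVcN8v_sinf / _ZGVcN8v_cosf.
    for (int i = 0; i < length; i++)
        y[i] = sinf(x[i]);
    for (int i = 0; i < length; i++)
        y[i] += cosf(x[i]);
}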

Preferred method of populating a std::vector of classes with its first entry

In reviewing a large software project I came across two ways of doing essentially the same thing: pushing an initial entry onto a std::vector.
Consider a class like Foo:
class Foo
{
public:
    Foo() = default; // needed for the default-construct-then-set example below
    Foo(int param) {
        m_param = param;
    }
    void setParam(int param) {
        m_param = param;
    }
private:
    int m_param;
};
Is there a preferred method between the following, considering whatever metrics are applicable: speed, stability, etc.?
Foo bar;
int val = 5;
bar.setParam(val);
std::vector<Foo> fooVec(1, bar);
Versus
int val = 5;
std::vector<Foo> fooVec;
fooVec.push_back(Foo(val));
Is there a preferred method between the following, considering whatever metrics are applicable: speed, stability, etc.?
It can be argued that, without doubt, this is poor style:
auto test1()
{
    Foo bar;                         // redundant default construction
    int val = 5;                     // redundant load
    bar.setParam(val);               // only now setting the value
    std::vector<Foo> fooVec(1, bar); // redundant copy
    return fooVec;
}
and that this is good style:
auto test2()
{
    return std::vector<Foo>(1, Foo(5));
}
What about performance? We all care about that, right?
But what does it mean in reality, once you've enabled optimisations?
__Z5test1v: ## #_Z5test1v
.cfi_startproc
## BB#0: ## %_ZNSt3__16vectorI3FooNS_9allocatorIS1_EEEC2EmRKS1_.exit1
pushq %rbx
Ltmp0:
.cfi_def_cfa_offset 16
Ltmp1:
.cfi_offset %rbx, -16
movq %rdi, %rbx
movq $0, 16(%rbx)
movq $0, 8(%rbx)
movq $0, (%rbx)
movl $4, %edi
callq __Znwm
movq %rax, (%rbx)
leaq 4(%rax), %rcx
movq %rcx, 16(%rbx)
movl $5, (%rax)
movq %rcx, 8(%rbx)
movq %rbx, %rax
popq %rbx
retq
.cfi_endproc
.globl __Z5test2v
.align 4, 0x90
__Z5test2v: ## #_Z5test2v
.cfi_startproc
## BB#0: ## %_ZNSt3__16vectorI3FooNS_9allocatorIS1_EEEC2EmRKS1_.exit1
pushq %rbx
Ltmp2:
.cfi_def_cfa_offset 16
Ltmp3:
.cfi_offset %rbx, -16
movq %rdi, %rbx
movq $0, 16(%rbx)
movq $0, 8(%rbx)
movq $0, (%rbx)
movl $4, %edi
callq __Znwm
movq %rax, (%rbx)
leaq 4(%rax), %rcx
movq %rcx, 16(%rbx)
movl $5, (%rax)
movq %rcx, 8(%rbx)
movq %rbx, %rax
popq %rbx
retq
.cfi_endproc
Absolutely no difference whatsoever. The generated machine code is exactly the same in this case.
Unless you have a fairly specific reason to use one of these, like needing to support an older (pre-C++11) compiler, I'd just use:
std::vector<Foo> fooVec { 5 }; // or fooVec { Foo(5) };, if you really prefer
This is pretty much guaranteed to be as fast, stable, etc. as any of the others (and may be a tad faster, depending...)
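As a usage note, if the vector is later grown element by element, constructing in place avoids even the temporary Foo (C++11 and later):

std::vector<Foo> fooVec;
fooVec.reserve(1);       // optional: pre-allocate to avoid a reallocation
fooVec.emplace_back(5);  // constructs Foo(5) directly in the vector's storage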