Why does the base class destructor call free? - C++

I am compiling a C++ program for a NIOS II core with very limited memory. Since it is an embedded system, we are not using a heap either. After we added inheritance to our code, we saw that malloc and free (and related functions) were being built into our binary, increasing its size by several kilobytes.
We have one pure virtual base class, StatisticsApi, and one derived class, Statistics. Both have virtual destructors defined.
We are compiling with Altera's GCC with -fno-rtti and -fno-exceptions, and we have also defined our own __cxa_pure_virtual().
Looking closer, we see that delete (and thus free) is called from the destructors. Why is that? What memory is it trying to free?
Here is the assembler for the destructors:
0000c6b8 <_ZN7Namespace12StatisticsD0Ev>:
c6b8: defffd04 addi sp,sp,-12
c6bc: dfc00215 stw ra,8(sp)
c6c0: df000115 stw fp,4(sp)
c6c4: df000104 addi fp,sp,4
c6c8: e13fff15 stw r4,-4(fp)
c6cc: 00c000b4 movhi r3,2
c6d0: 18c74b04 addi r3,r3,7468
c6d4: e0bfff17 ldw r2,-4(fp)
c6d8: 10c00015 stw r3,0(r2)
c6dc: e13fff17 ldw r4,-4(fp)
c6e0: 000c7f00 call c7f0 <_ZN7Namespace15StatisticsApiD2Ev>
c6e4: 00800044 movi r2,1
c6e8: 10803fcc andi r2,r2,255
c6ec: 1005003a cmpeq r2,r2,zero
c6f0: 1000021e bne r2,zero,c6fc <_ZN7Namespace12StatisticsD0Ev+0x44>
c6f4: e13fff17 ldw r4,-4(fp)
c6f8: 000c8e00 call c8e0 <_ZdlPv>
c6fc: e037883a mov sp,fp
c700: dfc00117 ldw ra,4(sp)
c704: df000017 ldw fp,0(sp)
c708: dec00204 addi sp,sp,8
c70c: f800283a ret
0000c710 <_ZN7Namespace12StatisticsD1Ev>:
c710: defffd04 addi sp,sp,-12
c714: dfc00215 stw ra,8(sp)
c718: df000115 stw fp,4(sp)
c71c: df000104 addi fp,sp,4
c720: e13fff15 stw r4,-4(fp)
c724: 00c000b4 movhi r3,2
c728: 18c74b04 addi r3,r3,7468
c72c: e0bfff17 ldw r2,-4(fp)
c730: 10c00015 stw r3,0(r2)
c734: e13fff17 ldw r4,-4(fp)
c738: 000c7f00 call c7f0 <_ZN7Namespace15StatisticsApiD2Ev>
c73c: 0005883a mov r2,zero
c740: 10803fcc andi r2,r2,255
c744: 1005003a cmpeq r2,r2,zero
c748: 1000021e bne r2,zero,c754 <_ZN7Namespace12StatisticsD1Ev+0x44>
c74c: e13fff17 ldw r4,-4(fp)
c750: 000c8e00 call c8e0 <_ZdlPv>
c754: e037883a mov sp,fp
c758: dfc00117 ldw ra,4(sp)
c75c: df000017 ldw fp,0(sp)
c760: dec00204 addi sp,sp,8
c764: f800283a ret
0000c768 <_ZN7Namespace12StatisticsD2Ev>:
c768: defffd04 addi sp,sp,-12
c76c: dfc00215 stw ra,8(sp)
c770: df000115 stw fp,4(sp)
c774: df000104 addi fp,sp,4
c778: e13fff15 stw r4,-4(fp)
c77c: 00c000b4 movhi r3,2
c780: 18c74b04 addi r3,r3,7468
c784: e0bfff17 ldw r2,-4(fp)
c788: 10c00015 stw r3,0(r2)
c78c: e13fff17 ldw r4,-4(fp)
c790: 000c7f00 call c7f0 <_ZN7Namespace15StatisticsApiD2Ev>
c794: 0005883a mov r2,zero
c798: 10803fcc andi r2,r2,255
c79c: 1005003a cmpeq r2,r2,zero
c7a0: 1000021e bne r2,zero,c7ac <_ZN7Namespace12StatisticsD2Ev+0x44>
c7a4: e13fff17 ldw r4,-4(fp)
c7a8: 000c8e00 call c8e0 <_ZdlPv>
c7ac: e037883a mov sp,fp
c7b0: dfc00117 ldw ra,4(sp)
c7b4: df000017 ldw fp,0(sp)
c7b8: dec00204 addi sp,sp,8
c7bc: f800283a ret
And here are the destructors for the base class:
0000c7f0 <_ZN7Namespace15StatisticsApiD2Ev>:
c7f0: defffd04 addi sp,sp,-12
c7f4: dfc00215 stw ra,8(sp)
c7f8: df000115 stw fp,4(sp)
c7fc: df000104 addi fp,sp,4
c800: e13fff15 stw r4,-4(fp)
c804: 00c000b4 movhi r3,2
c808: 18c76204 addi r3,r3,7560
c80c: e0bfff17 ldw r2,-4(fp)
c810: 10c00015 stw r3,0(r2)
c814: 0005883a mov r2,zero
c818: 10803fcc andi r2,r2,255
c81c: 1005003a cmpeq r2,r2,zero
c820: 1000021e bne r2,zero,c82c <_ZN7Namespace15StatisticsApiD2Ev+0x3c>
c824: e13fff17 ldw r4,-4(fp)
c828: 000c8e00 call c8e0 <_ZdlPv>
c82c: e037883a mov sp,fp
c830: dfc00117 ldw ra,4(sp)
c834: df000017 ldw fp,0(sp)
c838: dec00204 addi sp,sp,8
c83c: f800283a ret
0000c840 <_ZN7Namespace15StatisticsApiD0Ev>:
c840: defffd04 addi sp,sp,-12
c844: dfc00215 stw ra,8(sp)
c848: df000115 stw fp,4(sp)
c84c: df000104 addi fp,sp,4
c850: e13fff15 stw r4,-4(fp)
c854: 00c000b4 movhi r3,2
c858: 18c76204 addi r3,r3,7560
c85c: e0bfff17 ldw r2,-4(fp)
c860: 10c00015 stw r3,0(r2)
c864: 00800044 movi r2,1
c868: 10803fcc andi r2,r2,255
c86c: 1005003a cmpeq r2,r2,zero
c870: 1000021e bne r2,zero,c87c <_ZN7Namespace15StatisticsApiD0Ev+0x3c>
c874: e13fff17 ldw r4,-4(fp)
c878: 000c8e00 call c8e0 <_ZdlPv>
c87c: e037883a mov sp,fp
c880: dfc00117 ldw ra,4(sp)
c884: df000017 ldw fp,0(sp)
c888: dec00204 addi sp,sp,8
c88c: f800283a ret
0000c890 <_ZN7Namespace15StatisticsApiD1Ev>:
c890: defffd04 addi sp,sp,-12
c894: dfc00215 stw ra,8(sp)
c898: df000115 stw fp,4(sp)
c89c: df000104 addi fp,sp,4
c8a0: e13fff15 stw r4,-4(fp)
c8a4: 00c000b4 movhi r3,2
c8a8: 18c76204 addi r3,r3,7560
c8ac: e0bfff17 ldw r2,-4(fp)
c8b0: 10c00015 stw r3,0(r2)
c8b4: 0005883a mov r2,zero
c8b8: 10803fcc andi r2,r2,255
c8bc: 1005003a cmpeq r2,r2,zero
c8c0: 1000021e bne r2,zero,c8cc <_ZN7Namespace15StatisticsApiD1Ev+0x3c>
c8c4: e13fff17 ldw r4,-4(fp)
c8c8: 000c8e00 call c8e0 <_ZdlPv>
c8cc: e037883a mov sp,fp
c8d0: dfc00117 ldw ra,4(sp)
c8d4: df000017 ldw fp,0(sp)
c8d8: dec00204 addi sp,sp,8
c8dc: f800283a ret
And finally operator delete:
0000c8e0 <_ZdlPv>:
c8e0: 20000126 beq r4,zero,c8e8 <_ZdlPv+0x8>
c8e4: 000ebf01 jmpi ebf0 <free>
c8e8: f800283a ret
And here is a link to the instruction set for NIOS II for reference: http://www.altera.com/literature/hb/nios2/n2cpu_nii51017.pdf
The two solutions/workarounds we found were:
1. Redefining new/delete as empty functions (there is no heap, so they shouldn't be called anyway!), thus avoiding the call to free.
2. Leaving the destructors undefined in C++, which keeps them from being emitted in the assembly at all, though the compiler will then complain that there is no virtual destructor.
Workarounds or not, why are the destructors calling free? What memory are they trying to free? We have other destructors that belong neither to base classes nor to derived classes, and they don't call free. This is what such a destructor looks like:
00005194 <_ZN7Namespace16OtherClassD2Ev>:
5194: defffe04 addi sp,sp,-8
5198: df000115 stw fp,4(sp)
519c: df000104 addi fp,sp,4
51a0: e13fff15 stw r4,-4(fp)
51a4: e037883a mov sp,fp
51a8: df000017 ldw fp,0(sp)
51ac: dec00104 addi sp,sp,4
51b0: f800283a ret
000051b4 <_ZN7Namespace16OtherClassD1Ev>:
51b4: defffe04 addi sp,sp,-8
51b8: df000115 stw fp,4(sp)
51bc: df000104 addi fp,sp,4
51c0: e13fff15 stw r4,-4(fp)
51c4: e037883a mov sp,fp
51c8: df000017 ldw fp,0(sp)
51cc: dec00104 addi sp,sp,4
51d0: f800283a ret
And also, in general, why are there multiple functions for each destructor (D0, D1 and D2)?

When you call delete on an object with a virtual destructor, the call is dispatched through the virtual table to the class's deleting destructor. That function runs the destructor and then frees the memory, since only at the most derived level is the correct pointer known to pass to the free function. The memory it would free is the object itself (the this pointer, which you can see being reloaded into r4 just before the call to _ZdlPv).
The problem is that the compiler doesn't know that you never call delete. It still has to emit that code, even though it may never be used, just like any other virtual function.
As for the multiple functions per destructor: the Itanium C++ ABI that GCC follows emits up to three destructor symbols per class. D2 is the base-object destructor, D1 is the complete-object destructor, and D0 is the deleting destructor, which destroys the object and then calls operator delete. In your unoptimized output all three variants share the same body, but only the D0 versions load a constant 1 into the flag that guards the call to _ZdlPv; D1 and D2 load 0 and branch over it. The dead, guarded call is still a reference, though, which is enough to drag operator delete, and therefore free, into the image.
Even though, as a general rule, destructors should be virtual in classes that have other virtual functions, this may be a valid exception. Making the destructor protected in the base class will help avoid mistakes.
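If you go with the first workaround above (stubbing out the global deallocation functions so that free is never referenced), a minimal sketch could look like the following. This is an illustration of the idea, not code from the question; the stubs exist only to satisfy the linker on a heap-less target and are never expected to run:

#include <new>

// Empty replacements for the global deallocation functions. Nothing is ever
// really deleted on this heap-less target, so their only job is to keep
// free() out of the image. The throw() specifications match the C++98
// declarations in <new>; with C++11 or later use noexcept instead, and
// operator new can be stubbed out in the same spirit if it is ever referenced.
void operator delete(void* /*ptr*/) throw() {}
void operator delete[](void* /*ptr*/) throw() {}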

Related

Generating fast assembly for complex arithmetic in g++4.4.7

I have a very simple function:
__attribute__((noinline))
void benchmark(cfloat* __restrict__ aa, cfloat* __restrict__ bb, cfloat* __restrict__ cc, cfloat* __restrict__ dd, cfloat uu, cfloat vv, size_t nn) {
    for (ssize_t ii=0; ii < nn; ii++) {
        dd[ii] = (
            aa[ii]*uu +
            bb[ii]*vv +
            cc[ii]
        );
    }
}
That generates very different assembly with g++4.4.7 depending on how I define my cfloat object.
First version: if I define my cfloat thusly:
struct cfloat {
    cfloat(float re, float im) : re(re), im(im) {}
    float re,im;
};
cfloat operator +(cfloat a, cfloat b) {
    return cfloat(a.re+b.re, a.im+b.im);
}
cfloat operator *(cfloat a, cfloat b) {
    return cfloat(a.re*b.re-a.im*b.im, a.re*b.im+a.im*b.re);
}
it generates this assembly for the benchmark function (compiled with g++ testcx.cc -O3 -o testcx):
0x00000000004006a0 <+0>: push %r15
0x00000000004006a2 <+2>: test %r8,%r8
0x00000000004006a5 <+5>: push %r14
0x00000000004006a7 <+7>: push %r13
0x00000000004006a9 <+9>: push %r12
0x00000000004006ab <+11>: push %rbp
0x00000000004006ac <+12>: push %rbx
0x00000000004006ad <+13>: movq %xmm0,-0x28(%rsp)
0x00000000004006b3 <+19>: mov %rdi,-0x38(%rsp)
0x00000000004006b8 <+24>: mov -0x28(%rsp),%rax
0x00000000004006bd <+29>: movq %xmm1,-0x28(%rsp)
0x00000000004006c3 <+35>: mov -0x28(%rsp),%r9
0x00000000004006c8 <+40>: je 0x4008a0 <_Z9benchmarkP6cfloatS0_S0_S0_S_S_m+512>
0x00000000004006ce <+46>: mov %r9,%r15
0x00000000004006d1 <+49>: mov %rax,%r14
0x00000000004006d4 <+52>: xor %r11d,%r11d
0x00000000004006d7 <+55>: shr $0x20,%r15
0x00000000004006db <+59>: shr $0x20,%r14
0x00000000004006df <+63>: xor %r10d,%r10d
0x00000000004006e2 <+66>: mov %r15d,-0x2c(%rsp)
0x00000000004006e7 <+71>: xor %ebp,%ebp
0x00000000004006e9 <+73>: xor %ebx,%ebx
0x00000000004006eb <+75>: movss -0x2c(%rsp),%xmm6
0x00000000004006f1 <+81>: mov %r9d,-0x2c(%rsp)
0x00000000004006f6 <+86>: movss -0x2c(%rsp),%xmm5
0x00000000004006fc <+92>: mov %r14d,-0x2c(%rsp)
0x0000000000400701 <+97>: movss -0x2c(%rsp),%xmm4
0x0000000000400707 <+103>: mov %eax,-0x2c(%rsp)
0x000000000040070b <+107>: xor %r13d,%r13d
0x000000000040070e <+110>: xor %r12d,%r12d
0x0000000000400711 <+113>: movabs $0xffffffff00000000,%r9
0x000000000040071b <+123>: movss -0x2c(%rsp),%xmm3
0x0000000000400721 <+129>: nopl 0x0(%rax)
0x0000000000400728 <+136>: lea 0x0(,%r13,8),%rax
0x0000000000400730 <+144>: movaps %xmm6,%xmm1
0x0000000000400733 <+147>: movaps %xmm5,%xmm7
0x0000000000400736 <+150>: and $0xffffffff,%ebp
0x0000000000400739 <+153>: lea (%rsi,%rax,1),%r15
0x000000000040073d <+157>: lea (%rdx,%rax,1),%r14
0x0000000000400741 <+161>: add -0x38(%rsp),%rax
0x0000000000400746 <+166>: and $0xffffffff,%ebx
0x0000000000400749 <+169>: add $0x1,%r12
0x000000000040074d <+173>: movss (%r15),%xmm0
0x0000000000400752 <+178>: movss 0x4(%r15),%xmm2
0x0000000000400758 <+184>: mulss %xmm0,%xmm1
0x000000000040075c <+188>: mulss %xmm2,%xmm7
0x0000000000400760 <+192>: mulss %xmm5,%xmm0
0x0000000000400764 <+196>: mulss %xmm6,%xmm2
0x0000000000400768 <+200>: addss %xmm7,%xmm1
0x000000000040076c <+204>: movaps %xmm3,%xmm7
0x000000000040076f <+207>: subss %xmm2,%xmm0
0x0000000000400773 <+211>: movd %xmm1,-0x30(%rsp)
0x0000000000400779 <+217>: mov -0x30(%rsp),%edi
0x000000000040077d <+221>: movaps %xmm4,%xmm1
0x0000000000400780 <+224>: movd %xmm0,-0x30(%rsp)
0x0000000000400786 <+230>: mov %edi,%r15d
0x0000000000400789 <+233>: mov -0x30(%rsp),%edi
0x000000000040078d <+237>: movss (%rax),%xmm0
0x0000000000400791 <+241>: shl $0x20,%r15
0x0000000000400795 <+245>: movss 0x4(%rax),%xmm2
0x000000000040079a <+250>: mulss %xmm0,%xmm1
0x000000000040079e <+254>: or %r15,%rbp
0x00000000004007a1 <+257>: mulss %xmm2,%xmm7
0x00000000004007a5 <+261>: mov %edi,%r15d
0x00000000004007a8 <+264>: and %r9,%rbp
0x00000000004007ab <+267>: mulss %xmm3,%xmm0
0x00000000004007af <+271>: or %r15,%rbp
0x00000000004007b2 <+274>: mulss %xmm4,%xmm2
0x00000000004007b6 <+278>: addss %xmm7,%xmm1
0x00000000004007ba <+282>: subss %xmm2,%xmm0
0x00000000004007be <+286>: movd %xmm1,-0x30(%rsp)
0x00000000004007c4 <+292>: mov -0x30(%rsp),%edi
0x00000000004007c8 <+296>: movd %xmm0,-0x30(%rsp)
0x00000000004007ce <+302>: mov %edi,%eax
0x00000000004007d0 <+304>: mov -0x30(%rsp),%edi
0x00000000004007d4 <+308>: shl $0x20,%rax
0x00000000004007d8 <+312>: or %rax,%rbx
0x00000000004007db <+315>: and %r9,%rbx
0x00000000004007de <+318>: mov %edi,%eax
0x00000000004007e0 <+320>: or %rax,%rbx
0x00000000004007e3 <+323>: mov %r10,%rax
0x00000000004007e6 <+326>: mov %rbx,%rdi
0x00000000004007e9 <+329>: and $0xffffffff,%eax
0x00000000004007ec <+332>: shr $0x20,%rdi
0x00000000004007f0 <+336>: mov %edi,-0x20(%rsp)
0x00000000004007f4 <+340>: mov %rbp,%rdi
0x00000000004007f7 <+343>: shr $0x20,%rdi
0x00000000004007fb <+347>: movss -0x20(%rsp),%xmm0
0x0000000000400801 <+353>: mov %edi,-0x10(%rsp)
0x0000000000400805 <+357>: addss -0x10(%rsp),%xmm0
0x000000000040080b <+363>: mov %ebp,-0x10(%rsp)
0x000000000040080f <+367>: movss %xmm0,-0x20(%rsp)
0x0000000000400815 <+373>: mov -0x20(%rsp),%r10d
0x000000000040081a <+378>: mov %ebx,-0x20(%rsp)
0x000000000040081e <+382>: movss -0x20(%rsp),%xmm0
0x0000000000400824 <+388>: addss -0x10(%rsp),%xmm0
0x000000000040082a <+394>: shl $0x20,%r10
0x000000000040082e <+398>: or %rax,%r10
0x0000000000400831 <+401>: and %r9,%r10
0x0000000000400834 <+404>: movss %xmm0,-0x20(%rsp)
0x000000000040083a <+410>: mov -0x20(%rsp),%eax
0x000000000040083e <+414>: or %rax,%r10
0x0000000000400841 <+417>: mov %r11,%rax
0x0000000000400844 <+420>: mov %r10,%rdi
0x0000000000400847 <+423>: and $0xffffffff,%eax
0x000000000040084a <+426>: shr $0x20,%rdi
0x000000000040084e <+430>: mov %edi,-0x20(%rsp)
0x0000000000400852 <+434>: movss -0x20(%rsp),%xmm0
0x0000000000400858 <+440>: addss 0x4(%r14),%xmm0
0x000000000040085e <+446>: movss %xmm0,-0x20(%rsp)
0x0000000000400864 <+452>: mov -0x20(%rsp),%r11d
0x0000000000400869 <+457>: mov %r10d,-0x20(%rsp)
0x000000000040086e <+462>: movss -0x20(%rsp),%xmm0
0x0000000000400874 <+468>: addss (%r14),%xmm0
0x0000000000400879 <+473>: shl $0x20,%r11
0x000000000040087d <+477>: or %rax,%r11
0x0000000000400880 <+480>: and %r9,%r11
0x0000000000400883 <+483>: movss %xmm0,-0x20(%rsp)
0x0000000000400889 <+489>: mov -0x20(%rsp),%eax
0x000000000040088d <+493>: or %rax,%r11
0x0000000000400890 <+496>: cmp %r8,%r12
0x0000000000400893 <+499>: mov %r11,(%rcx,%r13,8)
0x0000000000400897 <+503>: mov %r12,%r13
0x000000000040089a <+506>: jne 0x400728 <_Z9benchmarkP6cfloatS0_S0_S0_S_S_m+136>
0x00000000004008a0 <+512>: pop %rbx
0x00000000004008a1 <+513>: pop %rbp
0x00000000004008a2 <+514>: pop %r12
0x00000000004008a4 <+516>: pop %r13
0x00000000004008a6 <+518>: pop %r14
0x00000000004008a8 <+520>: pop %r15
0x00000000004008aa <+522>: retq
Which is about 133 instructions.
If I define the cfloat like this, with an array as the state:
struct cfloat {
    cfloat(float re, float im) { ri[0] = re; ri[1] = im; }
    float ri[2];
};
cfloat operator +(cfloat a, cfloat b) {
    return cfloat(a.ri[0]+b.ri[0], a.ri[1]+b.ri[1]);
}
cfloat operator *(cfloat a, cfloat b) {
    return cfloat(a.ri[0]*b.ri[0]-a.ri[1]*b.ri[1], a.ri[0]*b.ri[1]+a.ri[1]*b.ri[0]);
}
It generates this assembly:
Dump of assembler code for function _Z9benchmarkP6cfloatS0_S0_S0_S_S_m:
0x00000000004006a0 <+0>: push %rbx
0x00000000004006a1 <+1>: movq %xmm0,-0x8(%rsp)
0x00000000004006a7 <+7>: mov -0x8(%rsp),%r9
0x00000000004006ac <+12>: movq %xmm1,-0x8(%rsp)
0x00000000004006b2 <+18>: mov -0x8(%rsp),%rax
0x00000000004006b7 <+23>: mov %r9d,-0xc(%rsp)
0x00000000004006bc <+28>: shr $0x20,%r9
0x00000000004006c0 <+32>: movss -0xc(%rsp),%xmm9
0x00000000004006c7 <+39>: mov %r9d,-0xc(%rsp)
0x00000000004006cc <+44>: movss -0xc(%rsp),%xmm8
0x00000000004006d3 <+51>: mov %eax,-0xc(%rsp)
0x00000000004006d7 <+55>: shr $0x20,%rax
0x00000000004006db <+59>: movss -0xc(%rsp),%xmm7
0x00000000004006e1 <+65>: test %r8,%r8
0x00000000004006e4 <+68>: mov %eax,-0xc(%rsp)
0x00000000004006e8 <+72>: movss -0xc(%rsp),%xmm6
0x00000000004006ee <+78>: je 0x400796 <_Z9benchmarkP6cfloatS0_S0_S0_S_S_m+246>
0x00000000004006f4 <+84>: xor %eax,%eax
0x00000000004006f6 <+86>: xor %r9d,%r9d
0x00000000004006f9 <+89>: nopl 0x0(%rax)
0x0000000000400700 <+96>: shl $0x3,%rax
0x0000000000400704 <+100>: movaps %xmm7,%xmm0
0x0000000000400707 <+103>: lea (%rsi,%rax,1),%rbx
0x000000000040070b <+107>: movaps %xmm6,%xmm3
0x000000000040070e <+110>: lea (%rcx,%rax,1),%r10
0x0000000000400712 <+114>: lea (%rdx,%rax,1),%r11
0x0000000000400716 <+118>: lea (%rdi,%rax,1),%rax
0x000000000040071a <+122>: movss (%rbx),%xmm1
0x000000000040071e <+126>: add $0x1,%r9
0x0000000000400722 <+130>: movss 0x4(%rbx),%xmm5
0x0000000000400727 <+135>: mulss %xmm1,%xmm0
0x000000000040072b <+139>: mulss %xmm5,%xmm3
0x000000000040072f <+143>: movss (%rax),%xmm2
0x0000000000400733 <+147>: movaps %xmm8,%xmm10
0x0000000000400737 <+151>: mulss %xmm6,%xmm1
0x000000000040073b <+155>: movss 0x4(%rax),%xmm4
0x0000000000400740 <+160>: mulss %xmm7,%xmm5
0x0000000000400744 <+164>: mulss %xmm4,%xmm10
0x0000000000400749 <+169>: cmp %r8,%r9
0x000000000040074c <+172>: mov %r9,%rax
0x000000000040074f <+175>: subss %xmm3,%xmm0
0x0000000000400753 <+179>: movaps %xmm2,%xmm3
0x0000000000400756 <+182>: mulss %xmm9,%xmm4
0x000000000040075b <+187>: mulss %xmm9,%xmm3
0x0000000000400760 <+192>: addss %xmm5,%xmm1
0x0000000000400764 <+196>: mulss %xmm8,%xmm2
0x0000000000400769 <+201>: subss %xmm10,%xmm3
0x000000000040076e <+206>: addss %xmm4,%xmm2
0x0000000000400772 <+210>: addss %xmm3,%xmm0
0x0000000000400776 <+214>: addss %xmm2,%xmm1
0x000000000040077a <+218>: addss (%r11),%xmm0
0x000000000040077f <+223>: addss 0x4(%r11),%xmm1
0x0000000000400785 <+229>: movss %xmm0,(%r10)
0x000000000040078a <+234>: movss %xmm1,0x4(%r10)
0x0000000000400790 <+240>: jne 0x400700 <_Z9benchmarkP6cfloatS0_S0_S0_S_S_m+96>
0x0000000000400796 <+246>: pop %rbx
0x0000000000400797 <+247>: retq
End of assembler dump.
Which is about 59 instructions. And, as my benchmarks show, the first version is about 3x slower than the second.
I would prefer the separate real/imaginary fields, not least because having them as an array seems to break the vectorizer in Intel's compiler for some reason.
Is there any way I can convince gcc that these two classes are equivalent?
So I don't believe this, but if I specify an explicit copy constructor, the problem resolves itself:
struct cfloat {
    cfloat(float re, float im) : re(re), im(im) {}
    cfloat(const cfloat& o) : re(o.re), im(o.im) {}
    float re,im;
};
Now generates the same assembly:
Dump of assembler code for function benchmark(cfloat*, cfloat*, cfloat*, cfloat*, cfloat, cfloat, unsigned long):
0x0000000000400600 <+0>: mov 0x8(%rsp),%r10
0x0000000000400605 <+5>: test %r10,%r10
0x0000000000400608 <+8>: je 0x4006aa <benchmark(cfloat*, cfloat*, cfloat*, cfloat*, cfloat, cfloat, unsigned long)+170>
0x000000000040060e <+14>: xor %eax,%eax
0x0000000000400610 <+16>: movss (%r9),%xmm8
0x0000000000400615 <+21>: movss 0x4(%r9),%xmm9
0x000000000040061b <+27>: movaps %xmm8,%xmm0
0x000000000040061f <+31>: movaps %xmm9,%xmm3
0x0000000000400623 <+35>: movss (%rsi,%rax,8),%xmm1
0x0000000000400628 <+40>: movss 0x4(%rsi,%rax,8),%xmm7
0x000000000040062e <+46>: mulss %xmm1,%xmm0
0x0000000000400632 <+50>: mulss %xmm7,%xmm3
0x0000000000400636 <+54>: movss (%r8),%xmm5
0x000000000040063b <+59>: movss 0x4(%r8),%xmm6
0x0000000000400641 <+65>: mulss %xmm9,%xmm1
0x0000000000400646 <+70>: movaps %xmm6,%xmm10
0x000000000040064a <+74>: mulss %xmm8,%xmm7
0x000000000040064f <+79>: movss (%rdi,%rax,8),%xmm2
0x0000000000400654 <+84>: subss %xmm3,%xmm0
0x0000000000400658 <+88>: movaps %xmm5,%xmm3
0x000000000040065b <+91>: movss 0x4(%rdi,%rax,8),%xmm4
0x0000000000400661 <+97>: mulss %xmm2,%xmm3
0x0000000000400665 <+101>: addss %xmm7,%xmm1
0x0000000000400669 <+105>: mulss %xmm4,%xmm10
0x000000000040066e <+110>: mulss %xmm6,%xmm2
0x0000000000400672 <+114>: mulss %xmm5,%xmm4
0x0000000000400676 <+118>: subss %xmm10,%xmm3
0x000000000040067b <+123>: addss %xmm4,%xmm2
0x000000000040067f <+127>: addss %xmm3,%xmm0
0x0000000000400683 <+131>: addss %xmm2,%xmm1
0x0000000000400687 <+135>: addss (%rdx,%rax,8),%xmm0
0x000000000040068c <+140>: addss 0x4(%rdx,%rax,8),%xmm1
0x0000000000400692 <+146>: movss %xmm0,(%rcx,%rax,8)
0x0000000000400697 <+151>: movss %xmm1,0x4(%rcx,%rax,8)
0x000000000040069d <+157>: add $0x1,%rax
0x00000000004006a1 <+161>: cmp %rax,%r10
0x00000000004006a4 <+164>: ja 0x400610 <benchmark(cfloat*, cfloat*, cfloat*, cfloat*, cfloat, cfloat, unsigned long)+16>
0x00000000004006aa <+170>: repz retq
End of assembler dump.
Find me that in the spec.
You mentioned that you target Red Hat Enterprise Linux, and (in your deleted post) that newer compiler versions generate better code. You could use Developer Toolset to get a newer compiler, creating applications which are compatible with the rest of the operating system:
https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
https://developers.redhat.com/products/developertoolset/overview/
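If you go that route, the usual Software Collections workflow is to start a shell with the newer toolchain enabled (shown here as a generic example, not taken from the links above):
$ scl enable devtoolset-6 bash
$ g++ --version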

Performance difference with custom iterator

I was trying to implement an iterator over the pixels of a 3D image. The image is stored in a single vector, and I do index mapping to go from (x, y, z) to the vector index. The idea is to be able to do a range-based for on the image.
Of course, iterating over the pixels by using the vector iterators is really fast (0.6s in my case), but I don't have access to the indexes.
A triple for loop (z, y, x, so as to be in memory order) is as fast with VS2015, but way slower (2.4s) with gcc or clang (gcc 5.2 and clang 3.9, as well as older versions).
I wrote an iterator that returns a tuple (x, y, z, value), and it's also around 2.4s, even with VS2015. The iterator contains a reference to the image and the three fields x, y, and z, and the value of the pixel is obtained using the same pixel_at(x, y, z) as in the triple loop.
So, I have 2 questions:
Why is VS2015 so much faster than GCC and Clang? (answered in edit)
How can I achieve the faster time while still having access to x, y, z?
Here's the increment of the x/y/z iterator:
_x++;
if (_x == _im.width())
{
    _x = 0;
    _y++;
    if (_y == _im.height())
    {
        _y = 0;
        _z++;
    }
}
The loops that are measured are the following:
Triple for-loop:
for (int k = 0; k < im.depth(); ++k)
    for (int j = 0; j < im.height(); ++j)
        for (int i = 0; i < im.width(); ++i)
            sum += im.pixel_at(i, j, k);
Iterate over the vector:
for (unsigned char c : im.pixels())
    sum += c;
Iterate with the custom iterator:
for (const auto& c : im.indexes())
    sum += std::get<3>(c);
The iterator implemented with a single index gives me good performance (0.6s), but then I don't have access to x, y, z, and computing them on the fly is too slow (6s). Having x, y, z and the index in the iterator doesn't help either (2.4s).
The benchmark is 100 passes over the image, summing the values of the pixels, which are randomly initialized. For the cases where I have the indexes too, I made sure the values of the indexes are read (for instance, by summing the z coordinates) so that the compiler doesn't optimize them away. It doesn't change anything, except for the case where I compute the indexes on the fly, where the computation was optimized away.
The whole code can be found here: https://github.com/nitnelave/3DIterator
Thanks!
EDIT: After enabling link-time optimization (-flto), the triple for loop is as fast (0.19s) on g++ as the vector iterators (0.18s), but the custom iterator is still 6 times slower (1.2s). That's already much better than the 3.2s I was getting for both the for loop and the iterator, but we're not quite there yet.
EDIT: I moved the loops into separate functions that I forbade gcc to inline, so that I could isolate their assembly code; here it is:
Triple for loop:
push %r15
push %r14
push %r13
push %r12
push %rbp
push %rdi
push %rsi
push %rbx
sub $0x78,%rsp
mov 0x8(%rcx),%eax
pxor %xmm4,%xmm4
mov %rcx,%r12
movl $0x64,0x5c(%rsp)
xor %r13d,%r13d
xor %r14d,%r14d
mov %eax,0x58(%rsp)
mov 0x58(%rsp),%edx
test %edx,%edx
jle 10040163a <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2fa>
mov 0x4(%r12),%eax
movl $0x0,0x54(%rsp)
movl $0x0,0x2c(%rsp)
mov %eax,0x48(%rsp)
nopl 0x0(%rax,%rax,1)
nopw %cs:0x0(%rax,%rax,1)
mov 0x48(%rsp),%eax
test %eax,%eax
jle 10040161f <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2df>
movslq (%r12),%rax
mov 0x54(%rsp),%r10d
mov 0x2c(%rsp),%ebx
mov %rax,%rcx
mov %rax,0x40(%rsp)
add %rax,%rax
mov %rax,0x38(%rsp)
lea -0x1(%rcx),%eax
imul %ecx,%r10d
imul %eax,%ebx
mov %eax,0x50(%rsp)
movslq %r10d,%rsi
lea (%rsi,%rsi,1),%r9
mov %ebx,0x4c(%rsp)
xor %ebx,%ebx
xchg %ax,%ax
nopw %cs:0x0(%rax,%rax,1)
test %ecx,%ecx
jle 100401605 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2c5>
mov 0x10(%r12),%rdx
lea (%rdx,%r9,1),%r8
mov %r8,%rax
and $0xf,%eax
shr %rax
neg %rax
and $0x7,%eax
cmp %ecx,%eax
cmova %ecx,%eax
cmp $0xa,%ecx
cmovbe %ecx,%eax
test %eax,%eax
je 100401690 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x350>
movswl (%r8),%r8d
add %r8d,%r14d
cmp $0x1,%eax
je 100401720 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x3e0>
movswl 0x2(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0x2,%eax
je 100401710 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x3d0>
movswl 0x4(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0x3,%eax
je 100401700 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x3c0>
movswl 0x6(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0x4,%eax
je 1004016f0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x3b0>
movswl 0x8(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0x5,%eax
je 1004016e0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x3a0>
movswl 0xa(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0x6,%eax
je 1004016d0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x390>
movswl 0xc(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0x7,%eax
je 1004016c0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x380>
movswl 0xe(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0x8,%eax
je 1004016b0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x370>
movswl 0x10(%rdx,%r9,1),%r8d
add %r8d,%r14d
cmp $0xa,%eax
jne 1004016a0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x360>
movswl 0x12(%rdx,%r9,1),%r8d
add %r8d,%r14d
mov $0xa,%r8d
cmp %ecx,%eax
je 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb>
mov %eax,%r15d
mov %ecx,%edi
sub %eax,%edi
mov %r15,0x30(%rsp)
mov 0x50(%rsp),%r15d
lea -0x8(%rdi),%r11d
sub %eax,%r15d
shr $0x3,%r11d
add $0x1,%r11d
cmp $0x6,%r15d
lea 0x0(,%r11,8),%ebp
jbe 100401573 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x233>
mov 0x30(%rsp),%r15
pxor %xmm0,%xmm0
xor %eax,%eax
add %rsi,%r15
lea (%rdx,%r15,2),%r15
movdqa (%r15),%xmm1
movdqa %xmm4,%xmm3
add $0x1,%eax
add $0x10,%r15
pcmpgtw %xmm1,%xmm3
movdqa %xmm1,%xmm2
cmp %eax,%r11d
punpcklwd %xmm3,%xmm2
punpckhwd %xmm3,%xmm1
paddd %xmm2,%xmm0
paddd %xmm1,%xmm0
ja 10040151a <_Z17bench_triple_loopRK7Image3D.constprop.5+0x1da>
movdqa %xmm0,%xmm1
add %ebp,%r8d
psrldq $0x8,%xmm1
paddd %xmm1,%xmm0
movdqa %xmm0,%xmm1
psrldq $0x4,%xmm1
paddd %xmm1,%xmm0
movd %xmm0,%eax
add %eax,%r14d
cmp %ebp,%edi
je 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb
lea (%r10,%r8,1),%eax
cltq
movswl (%rdx,%rax,2),%eax
add %eax,%r14d
lea 0x1(%r8),%eax
cmp %eax,%ecx
jle 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb
add %r10d,%eax
cltq
movswl (%rdx,%rax,2),%eax
add %eax,%r14d
lea 0x2(%r8),%eax
cmp %eax,%ecx
jle 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb
add %r10d,%eax
cltq
movswl (%rdx,%rax,2),%eax
add %eax,%r14d
lea 0x3(%r8),%eax
cmp %eax,%ecx
jle 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb
add %r10d,%eax
cltq
movswl (%rdx,%rax,2),%eax
add %eax,%r14d
lea 0x4(%r8),%eax
cmp %eax,%ecx
jle 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb
add %r10d,%eax
cltq
movswl (%rdx,%rax,2),%eax
add %eax,%r14d
lea 0x5(%r8),%eax
cmp %eax,%ecx
jle 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb
add %r10d,%eax
add $0x6,%r8d
cltq
movswl (%rdx,%rax,2),%eax
add %eax,%r14d
cmp %r8d,%ecx
jle 1004015fb <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2bb
add %r10d,%r8d
movslq %r8d,%r8
movswl (%rdx,%r8,2),%eax
add %eax,%r14d
add 0x2c(%rsp),%r13d
add 0x4c(%rsp),%r13d
add $0x1,%ebx
add 0x38(%rsp),%r9
add 0x40(%rsp),%rsi
add %ecx,%r10d
cmp %ebx,0x48(%rsp)
jne 1004013f0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0xb0>
addl $0x1,0x2c(%rsp)
mov 0x48(%rsp),%ebx
mov 0x2c(%rsp),%eax
add %ebx,0x54(%rsp)
cmp 0x58(%rsp),%eax
jne 1004013a0 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x60>
subl $0x1,0x5c(%rsp)
jne 10040136c <_Z17bench_triple_loopRK7Image3D.constprop.5+0x2c>
mov 0x2a64(%rip),%rcx # 1004040b0 <__fu0__ZSt4cout>
mov %r13d,%eax
mov %r14d,%r13d
mov %eax,%edx
callq 100401828 <_ZNSolsEi>
lea 0x6f(%rsp),%rdx
mov $0x1,%r8d
mov %rax,%rcx
movb $0xa,0x6f(%rsp)
callq 100401800 <_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l>
mov %r13d,%eax
add $0x78,%rsp
pop %rbx
pop %rsi
pop %rdi
pop %rbp
pop %r12
pop %r13
pop %r14
pop %r15
retq
nop
nopw %cs:0x0(%rax,%rax,1)
xor %r8d,%r8d
jmpq 1004014da <_Z17bench_triple_loopRK7Image3D.constprop.5+0x19a>
nopl 0x0(%rax,%rax,1)
mov $0x9,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x8,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x7,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x6,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x5,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x4,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x3,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x2,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
mov $0x1,%r8d
jmpq 1004014d2 <_Z17bench_triple_loopRK7Image3D.constprop.5+0x192>
nopl 0x0(%rax,%rax,1)
Iteration over the vector:
push %r14
push %r13
push %r12
push %rbp
push %rdi
push %rsi
push %rbx
mov 0x10(%rcx),%r8
mov 0x18(%rcx),%r10
mov $0x64,%ebx
pxor %xmm4,%xmm4
lea 0x2(%r8),%r12
mov %r10,%rdi
mov %r8,%rbp
and $0xf,%ebp
sub %r12,%rdi
shr %rbp
shr %rdi
neg %rbp
lea 0x1(%rdi),%r11
and $0x7,%ebp
cmp %r11,%rbp
cmova %r11,%rbp
xor %eax,%eax
xchg %ax,%ax
nopw %cs:0x0(%rax,%rax,1)
cmp %r8,%r10
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
cmp $0xa,%r11
mov %r11,%rcx
ja 1004012f0 <_Z10bench_iterRK7Image3D.constprop.4+0x210>
movswl (%r8),%edx
add %edx,%eax
cmp $0x1,%rcx
je 100401300 <_Z10bench_iterRK7Image3D.constprop.4+0x220>
movswl 0x2(%r8),%edx
add %edx,%eax
cmp $0x2,%rcx
lea 0x4(%r8),%rdx
je 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0x4(%r8),%edx
add %edx,%eax
cmp $0x3,%rcx
lea 0x6(%r8),%rdx
je 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0x6(%r8),%edx
add %edx,%eax
cmp $0x4,%rcx
lea 0x8(%r8),%rdx
je 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0x8(%r8),%edx
add %edx,%eax
cmp $0x5,%rcx
lea 0xa(%r8),%rdx
je 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0xa(%r8),%edx
add %edx,%eax
cmp $0x6,%rcx
lea 0xc(%r8),%rdx
je 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0xc(%r8),%edx
add %edx,%eax
cmp $0x7,%rcx
lea 0xe(%r8),%rdx
je 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0xe(%r8),%edx
add %edx,%eax
cmp $0x8,%rcx
lea 0x10(%r8),%rdx
je 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0x10(%r8),%edx
add %edx,%eax
cmp $0xa,%rcx
lea 0x12(%r8),%rdx
jne 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
movswl 0x12(%r8),%edx
add %edx,%eax
lea 0x14(%r8),%rdx
cmp %rcx,%r11
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
mov %r11,%rsi
mov %rdi,%r14
sub %rcx,%rsi
sub %rcx,%r14
lea -0x8(%rsi),%r9
shr $0x3,%r9
add $0x1,%r9
cmp $0x6,%r14
lea 0x0(,%r9,8),%r13
jbe 10040127d <_Z10bench_iterRK7Image3D.constprop.4+0x19d>
pxor %xmm0,%xmm0
lea (%r8,%rcx,2),%r14
xor %ecx,%ecx
movdqa (%r14),%xmm1
add $0x1,%rcx
movdqa %xmm4,%xmm2
add $0x10,%r14
movdqa %xmm1,%xmm3
cmp %rcx,%r9
pcmpgtw %xmm1,%xmm2
punpcklwd %xmm2,%xmm3
punpckhwd %xmm2,%xmm1
paddd %xmm3,%xmm0
paddd %xmm1,%xmm0
ja 100401226 <_Z10bench_iterRK7Image3D.constprop.4+0x146>
movdqa %xmm0,%xmm1
lea (%rdx,%r13,2),%rdx
psrldq $0x8,%xmm1
paddd %xmm1,%xmm0
movdqa %xmm0,%xmm1
psrldq $0x4,%xmm1
paddd %xmm1,%xmm0
movd %xmm0,%ecx
add %ecx,%eax
cmp %r13,%rsi
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
movswl (%rdx),%ecx
add %ecx,%eax
lea 0x2(%rdx),%rcx
cmp %rcx,%r10
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
movswl 0x2(%rdx),%ecx
add %ecx,%eax
lea 0x4(%rdx),%rcx
cmp %rcx,%r10
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
movswl 0x4(%rdx),%ecx
add %ecx,%eax
lea 0x6(%rdx),%rcx
cmp %rcx,%r10
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
movswl 0x6(%rdx),%ecx
add %ecx,%eax
lea 0x8(%rdx),%rcx
cmp %rcx,%r10
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
movswl 0x8(%rdx),%ecx
add %ecx,%eax
lea 0xa(%rdx),%rcx
cmp %rcx,%r10
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
movswl 0xa(%rdx),%ecx
add %ecx,%eax
lea 0xc(%rdx),%rcx
cmp %rcx,%r10
je 1004012dc <_Z10bench_iterRK7Image3D.constprop.4+0x1fc>
movswl 0xc(%rdx),%edx
add %edx,%eax
sub $0x1,%ebx
jne 100401130 <_Z10bench_iterRK7Image3D.constprop.4+0x50>
pop %rbx
pop %rsi
pop %rdi
pop %rbp
pop %r12
pop %r13
pop %r14
retq
test %rbp,%rbp
jne 100401308 <_Z10bench_iterRK7Image3D.constprop.4+0x228>
xor %ecx,%ecx
mov %r8,%rdx
jmpq 1004011f6 <_Z10bench_iterRK7Image3D.constprop.4+0x116>
nop
mov %r12,%rdx
jmpq 1004011ed <_Z10bench_iterRK7Image3D.constprop.4+0x10d>
mov %rbp,%rcx
jmpq 100401146 <_Z10bench_iterRK7Image3D.constprop.4+0x66>
Custom iterator:
push %r14
push %rbp
push %rdi
push %rsi
push %rbx
sub $0x30,%rsp
mov (%rcx),%r10d
mov $0x64,%ebp
xor %ebx,%ebx
mov %rcx,%rdi
mov 0x4(%rcx),%ecx
xor %edx,%edx
mov 0x8(%rdi),%esi
nop
xor %r9d,%r9d
xor %r11d,%r11d
xor %r8d,%r8d
nopl 0x0(%rax)
cmp %r9d,%esi
je 1004017b0 <_Z16bench_index_iterRK7Image3D.constprop.0+0x80>
mov %ecx,%eax
mov 0x10(%rdi),%r14
add %r9d,%edx
imul %r9d,%eax
add %r11d,%eax
imul %r10d,%eax
add %r8d,%eax
add $0x1,%r8d
cltq
movswl (%r14,%rax,2),%eax
add %eax,%ebx
cmp %r10d,%r8d
jne 100401760 <_Z16bench_index_iterRK7Image3D.constprop.0+0x30>
add $0x1,%r11d
xor %r8d,%r8d
cmp %r11d,%ecx
jne 100401760 <_Z16bench_index_iterRK7Image3D.constprop.0+0x30>
add $0x1,%r9d
xor %r11d,%r11d
cmp %r9d,%esi
jne 100401765 <_Z16bench_index_iterRK7Image3D.constprop.0+0x35>
nopw %cs:0x0(%rax,%rax,1)
test %r11d,%r11d
jne 100401765 <_Z16bench_index_iterRK7Image3D.constprop.0+0x35>
test %r8d,%r8d
jne 100401765 <_Z16bench_index_iterRK7Image3D.constprop.0+0x35>
sub $0x1,%ebp
jne 100401750 <_Z16bench_index_iterRK7Image3D.constprop.0+0x20>
mov 0x28ea(%rip),%rcx # 1004040b0 <__fu0__ZSt4cout>
callq 100401828 <_ZNSolsEi>
lea 0x2f(%rsp),%rdx
mov $0x1,%r8d
mov %rax,%rcx
movb $0xa,0x2f(%rsp)
callq 100401800 <_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l>
mov %ebx,%eax
add $0x30,%rsp
pop %rbx
pop %rsi
pop %rdi
pop %rbp
pop %r14
retq
So it seems that there is loop unrolling for the first two benchmarks, but not for the custom iterator.
EDIT: I also tried using the boost::asio::coroutines, but it's slower than my solution.
Why is it slower
As you show nicely in the disassembly, the compiler generates vastly different code for the different loops. The main disadvantage of the generated machine code for the custom iterator is that it does not utilize SIMD instructions.
Compiler optimizations are incredibly complex. You could spend days just looking at the intermediate optimization steps. Also, the performance varies between different compilers (and compiler versions). In my tests, clang was the only compiler that generated fast SIMD code for your iterator.
You already have one thing perfectly right: measure. If the performance of that part matters to you, you have to measure it.
How to speed it up
The basic idea is to help the compiler with the loop, for instance with the end condition: the compiler needs to be able to prove that it doesn't change. You can try to locally keep a const copy of the width/height of the image. Unfortunately, I haven't been able to achieve a significant speedup on your example code.
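For concreteness, this is the kind of hoisting meant by "keep a const copy" (it may or may not help, as noted above; Image3D and pixel_at are the names used in the question):

// Variant of the question's triple loop with the bounds hoisted into const
// locals so the compiler can see they never change.
long long sum_all(const Image3D& im)
{
    const int w = im.width();
    const int h = im.height();
    const int d = im.depth();
    long long sum = 0;
    for (int k = 0; k < d; ++k)
        for (int j = 0; j < h; ++j)
            for (int i = 0; i < w; ++i)
                sum += im.pixel_at(i, j, k);
    return sum;
}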
Another issue is the end iterator. You need to check three conditions in each iteration, even though only one of them (z) matters. That brings us to alternative loop end conditions, such as sentinels.
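To illustrate the sentinel idea (all names here are made up, not taken from your repository), the end check compares only the one coordinate that can actually terminate the walk, instead of a full three-field iterator:

// Minimal sketch of a sentinel-terminated x/y/z walk (needs C++11).
struct xyz_cursor
{
    int x, y, z;
    int width, height;

    void advance()
    {
        if (++x == width) { x = 0; if (++y == height) { y = 0; ++z; } }
    }
};

struct z_sentinel { int depth; };

// Only z has to be compared to know that the traversal is finished.
inline bool operator!=(const xyz_cursor& c, z_sentinel s) { return c.z != s.depth; }

// usage sketch:
// for (xyz_cursor c = {0, 0, 0, im.width(), im.height()}; c != z_sentinel{im.depth()}; c.advance())
//     sum += im.pixel_at(c.x, c.y, c.z);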
range-v3
With range-v3 one could potentially implement more efficient and elegant loops. I made a quick attempt at your code:
#pragma once
#include "Image3D.h"
#include <range/v3/all.hpp>

template <typename Image>
class range_3d : public ranges::view_facade<range_3d<Image>>
{
    using data_t = typename Image::data_t;
    friend ranges::range_access;

    int x_ = 0;
    int y_ = 0;
    int z_ = 0;
    // Note: This cannot be a reference, because ranges needs a default constructor.
    // If it doesn't get one, the compiler errors will haunt you in your dreams.
    Image* im_;

    data_t const& get() const
    {
        return im_->pixel_at(x_, y_, z_);
    }
    bool done() const
    {
        return z_ == im_->depth();
    }
    void next()
    {
        x_++;
        if (x_ == im_->width())
        {
            x_ = 0;
            y_++;
            if (y_ == im_->height())
            {
                y_ = 0;
                z_++;
            }
        }
    }

public:
    range_3d() = default;
    explicit range_3d(Image& im)
        : im_(&im)
    {
    }
};

// use like:
range_3d<Image3D> r(im);
ranges::for_each(r, [&sum](unsigned char c) {
    sum += c;
});
Unfortunately it still doesn't provide perfect performance, but at least the code is much nicer (as long as you don't get a compiler error).
Context matters
You have to keep in mind that the performance of the iterators may be quite different when you actually make use of x/y/z. If your compiler cannot apply the "sum up the vector" optimization, the iterators will probably show more similar performance.
On-demand x/y/z computation
I still think you might want to consider not keeping the separate loop variables. Maybe you could even optimize for images with power-of-two sizes, or block the loop somehow, so that you can compute x/y/z very efficiently from the index using bit operations.
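As a rough sketch of the power-of-two idea (names hypothetical, not from your code): if width and height are powers of two, the loop can keep only the flat index and recover x/y/z with shifts and masks whenever they are actually needed.

#include <cstddef>

// Sketch only: wshift/hshift are log2(width)/log2(height); wmask/hmask are
// width-1/height-1. Works when width and height are powers of two.
struct flat_xyz
{
    unsigned wshift, hshift;
    std::size_t wmask, hmask;

    int x(std::size_t i) const { return int(i & wmask); }
    int y(std::size_t i) const { return int((i >> wshift) & hmask); }
    int z(std::size_t i) const { return int(i >> (wshift + hshift)); }
};

// usage sketch: walk the flat vector and derive coordinates only on demand.
// std::size_t i = 0;
// for (unsigned char c : im.pixels()) { sum += c; use(f.x(i), f.y(i), f.z(i)); ++i; }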
Zero-overhead abstractions
One of the properties I like most about C++ is that it allows you to create nice abstractions with absolutely no runtime overhead. It pains me that I / the compilers fail in this case. I feel this answer lacks a happy end, so I'd encourage everyone to attempt this as well.

Can I disas a specific statement in gdb?

(gdb) n
253 conf.log = log;
As shown above, the next statement is conf.log = log;. How can I disassemble just that statement?
I tried a plain disas, but gdb disassembles the whole current function (which is far more than I need)...
(gdb) disas
Dump of assembler code for function ngx_init_cycle:
0x0000000000417c7c <ngx_init_cycle+0>: push %rbp
0x0000000000417c7d <ngx_init_cycle+1>: mov %rsp,%rbp
0x0000000000417c80 <ngx_init_cycle+4>: push %rbx
0x0000000000417c81 <ngx_init_cycle+5>: sub $0x258,%rsp
0x0000000000417c88 <ngx_init_cycle+12>: mov %rdi,-0x228(%rbp)
0x0000000000417c8f <ngx_init_cycle+19>: callq 0x42b2fc <ngx_timezone_update>
0x0000000000417c94 <ngx_init_cycle+24>: mov 0x2b00e5(%rip),%rax # 0x6c7d80 <ngx_cached_time>
0x0000000000417c9b <ngx_init_cycle+31>: mov %rax,-0x88(%rbp)
0x0000000000417ca2 <ngx_init_cycle+38>: mov -0x88(%rbp),%rax
0x0000000000417ca9 <ngx_init_cycle+45>: movq $0x0,(%rax)
0x0000000000417cb0 <ngx_init_cycle+52>: callq 0x4149e7 <ngx_time_update>
0x0000000000417cb5 <ngx_init_cycle+57>: mov -0x228(%rbp),%rax
0x0000000000417cbc <ngx_init_cycle+64>: mov 0x10(%rax),%rax
0x0000000000417cc0 <ngx_init_cycle+68>: mov %rax,-0x90(%rbp)
0x0000000000417cc7 <ngx_init_cycle+75>: mov -0x90(%rbp),%rsi
0x0000000000417cce <ngx_init_cycle+82>: mov $0x4000,%edi
0x0000000000417cd3 <ngx_init_cycle+87>: callq 0x405c6c <ngx_create_pool>
0x0000000000417cd8 <ngx_init_cycle+92>: mov %rax,-0x80(%rbp)
0x0000000000417cdc <ngx_init_cycle+96>: cmpq $0x0,-0x80(%rbp)
---Type <return> to continue, or q <return> to quit---q
UPDATE
(gdb) info line 98
Line 98 of "src/os/unix/ngx_process_cycle.c" starts at address 0x42f6f3 <ngx_master_process_cycle+31>
and ends at 0x42f704 <ngx_master_process_cycle+48>.
(gdb) disas 0x42f6f3,0x42f704
Dump of assembler code for function ngx_master_process_cycle:
0x000000000042f6d4 <ngx_master_process_cycle+0>: push %rbp
0x000000000042f6d5 <ngx_master_process_cycle+1>: mov %rsp,%rbp
0x000000000042f6d8 <ngx_master_process_cycle+4>: push %rbx
0x000000000042f6d9 <ngx_master_process_cycle+5>: sub $0x128,%rsp
0x000000000042f6e0 <ngx_master_process_cycle+12>: mov %rdi,-0x108(%rbp)
0x000000000042f6e7 <ngx_master_process_cycle+19>: lea -0xe0(%rbp),%rdi
0x000000000042f6ee <ngx_master_process_cycle+26>: callq 0x402988 <sigemptyset#plt>
0x000000000042f6f3 <ngx_master_process_cycle+31>: lea -0xe0(%rbp),%rdi
0x000000000042f6fa <ngx_master_process_cycle+38>: mov $0x11,%esi
0x000000000042f6ff <ngx_master_process_cycle+43>: callq 0x402878 <sigaddset#plt>
0x000000000042f704 <ngx_master_process_cycle+48>: lea -0xe0(%rbp),%rdi
0x000000000042f70b <ngx_master_process_cycle+55>: mov $0xe,%esi
0x000000000042f710 <ngx_master_process_cycle+60>: callq 0x402878 <sigaddset#plt>
0x000000000042f715 <ngx_master_process_cycle+65>: lea -0xe0(%rbp),%rdi
0x000000000042f71c <ngx_master_process_cycle+72>: mov $0x1d,%esi
0x000000000042f721 <ngx_master_process_cycle+77>: callq 0x402878 <sigaddset#plt>
0x000000000042f726 <ngx_master_process_cycle+82>: lea -0xe0(%rbp),%rdi
0x000000000042f72d <ngx_master_process_cycle+89>: mov $0x2,%esi
0x000000000042f732 <ngx_master_process_cycle+94>: callq 0x402878 <sigaddset#plt>
---Type <return> to continue, or q <return> to quit---
Try something to the effect of:
(gdb) info line 12
Line 12 of "test.c" starts at address 0x4004f4 <main+24>
and ends at 0x4004fe <main+34>.
(gdb) disas 0x4004f4,0x4004fe
Dump of assembler code from 0x4004f4 to 0x4004fe:
0x00000000004004f4 <main+24>: mov $0x0,%eax
0x00000000004004f9 <main+29>: callq 0x4004d0 <bp3>
End of assembler dump.
Or:
(gdb) disas main+24,main+34
Dump of assembler code from 0x4004f4 to 0x4004fe:
0x00000000004004f4 <main+24>: mov $0x0,%eax
0x00000000004004f9 <main+29>: callq 0x4004d0 <bp3>
End of assembler dump.
I'm not sure of a more automatic way offhand.
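If you only want a handful of instructions around where you are stopped, and don't mind skipping the info line step, gdb's examine command works too (a general gdb feature, shown here as an extra option):
(gdb) x/6i $pc
This prints the next six instructions starting at the current program counter.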

How do I make GCC instantiate a class instance with non-trivial const/dest?

I am implementing a profiler. I want to use the Constructor/Destructor idiom to keep track of when I enter/exit a function.
A rough outline of my code is as follows:
class Profile
{
    Profile(void);  //Start timing
    ~Profile(void); //Stop timer and log
};
//...
Game::Game(void) : m_Quit(false)
{
    Profile p();
    InitializeModules();
    //...
}
However, when I run it, the constructor and destructor are not being called. Even when I disassemble, there are no references to Profile::Profile(). I understood that the standard specifies that an instance with a non-trivial constructor cannot be optimized out by the compiler.
There are no optimization flags on the command line of either the compiler or the linker.
I also tried specifying __attribute__((used)), but to no avail.
Here is the disassembly:
(gdb) disassemble Ztk::Game::Game
Dump of assembler code for function Ztk::Game::Game():
0x00000000004cd798 <+0>: push %rbp
0x00000000004cd799 <+1>: mov %rsp,%rbp
0x00000000004cd79c <+4>: push %r12
0x00000000004cd79e <+6>: push %rbx
0x00000000004cd79f <+7>: sub $0x30,%rsp
0x00000000004cd7a3 <+11>: mov %rdi,-0x38(%rbp)
0x00000000004cd7a7 <+15>: mov -0x38(%rbp),%rax
0x00000000004cd7ab <+19>: mov %rax,%rdi
0x00000000004cd7ae <+22>: callq 0x4cdc6a <Ztk::Highlander<Ztk::Game, int>::Highlander()>
/** CALL SHOULD BE HERE **/
0x00000000004cd7b3 <+27>: mov -0x38(%rbp),%rax
0x00000000004cd7b7 <+31>: movb $0x0,(%rax)
0x00000000004cd7ba <+34>: callq 0x4e59f0 <Ztk::InitializeModules()>
Indeed, there is code generated and linked into the executable:
(gdb) disassemble Ztk::Profile::Profile(void)
Dump of assembler code for function Ztk::Profile::Profile():
0x0000000000536668 <+0>: push %rbp
0x0000000000536669 <+1>: mov %rsp,%rbp
0x000000000053666c <+4>: sub $0x20,%rsp
0x0000000000536670 <+8>: mov %rdi,-0x18(%rbp)
0x0000000000536674 <+12>: mov 0x8(%rbp),%rax
0x0000000000536678 <+16>: mov %rax,-0x8(%rbp)
0x000000000053667c <+20>: mov -0x8(%rbp),%rax
0x0000000000536680 <+24>: mov %rax,%rsi
0x0000000000536683 <+27>: mov $0x802440,%edi
0x0000000000536688 <+32>: callq 0x5363ca <Ztk::Profiler::FindNode(void*)>
Profile p();
What you've done here is declare a function, called p, that returns an object of type Profile. What you want is this:
Profile p;
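For what it's worth, this is the "most vexing parse": anything that can be read as a function declaration is read as one. Either of the following declares an object, so the constructor and destructor will run as intended (the brace form needs C++11):
Profile p;     // an object, default-initialized
Profile p{};   // also an object; list-initialization, C++11 and later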

How do I make gdb show the original, non-mangled function names in disassembly?

void outputString(const char *str) {
    cout << "outputString(const char *str) : " << str << endl;
}
turns out to be
Dump of assembler code for function _Z12outputStringPKc:
0x004013ee <_Z12outputStringPKc+0>: push ebp
0x004013ef <_Z12outputStringPKc+1>: mov ebp,esp
0x004013f1 <_Z12outputStringPKc+3>: sub esp,0x8
0x004013f4 <_Z12outputStringPKc+6>: mov DWORD PTR [esp+4],0x443000
0x004013fc <_Z12outputStringPKc+14>: mov DWORD PTR [esp],0x4463c0
0x00401403 <_Z12outputStringPKc+21>: call 0x43f6e8 <_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc>
0x00401408 <_Z12outputStringPKc+26>: mov edx,DWORD PTR [ebp+8]
0x0040140b <_Z12outputStringPKc+29>: mov DWORD PTR [esp+4],edx
0x0040140f <_Z12outputStringPKc+33>: mov DWORD PTR [esp],eax
0x00401412 <_Z12outputStringPKc+36>: call 0x43f6e8 <_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc>
0x00401417 <_Z12outputStringPKc+41>: mov DWORD PTR [esp+4],0x43e4c8
0x0040141f <_Z12outputStringPKc+49>: mov DWORD PTR [esp],eax
0x00401422 <_Z12outputStringPKc+52>: call 0x42e170 <_ZNSolsEPFRSoS_E>
0x00401427 <_Z12outputStringPKc+57>: leave
0x00401428 <_Z12outputStringPKc+58>: ret
End of assembler dump.
All the disassemblies show only the mangled function names, and it is a chore for the programmer to demangle them by typing info symbol <address> for every mangled name encountered. Is there a way to make gdb show the demangled function names in disassembly?
You could do maint demangle _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc at the (gdb) prompt.
The manual says:
`set print asm-demangle'
`set print asm-demangle on'
Print C++ names in their source form rather than their mangled
form, even in assembler code printouts such as instruction
disassemblies. The default is off.
Unfortunately, it doesn't appear to work:
(gdb) set print asm-demangle on
(gdb) disas
Dump of assembler code for function _Z12outputStringPKc:
0x00000000004009c4 <outputString(char const*)+0>: push %rbp
0x00000000004009c5 <outputString(char const*)+1>: mov %rsp,%rbp
0x00000000004009c8 <outputString(char const*)+4>: sub $0x10,%rsp
0x00000000004009cc <outputString(char const*)+8>: mov %rdi,-0x8(%rbp)
0x00000000004009d0 <outputString(char const*)+12>: mov $0x400bb0,%esi
0x00000000004009d5 <outputString(char const*)+17>: mov $0x6012a0,%edi
0x00000000004009da <outputString(char const*)+22>: callq 0x400798 <_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc#plt>
0x00000000004009df <outputString(char const*)+27>: mov %rax,%rdi
0x00000000004009e2 <outputString(char const*)+30>: mov -0x8(%rbp),%rsi
0x00000000004009e6 <outputString(char const*)+34>: callq 0x400798 <_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc#plt>
0x00000000004009eb <outputString(char const*)+39>: mov %rax,%rdi
0x00000000004009ee <outputString(char const*)+42>: mov $0x4007c8,%esi
0x00000000004009f3 <outputString(char const*)+47>: callq 0x4007b8 <_ZNSolsEPFRSoS_E#plt>
0x00000000004009f8 <outputString(char const*)+52>: leaveq
0x00000000004009f9 <outputString(char const*)+53>: retq
End of assembler dump.
The setting changed how the current function is printed, but not how the functions it calls are printed (which is what I assume you are after).
I think that is a bug in GDB, please file a bug in bugzilla.
Update:
The bug has been fixed in 2013. With GDB-10.0 the output is:
(gdb) disas 0x555555555169
Dump of assembler code for function _Z12outputStringPKc:
0x0000555555555169 <+0>: push %rbp
0x000055555555516a <+1>: mov %rsp,%rbp
0x000055555555516d <+4>: sub $0x10,%rsp
0x0000555555555171 <+8>: mov %rdi,-0x8(%rbp)
0x0000555555555175 <+12>: lea 0xe8c(%rip),%rax # 0x555555556008
0x000055555555517c <+19>: mov %rax,%rsi
0x000055555555517f <+22>: lea 0x2efa(%rip),%rax # 0x555555558080 <std::cout#GLIBCXX_3.4>
0x0000555555555186 <+29>: mov %rax,%rdi
0x0000555555555189 <+32>: callq 0x555555555040 <std::basic_ostream<char, std::char_traits<char> >& std::operator<< <std::char_traits<char> >(std::basic_ostream<char, std::char_traits<char> >&, char const*)#plt>
0x000055555555518e <+37>: mov %rax,%rdx
0x0000555555555191 <+40>: mov -0x8(%rbp),%rax
0x0000555555555195 <+44>: mov %rax,%rsi
0x0000555555555198 <+47>: mov %rdx,%rdi
0x000055555555519b <+50>: callq 0x555555555040 <std::basic_ostream<char, std::char_traits<char> >& std::operator<< <std::char_traits<char> >(std::basic_ostream<char, std::char_traits<char> >&, char const*)#plt>
0x00005555555551a0 <+55>: mov 0x2e29(%rip),%rdx # 0x555555557fd0
0x00005555555551a7 <+62>: mov %rdx,%rsi
0x00005555555551aa <+65>: mov %rax,%rdi
0x00005555555551ad <+68>: callq 0x555555555050 <std::ostream::operator<<(std::ostream& (*)(std::ostream&))#plt>
0x00005555555551b2 <+73>: nop
0x00005555555551b3 <+74>: leaveq
0x00005555555551b4 <+75>: retq
I don't remember ever finding an automatic way for gdb to do it. I always just copied and pasted the symbol and ran it through the Linux c++filt utility to demangle.
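For reference, c++filt can take the mangled name directly on the command line, e.g.:
$ c++filt _Z12outputStringPKc
outputString(char const*)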
To make gdb show the original, demangled function names in disassembly, you have to run these commands every time you debug:
1. set print demangle on
2. set print asm-demangle on
Alternatively, you can create a ~/.gdbinit file (much like a ~/.vimrc) and put the following settings in it, so you don't have to type them every time:
1. set print pretty on
2. set print demangle on
3. set print asm-demangle on