Windows Threading C++ - Access Violation - c++

I am new to threading for Windows and would appreciate any and all suggestions. I have created a small program to demonstrate the access violation I am getting.
Here is test.h:
#ifndef TEST_H
#define TEST_H
using namespace std;
#include <windows.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
/**************************************************************************************************/
// Render any value that supports operator<< as a string.
// @param x  value to format (must be streamable to an output stream)
// @return   the textual representation of x
template<typename T>
string toString(const T& x){
    ostringstream formatter; // a write-only stream is all formatting needs
    formatter << x;
    return formatter.str();
}
/**************************************************************************************************/
//custom data structure for threads to use.
// This is passed by void pointer so it can be any data type
// that can be passed using a single void pointer (LPVOID).
struct tempData {
    int threadID;             // this thread's index; also indexes into filenames
    vector<string> filenames; // all input files; the thread reads filenames[threadID]

    // BUG FIX: initialize threadID instead of leaving it indeterminate
    // (reading an uninitialized int is undefined behaviour).
    tempData() : threadID(0) {}

    // Use the member initializer list rather than assignment in the body,
    // and take the vector by const reference to avoid the extra copy the
    // original by-value parameter made. Callers are unaffected.
    tempData(const vector<string>& f, int tid) : threadID(tid), filenames(f) {}
};
/**************************************************************************************************/
// Thread entry point: reads the input file filenames[threadID] and writes a
// per-thread progress log to a file named after the thread id.
//
// Expected input format (per the sample data): a leading count `num`, then
// records of the form "<name> <count> <num floats...>".
//
// @param lpParam  pointer to a tempData owned by the spawning code (main)
// @return 0 on success, 1 on invalid thread data (thread exit code)
//
// BUG FIXES vs. the original:
//  * the function is declared to return DWORD but had no return statement at
//    all, which is undefined behaviour in C++;
//  * looping on `!fileHandle.eof()` processes one stale/garbage record after a
//    failed read; the loop now tests the extraction itself, which also makes
//    the manual whitespace-skip/putback dance unnecessary (operator>> skips
//    leading whitespace on its own).
static DWORD WINAPI tempThreadFunction(LPVOID lpParam){
    tempData* pDataArray = static_cast<tempData*>(lpParam);
    // Guard against a null argument or an id that does not index filenames.
    if (pDataArray == NULL ||
        pDataArray->threadID < 0 ||
        pDataArray->threadID >= (int)pDataArray->filenames.size()) {
        return 1;
    }
    string fileName = pDataArray->filenames[pDataArray->threadID];
    ifstream fileHandle(fileName.c_str());
    // Each thread writes to its own output file (named "<threadID>"), so the
    // ofstream objects are never shared between threads.
    string output = toString(pDataArray->threadID);
    ofstream out(output.c_str());
    string name;
    int num = 0, currentNum = 0;
    vector<string> nameVector;
    vector<float> data;
    float currentData;
    int index = 0;
    fileHandle >> num; // number of float values that follow each record header
    while (fileHandle >> name >> currentNum) {
        nameVector.push_back(name);
        // Stop early if the record is shorter than advertised instead of
        // pushing stale values.
        for (int i = 0; i < num && fileHandle >> currentData; i++) {
            data.push_back(currentData);
        }
        index++;
        // NOTE(review): cout is shared by both threads, so these progress
        // lines may interleave; harmless for a trace, but not synchronized.
        cout << "Thread " << pDataArray->threadID << '\t' << index << endl;
        out << name << '\t' << "Thread " << pDataArray->threadID << '\t' << index << endl;
    }
    fileHandle.close();
    out.close();
    cout << "Thread " << pDataArray->threadID << " read " << nameVector.size() << " lines." << endl;
    return 0; // a DWORD-returning thread routine must return a value
}
#endif
And here is test.cpp
#include "test.h"
/**************************************************************************************************/
int main(int argc, char *argv[]){
string fileName1 = argv[1];
string fileName2 = argv[2];
vector<string> fileNames; fileNames.push_back(fileName1); fileNames.push_back(fileName2);
vector<tempData*> pDataArray;
DWORD dwThreadIdArray[2];
HANDLE hThreadArray[2];
//Create processor worker threads.
for( int i=0; i<2; i++ ){
// Allocate memory for thread data.
tempData* tempThread = new tempData(fileNames, i);
pDataArray.push_back(tempThread);
hThreadArray[i] = CreateThread(NULL, 0, tempThreadFunction, pDataArray[i], 0, &dwThreadIdArray[i]);
}
//Wait until all threads have terminated.
WaitForMultipleObjects(2, hThreadArray, TRUE, INFINITE);
//Close all thread handles and free memory allocations.
for(int i=0; i < pDataArray.size(); i++){
CloseHandle(hThreadArray[i]);
delete pDataArray[i];
}
return 0;
}
/**************************************************************************************************/
The files the threads are reading look like:
450
F5MMO9001C96XU 450 1.03 0.02 1.00 0.03 0.05 1.02 0.03 1.04 0.05 0.04 2.06 1.05 2.01 0.05 0.98 0.03 0.08 1.05 1.01 0.02 0.05 1.03 0.04 0.04 2.05 1.07 2.04 1.01 0.06 0.05 0.96 2.02 0.06 0.04 0.99 0.06 1.00 0.03 0.06 1.04 0.08 0.01 1.07 0.06 1.02 0.03 0.05 2.00 0.07 0.04 1.00 0.11 0.06 1.01 1.02 1.02 1.03 1.06 0.04 1.04 1.94 1.02 0.06 1.00 0.12 0.06 2.01 1.96 0.94 0.08 0.10 0.96 0.12 0.05 1.01 0.05 2.04 1.11 0.08 0.04 2.00 0.06 1.02 0.04 1.99 0.05 1.03 0.09 0.14 0.98 0.10 1.99 1.02 1.06 2.11 1.00 0.96 0.10 1.00 0.08 0.11 1.08 0.07 0.06 1.03 0.10 0.04 1.01 0.12 1.11 0.09 0.99 0.98 0.12 3.06 0.15 0.12 1.03 0.17 2.00 1.01 0.98 0.06 0.16 2.00 1.00 0.08 1.06 0.19 0.13 2.10 0.13 0.08 1.00 0.19 0.99 0.16 2.00 2.19 0.12 3.96 0.17 0.99 0.05 2.06 0.06 3.03 0.08 1.02 0.06 0.11 1.02 0.17 1.01 1.06 0.15 0.08 3.92 0.14 1.01 0.13 0.12 1.05 2.04 3.04 1.02 0.98 0.08 0.10 2.02 3.19 1.00 0.11 1.98 0.14 1.94 0.14 0.07 2.04 0.08 2.05 0.06 0.98 0.08 1.99 0.04 2.93 1.07 0.11 0.05 1.04 0.17 0.09 0.97 1.05 0.99 0.08 0.11 1.02 1.98 0.07 0.06 1.05 0.06 0.09 1.03 0.17 0.11 1.05 0.14 0.09 2.09 0.19 0.06 1.02 0.13 1.03 0.06 0.15 2.07 0.19 0.98 0.08 0.06 1.06 0.16 1.09 0.14 0.16 1.00 0.17 2.07 0.13 0.13 1.01 0.08 2.04 0.05 0.18 1.03 0.05 0.02 0.99 1.01 0.09 0.07 2.98 0.07 0.13 1.01 0.04 0.10 1.99 0.15 0.15 1.05 1.01 0.01 2.09 0.16 0.13 4.02 0.19 0.06 2.03 0.10 3.97 0.08 0.09 1.01 1.01 0.08 1.03 0.16 0.09 1.03 0.12 0.05 1.02 0.07 1.04 0.04 0.15 1.01 0.13 0.04 1.91 0.10 1.06 0.08 2.99 1.01 1.01 1.00 0.04 1.93 0.13 0.90 0.16 1.01 0.98 0.04 1.14 0.16 1.06 0.05 0.13 3.00 0.12 0.05 2.10 0.99 0.99 0.03 0.09 1.00 1.01 0.04 0.99 0.04 1.02 0.08 1.02 0.14 0.11 0.98 0.20 1.15 1.06 0.06 3.08 0.08 0.09 0.97 0.00 0.97 1.04 0.15 0.12 0.89 0.94 0.05 0.12 2.04 0.14 0.04 1.15 0.11 1.06 0.04 0.08 2.10 1.05 0.03 1.01 0.98 1.04 0.03 2.00 0.03 1.01 0.03 0.91 0.10 1.04 0.08 1.04 0.14 0.03 0.98 0.15 1.13 0.12 0.92 2.14 0.09 0.11 0.96 0.07 1.04 0.13 0.03 1.02 0.05 1.12 1.06 1.00 0.13 0.04 0.88 0.01 1.10 0.14 0.88 0.14 0.10 
1.10 0.00 1.14 1.01 1.02 0.06 0.95 1.86 0.07 0.04 1.01 0.04 1.93 0.04 0.08 2.05 1.10 0.10 0.11 0.91 0.11 1.00 0.08 1.09 0.07 0.10 2.14 0.10 3.19 1.07 2.10 0.11 1.02 0.13 0.93 0.09 0.13 0.90 2.17 0.09 0.19 2.09 1.10 0.09 1.13 0.91 2.03 0.08 1.01 2.09 0.19 0.07 1.03 0.10
F5MMO9001DCOF4 450 0.98 0.02 1.03 0.02 0.04 1.04 0.02 1.02 0.03 0.05 2.15 1.04 2.01 0.00 0.93 0.07 0.06 1.01 0.99 0.03 0.05 1.02 0.05 0.02 2.06 1.10 2.02 0.98 0.09 0.06 1.05 2.03 0.08 0.05 1.01 0.10 1.03 0.03 0.09 1.00 0.07 0.01 1.02 0.07 0.98 0.03 0.05 1.98 0.10 0.01 1.02 0.10 0.05 1.03 1.09 1.02 1.02 1.04 0.06 0.99 1.98 0.98 0.07 1.00 0.12 0.04 2.09 1.03 1.00 0.00 0.17 2.02 0.11 0.03 0.96 0.13 2.02 0.04 2.11 0.05 1.03 0.00 1.11 1.07 2.92 1.02 1.02 0.08 0.93 1.03 2.02 0.99 1.01 0.08 1.05 0.09 0.13 1.00 0.11 0.01 2.00 0.11 0.06 1.03 0.18 0.05 1.04 0.07 0.05 1.99 0.11 0.01 0.99 0.16 0.05 1.04 0.11 0.05 1.04 0.13 0.07 1.02 0.11 0.06 2.17 0.10 0.03 1.04 2.07 0.03 0.99 0.13 0.09 0.99 1.02 0.00 0.04 0.94 1.04 0.01 0.06 1.05 1.01 0.02 1.10 0.11 0.11 1.01 0.12 0.03 1.03 0.11 0.09 1.01 1.03 1.06 2.02 0.09 0.99 1.06 1.03 0.03 1.03 0.12 0.17 0.88 0.16 0.02 1.11 2.86 1.07 0.03 0.15 2.10 1.01 0.02 0.04 0.91 0.15 0.99 0.03 1.01 0.06 1.07 0.09 0.16 1.05 0.13 3.03 1.00 1.07 0.05 0.16 0.99 0.13 0.98 0.08 0.90 2.01 1.05 0.08 2.74 0.20 0.16 1.01 0.20 2.07 0.04 2.05 0.11 1.08 0.03 0.16 1.05 0.10 0.02 0.97 0.08 0.99 0.04 0.19 1.02 1.03 0.03 1.08 0.10 1.04 0.05 0.16 1.06 1.01 0.99 0.06 0.15 1.02 1.92 0.13 0.06 1.02 1.02 2.06 0.04 0.09 1.09 0.15 0.01 0.98 0.08 1.06 0.01 2.06 1.02 1.01 0.04 1.08 0.12 0.09 0.90 0.11 0.99 0.17 1.03 1.14 0.08 2.84 0.04 0.86 0.94 1.37 0.08 2.05 0.19 0.16 0.94 0.35 0.11 2.00 0.20 0.18 0.93 0.41 0.15 0.96 2.03 0.16 1.75 0.19 1.45 0.14 1.27 0.04 0.17 2.11 0.23 3.92 0.13 0.32 1.02 2.03 0.07 1.05 0.27 0.30 1.06 0.29 0.08 0.99 0.24 1.04 0.02 0.31 1.03 0.24 0.05 1.93 0.21 0.98 0.09 3.70 1.02 1.44 1.03 0.84 2.42 0.24 1.23 0.09 1.49 2.89 0.24 0.21 3.26 0.93 0.10 2.19 1.98 1.00 0.03 0.45 1.27 1.30 0.02 0.83 0.26 1.17 0.05 1.19 0.12 0.23 0.85 0.20 1.00 0.98 0.15 2.58 0.21 0.27 1.72 0.90 0.16 0.88 0.38 0.01 1.08 1.20 0.12 0.16 2.01 0.24 0.03 1.88 1.39 1.83 0.06 1.36 0.21 0.39 0.87 0.19 0.12 0.84 0.19 1.69 0.09 1.13 0.09 1.42 0.09 1.24 0.09 1.11 0.09 0.21 0.81 0.20 0.93 
0.16 1.06 1.70 2.08 0.15 0.16 1.42 0.43 1.06 0.86 1.20 0.12 1.22 0.20 0.25 0.98 0.23 0.82 0.19 0.25 1.01 0.18 1.05 0.11 0.26 0.95 0.22 0.11 1.08 0.19 1.05 1.03 0.21 0.08 2.14 0.21 1.84 0.07 0.40 1.79 1.35 0.90 0.17 1.35 1.12 0.15 1.84 1.23 2.19 0.86 1.35 0.26 0.34 1.00
F5MMO9001CUZ4G 450 1.04 0.01 1.02 0.03 0.04 1.00 0.02 1.01 0.04 0.08 2.06 1.02 1.97 0.03 0.99 0.05 0.07 1.07 1.03 0.02 0.06 1.03 0.05 0.02 1.99 1.04 2.06 0.99 0.09 0.05 1.01 1.98 0.08 0.06 1.00 0.06 1.03 0.05 0.05 1.02 0.11 0.04 1.03 0.06 1.04 0.03 0.06 2.04 0.09 0.05 0.98 0.08 0.06 1.03 1.02 1.03 0.98 1.05 0.07 1.01 1.95 1.05 0.05 1.00 0.11 0.05 2.03 1.96 1.02 0.01 0.11 1.03 0.12 0.02 0.98 0.07 1.97 0.03 1.02 0.04 3.03 1.01 3.02 0.05 0.17 1.01 0.19 0.06 2.00 1.05 2.07 1.03 1.01 0.10 1.04 0.09 0.12 1.03 1.04 0.04 1.01 0.12 1.03 0.05 0.09 1.02 1.00 1.01 0.09 0.12 1.06 0.12 2.01 0.01 0.99 1.05 1.03 0.06 1.05 0.10 0.12 1.02 1.03 0.06 0.05 1.00 0.11 2.00 0.07 0.14 0.98 1.05 0.07 3.04 0.13 1.05 0.12 0.07 1.03 2.03 3.07 1.02 0.99 0.16 0.05 1.98 3.08 0.96 0.08 1.97 0.10 1.96 0.08 0.10 1.98 1.03 1.04 0.07 1.03 0.13 0.16 1.03 0.20 0.07 1.01 0.14 3.08 0.97 0.14 0.05 1.09 0.15 0.06 1.02 1.00 1.01 0.06 0.12 1.02 1.99 0.11 0.03 1.01 0.98 2.02 0.02 0.18 1.06 0.14 0.02 1.03 0.15 1.00 0.03 0.15 1.02 0.15 0.04 1.04 0.13 0.09 0.99 0.16 0.06 1.03 0.15 1.05 0.10 0.16 1.01 0.18 1.99 0.14 0.09 1.05 0.09 1.99 0.04 2.05 1.03 0.10 0.05 3.14 0.15 0.14 1.01 0.11 0.07 2.01 0.12 0.09 0.96 1.00 0.03 0.09 1.02 0.19 0.08 1.03 0.15 0.12 2.14 0.18 0.05 1.02 1.06 0.18 0.04 2.00 0.09 4.08 0.05 0.13 0.98 1.08 0.09 1.03 0.14 0.10 1.00 0.12 0.02 1.01 0.09 1.03 0.04 0.15 0.99 0.12 0.03 2.06 0.10 1.09 0.08 3.21 1.03 1.01 0.99 0.09 2.01 0.15 0.93 0.13 1.02 0.95 0.13 1.02 0.17 1.06 0.05 0.16 3.12 0.12 0.08 2.07 1.06 1.08 1.02 0.09 0.07 0.93 0.13 1.01 0.07 0.98 0.07 1.02 0.11 0.12 0.99 0.21 1.09 1.08 0.10 3.03 0.06 0.12 1.99 0.04 0.12 1.00 0.03 0.11 1.05 1.00 0.07 0.16 1.96 0.12 0.04 2.16 1.98 1.04 0.07 0.90 0.04 0.15 1.09 3.08 0.10 1.04 0.15 0.99 0.08 1.05 0.08 1.07 0.17 0.07 1.01 0.18 2.06 0.13 0.13 2.12 1.97 0.14 0.09 0.91 0.10 1.07 1.09 3.06 1.08 0.98 0.17 0.91 0.09 0.08 3.09 0.11 1.08 0.19 0.00 2.04 0.16 2.05 0.17 0.06 2.07 0.96 2.05 0.09 0.98 0.09 0.06 2.37 0.03 0.16 1.11 0.95 0.09 1.13 0.93 4.07 0.08 
0.07 0.95 1.99 0.09 0.12 1.97 1.12 0.11 0.10 2.06 0.18 0.94 0.13 0.09 1.07 0.09 1.03 0.14 0.11 0.98 0.15 1.04 0.15 0.10 1.04 2.06 0.12 1.00 0.07 0.13 2.06 0.94 0.11 0.16 1.03 0.90 0.13 1.03 0.21 1.03 1.09 0.13 2.06 0.06 0.12 1.01 0.10 0.12 1.03 0.06 4.01 0.13 0.06 1.99
...
I don't mind sending the full files if you think it would be helpful. I suspect it's a simple error having to do with an assumption I am making about threads, but I can't seem to spot it. Thanks for taking the time to look at this issue. I really appreciate it!

I tried your program in VC 6 and I was getting an access violation when I created ofstream objects. The following link helped me to solve that access violation.
http://www.gamedev.net/topic/73037-ofstream-access-violations-when-multi-threading/
Since you haven't provided more information as where you are getting the violation, I can only give this as a hint.

Related

Why doesn't the reg exp that works in regex101 dot com, does NOT work with grep nor with vim in Linux

I have an ascii file in the format below:
gc_ab_cd 92641.48 25.2 5.12 9.20 0.00 gc_ht_t_gc_ab_cd
gc_ab_cd/reg 29.24 0.0 0.49 0.01 0.00 gc_ht_t_CHECK1_0
gc_ab_cd/reg/dff_in_gated 17.13 0.0 6.00 11.13 0.00 gc_ht_t_dff_en_in_WIDTH84_0
gc_ab_cd/reg/dff_in_send_gated 0.20 0.0 0.00 0.20 0.00 gc_ht_t_dff_in_WIDTH1_33
gc_ab_cd/reg/rd_rtn 11.42 0.0 4.20 7.22 0.00 gc_ht_t_gfx_2toN_WIDTH32_1
gc_ab_cd/regs 18583.88 5.1 2958.87 25.01 0.00 gc_ht_t_gc_ab_cd_regs
gc_ab_cd/tap_ch 431.51 0.1 144.83 150.05 0.00 gc_ht_t_gc_vm2_qe128
gc_ab_cd/tap_ch/throttle 136.63 0.0 77.33 59.30 0.00 gc_ht_t_gc_vm2__throttle
gc_ab_cd/vm2_dbg 22.79 0.0 0.00 0.00 0.00 gc_ht_t_gfx_dbg_mux_01
gc_ab_cd/vm2_dbg/bg_mux 22.79 0.0 9.90 4.80 0.00 gc_ht_t_gc_dbg_mux_4_1_01
gc_ab_cd/vm2_dbg/bg_mux/clk 0.20 0.0 0.00 0.20 0.00 gc_ht_t_clock
gc_ab_cd/vm2_dbg/bg__mux/flop_mux_flop 5.33 0.0 2.63 2.70 0.00 gc_ht_t_dbg_COUNT4_WIDTH8_0
I need to grep 0 or 1 level of the hierarchy of the first field in the above text, so that the output of the "grep" should print the below in the stdout
gc_ab_cd 92641.48 25.2 5.12 9.20 0.00 gc_ht_t_gc_ab_cd
gc_ab_cd/reg 29.24 0.0 0.49 0.01 0.00 gc_ht_t_CHECK1_0
gc_ab_cd/regs 18583.88 5.1 2958.87 25.01 0.00 gc_ht_t_gc_ab_cd_regs
gc_ab_cd/tap_ch 431.51 0.1 144.83 150.05 0.00 gc_ht_t_gc_vm2_qe128
gc_ab_cd/vm2_dbg 22.79 0.0 0.00 0.00 0.00 gc_ht_t_gfx_dbg_mux_01
I used the regexp https://regex101.com/r/D92KSP/1
But it gives only 3 matches below (1 level of hierarchy in the first field), as can be seen in https://regex101.com/r/D92KSP/1
gc_ab_cd/reg 29.24 0.0 0.49 0.01 0.00 gc_ht_t_CHECK1_0
gc_ab_cd/regs 18583.88 5.1 2958.87 25.01 0.00 gc_ht_t_gc_ab_cd_regs
gc_ab_cd/tap_ch 431.51 0.1 144.83 150.05 0.00 gc_ht_t_gc_vm2_qe128
Questions:
[1] I'm NOT sure why the below line (0 hierarchy in the first field) is NOT being matched by the regexp in https://regex101.com/r/D92KSP/1
gc_ab_cd 92641.48 25.2 5.12 9.20 0.00 gc_ht_t_gc_ab_cd
[2] What should I do to modify the regexp https://regex101.com/r/D92KSP/1 to match the line below
gc_ab_cd/vm2_dbg 22.79 0.0 0.00 0.00 0.00 gc_ht_t_gfx_dbg_mux_01
[3] I used the above regexp with "grep" and in the vim editor in Linux and it doesn't work there, though it works partially in regexp101.com. Don't know why?
regex101 and other such web sites will help you create/validate a regexp that works on that web site, don't assume it'll work anywhere else, especially the mandatory POSIX command-line tools like sed, grep, and awk as each tool uses specific regexp variants (BRE, ERE, and/or PCRE) with different arguments (e.g. -E to enable EREs in grep and sed, -P to enable PCREs in grep with some caveats), extensions (e.g. word boundaries, shortcuts, or back references), and limitations (e.g. delimiter chars). You have to learn which regexp variant with which extensions and limitations the version (e.g. GNU or BSD) of the tool you want to use supports.
In any case, any time you're talking about fields you should be using awk, not grep (or sed) since awk is the tool that separates input into fields. The following will work using any awk in any shell on every Unix box:
$ awk '$1 ~ "^[^/]*/?[^/]*$"' file
gc_ab_cd 92641.48 25.2 5.12 9.20 0.00 gc_ht_t_gc_ab_cd
gc_ab_cd/reg 29.24 0.0 0.49 0.01 0.00 gc_ht_t_CHECK1_0
gc_ab_cd/regs 18583.88 5.1 2958.87 25.01 0.00 gc_ht_t_gc_ab_cd_regs
gc_ab_cd/tap_ch 431.51 0.1 144.83 150.05 0.00 gc_ht_t_gc_vm2_qe128
gc_ab_cd/vm2_dbg 22.79 0.0 0.00 0.00 0.00 gc_ht_t_gfx_dbg_mux_01
or to search for a specific path depth by just setting a numeric variable on the command line:
$ awk -v n=2 '{key=$1} gsub("/","&",key)<n' file
gc_ab_cd 92641.48 25.2 5.12 9.20 0.00 gc_ht_t_gc_ab_cd
gc_ab_cd/reg 29.24 0.0 0.49 0.01 0.00 gc_ht_t_CHECK1_0
gc_ab_cd/regs 18583.88 5.1 2958.87 25.01 0.00 gc_ht_t_gc_ab_cd_regs
gc_ab_cd/tap_ch 431.51 0.1 144.83 150.05 0.00 gc_ht_t_gc_vm2_qe128
gc_ab_cd/vm2_dbg 22.79 0.0 0.00 0.00 0.00 gc_ht_t_gfx_dbg_mux_01
$ awk -v n=3 '{key=$1} gsub("/","&",key)<n' file
gc_ab_cd 92641.48 25.2 5.12 9.20 0.00 gc_ht_t_gc_ab_cd
gc_ab_cd/reg 29.24 0.0 0.49 0.01 0.00 gc_ht_t_CHECK1_0
gc_ab_cd/reg/dff_in_gated 17.13 0.0 6.00 11.13 0.00 gc_ht_t_dff_en_in_WIDTH84_0
gc_ab_cd/reg/dff_in_send_gated 0.20 0.0 0.00 0.20 0.00 gc_ht_t_dff_in_WIDTH1_33
gc_ab_cd/reg/rd_rtn 11.42 0.0 4.20 7.22 0.00 gc_ht_t_gfx_2toN_WIDTH32_1
gc_ab_cd/regs 18583.88 5.1 2958.87 25.01 0.00 gc_ht_t_gc_ab_cd_regs
gc_ab_cd/tap_ch 431.51 0.1 144.83 150.05 0.00 gc_ht_t_gc_vm2_qe128
gc_ab_cd/tap_ch/throttle 136.63 0.0 77.33 59.30 0.00 gc_ht_t_gc_vm2__throttle
gc_ab_cd/vm2_dbg 22.79 0.0 0.00 0.00 0.00 gc_ht_t_gfx_dbg_mux_01
gc_ab_cd/vm2_dbg/bg_mux 22.79 0.0 9.90 4.80 0.00 gc_ht_t_gc_dbg_mux_4_1_01

Pandas extract daily data and write to csv

I have a pandas dataframe looking like this:
Index Stat value1 value2 value3 value4 value5 value6
2016-11-01 00:00:00.000 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.100 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.200 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.300 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.400 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-02 00:00:00.000 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-02 00:00:00.100 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-02 00:00:00.200 Gard 0.15 0.25 0.01 6.08 31.0 0.291719042916
2016-11-02 00:00:00.300 Gard 0.15 0.25 0.01 6.08 31.0 0.291719042916
Of course this is just a snippet, the whole dataframe has about 4.3 million rows.
I would like to extract each line that corresponds to a date. So all lines that have the timestamp date 2016-11-01 into one file and 2016-11-02 into another file. So two files looking like this:
Index Stat value1 value2 value3 value4 value5 value6
2016-11-01 00:00:00.000 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.100 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.200 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.300 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-01 00:00:00.400 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
And:
Index Stat value1 value2 value3 value4 value5 value6
2016-11-02 00:00:00.000 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-02 00:00:00.100 Gard 0.08 0.24 0.09 6.08 18.4 0.268514431642
2016-11-02 00:00:00.200 Gard 0.15 0.25 0.01 6.08 31.0 0.291719042916
2016-11-02 00:00:00.300 Gard 0.15 0.25 0.01 6.08 31.0 0.291719042916
I tried to use groupby in the following command:
grouped_df = df.groupby(df.index.date)["Stat","value1","value2","value3","value4","value5","value6"]
But I don't get any output or error. It runs but nothing happens. Am I doing anything wrong? Is this even the correct function to use? Or is there an easier, better way?
I think you need groupby with apply and custom function with to_csv:
f = lambda x: x.to_csv(r'd:/folder/{}.csv'.format(x.name))
df.groupby(df.index.date).apply(f)

How to benefit most from precompiled headers with gcc?

I have a C++ project with many targets that include a lot of boost header files and other line-intensive headers. Most of the targets include the same headers.
Thus, I thought this might be ideal to use precompiled headers (pch).
So I created a header file with the most included headers and precompiled it.
This reduced the lines of code of the compilation unit from 350k to 120k (I passed the -save-temps flag to the gcc to check that). I checked that it was used with the -H parameter and the pch has an exclamation mark in front of it.
The precompiled header has 550MB.
Though, the compile time was only reduced from 23 seconds to 20 seconds.
Is this little of improvement to be expected from precompiled headers?
If not, what am I doing wrong?
What speeds the compilation time with precompiled headers most?
Edit:
This is the gcc command:
/usr/bin/c++
-fPIC -I/projectDir/build/source -I/projectDir/source -I/usr/include/eigen3 -include /projectDir/build/source/Core/core/cotire/Core_ORIGINAL_CXX_prefix.hxx
-Winvalid-pch -g -Wall -Wextra -Wno-long-long -Wno-unused-parameter -std=c++0x -DBOOST_ENABLE_ASSERT_HANDLER -D_REENTRANT -o CMakeFiles/SubProject.dir/cotire/SubProject_ORIGINAL_CXX_unity.cxx.o
-c /projectDir/build/source/ArmarXCore/statechart/cotire/SubProject_ORIGINAL_CXX_unity.cxx
The output of passing -ftime-report gives me (with PCH enabled):
Execution times (seconds)
phase setup : 0.00 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 1321 kB ( 0%) ggc
phase parsing : 7.29 (32%) usr 1.69 (51%) sys 8.99 (35%) wall 1135793 kB (54%) ggc
phase lang. deferred : 2.75 (12%) usr 0.40 (12%) sys 3.15 (12%) wall 317920 kB (15%) ggc
phase opt and generate : 12.03 (53%) usr 1.17 (36%) sys 13.22 (51%) wall 622545 kB (30%) ggc
phase check & debug info: 0.01 ( 0%) usr 0.00 ( 0%) sys 0.00 ( 0%) wall 440 kB ( 0%) ggc
phase last asm : 0.63 ( 3%) usr 0.02 ( 1%) sys 0.64 ( 2%) wall 26440 kB ( 1%) ggc
phase finalize : 0.00 ( 0%) usr 0.01 ( 0%) sys 0.02 ( 0%) wall 0 kB ( 0%) ggc
|name lookup : 1.30 ( 6%) usr 0.29 ( 9%) sys 1.42 ( 5%) wall 153617 kB ( 7%) ggc
|overload resolution : 3.37 (15%) usr 0.59 (18%) sys 3.30 (13%) wall 360551 kB (17%) ggc
garbage collection : 1.80 ( 8%) usr 0.01 ( 0%) sys 1.82 ( 7%) wall 0 kB ( 0%) ggc
dump files : 0.11 ( 0%) usr 0.05 ( 2%) sys 0.18 ( 1%) wall 0 kB ( 0%) ggc
callgraph construction : 0.44 ( 2%) usr 0.10 ( 3%) sys 0.59 ( 2%) wall 26388 kB ( 1%) ggc
callgraph optimization : 0.21 ( 1%) usr 0.11 ( 3%) sys 0.23 ( 1%) wall 16131 kB ( 1%) ggc
ipa free inline summary : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
cfg construction : 0.03 ( 0%) usr 0.00 ( 0%) sys 0.03 ( 0%) wall 2119 kB ( 0%) ggc
cfg cleanup : 0.08 ( 0%) usr 0.00 ( 0%) sys 0.11 ( 0%) wall 169 kB ( 0%) ggc
trivially dead code : 0.05 ( 0%) usr 0.02 ( 1%) sys 0.13 ( 0%) wall 0 kB ( 0%) ggc
df scan insns : 0.30 ( 1%) usr 0.02 ( 1%) sys 0.38 ( 1%) wall 1126 kB ( 0%) ggc
df live regs : 0.07 ( 0%) usr 0.00 ( 0%) sys 0.10 ( 0%) wall 0 kB ( 0%) ggc
df reg dead/unused notes: 0.10 ( 0%) usr 0.03 ( 1%) sys 0.12 ( 0%) wall 7774 kB ( 0%) ggc
register information : 0.03 ( 0%) usr 0.00 ( 0%) sys 0.04 ( 0%) wall 0 kB ( 0%) ggc
alias analysis : 0.02 ( 0%) usr 0.02 ( 1%) sys 0.08 ( 0%) wall 2621 kB ( 0%) ggc
rebuild jump labels : 0.05 ( 0%) usr 0.01 ( 0%) sys 0.03 ( 0%) wall 0 kB ( 0%) ggc
preprocessing : 1.16 ( 5%) usr 0.45 (14%) sys 1.61 ( 6%) wall 209848 kB (10%) ggc
parser (global) : 0.43 ( 2%) usr 0.29 ( 9%) sys 0.83 ( 3%) wall 193966 kB ( 9%) ggc
parser struct body : 1.03 ( 5%) usr 0.20 ( 6%) sys 1.37 ( 5%) wall 199825 kB ( 9%) ggc
parser enumerator list : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.00 ( 0%) wall 574 kB ( 0%) ggc
parser function body : 0.53 ( 2%) usr 0.06 ( 2%) sys 0.49 ( 2%) wall 35252 kB ( 2%) ggc
parser inl. func. body : 0.13 ( 1%) usr 0.03 ( 1%) sys 0.14 ( 1%) wall 11720 kB ( 1%) ggc
parser inl. meth. body : 1.14 ( 5%) usr 0.19 ( 6%) sys 1.45 ( 6%) wall 115776 kB ( 6%) ggc
template instantiation : 4.11 (18%) usr 0.82 (25%) sys 4.78 (18%) wall 566245 kB (27%) ggc
inline parameters : 0.05 ( 0%) usr 0.01 ( 0%) sys 0.03 ( 0%) wall 12792 kB ( 1%) ggc
tree gimplify : 0.28 ( 1%) usr 0.03 ( 1%) sys 0.27 ( 1%) wall 55239 kB ( 3%) ggc
tree eh : 0.19 ( 1%) usr 0.00 ( 0%) sys 0.14 ( 1%) wall 20091 kB ( 1%) ggc
tree CFG construction : 0.02 ( 0%) usr 0.00 ( 0%) sys 0.05 ( 0%) wall 34452 kB ( 2%) ggc
tree CFG cleanup : 0.09 ( 0%) usr 0.02 ( 1%) sys 0.15 ( 1%) wall 27 kB ( 0%) ggc
tree PHI insertion : 0.01 ( 0%) usr 0.01 ( 0%) sys 0.01 ( 0%) wall 5960 kB ( 0%) ggc
tree SSA rewrite : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.04 ( 0%) wall 8035 kB ( 0%) ggc
tree SSA other : 0.04 ( 0%) usr 0.03 ( 1%) sys 0.12 ( 0%) wall 1604 kB ( 0%) ggc
tree operand scan : 0.06 ( 0%) usr 0.04 ( 1%) sys 0.08 ( 0%) wall 16681 kB ( 1%) ggc
dominance frontiers : 0.00 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
dominance computation : 0.14 ( 1%) usr 0.04 ( 1%) sys 0.12 ( 0%) wall 0 kB ( 0%) ggc
out of ssa : 0.04 ( 0%) usr 0.03 ( 1%) sys 0.14 ( 1%) wall 8 kB ( 0%) ggc
expand vars : 0.10 ( 0%) usr 0.00 ( 0%) sys 0.14 ( 1%) wall 10387 kB ( 0%) ggc
expand : 0.79 ( 3%) usr 0.05 ( 2%) sys 0.77 ( 3%) wall 89756 kB ( 4%) ggc
post expand cleanups : 0.10 ( 0%) usr 0.00 ( 0%) sys 0.05 ( 0%) wall 14796 kB ( 1%) ggc
varconst : 0.03 ( 0%) usr 0.00 ( 0%) sys 0.03 ( 0%) wall 532 kB ( 0%) ggc
jump : 0.00 ( 0%) usr 0.01 ( 0%) sys 0.00 ( 0%) wall 0 kB ( 0%) ggc
integrated RA : 4.92 (22%) usr 0.12 ( 4%) sys 4.54 (17%) wall 167029 kB ( 8%) ggc
LRA non-specific : 0.38 ( 2%) usr 0.01 ( 0%) sys 0.81 ( 3%) wall 776 kB ( 0%) ggc
LRA virtuals elimination: 0.07 ( 0%) usr 0.00 ( 0%) sys 0.07 ( 0%) wall 6530 kB ( 0%) ggc
LRA reload inheritance : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.00 ( 0%) wall 4 kB ( 0%) ggc
LRA create live ranges : 0.03 ( 0%) usr 0.00 ( 0%) sys 0.02 ( 0%) wall 40 kB ( 0%) ggc
LRA hard reg assignment : 0.00 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
reload : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.03 ( 0%) wall 0 kB ( 0%) ggc
thread pro- & epilogue : 0.16 ( 1%) usr 0.01 ( 0%) sys 0.26 ( 1%) wall 19997 kB ( 1%) ggc
shorten branches : 0.17 ( 1%) usr 0.01 ( 0%) sys 0.16 ( 1%) wall 0 kB ( 0%) ggc
reg stack : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.00 ( 0%) wall 0 kB ( 0%) ggc
final : 0.63 ( 3%) usr 0.04 ( 1%) sys 0.69 ( 3%) wall 29353 kB ( 1%) ggc
symout : 1.28 ( 6%) usr 0.06 ( 2%) sys 1.23 ( 5%) wall 173563 kB ( 8%) ggc
uninit var analysis : 0.00 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
rest of compilation : 0.81 ( 4%) usr 0.18 ( 5%) sys 0.93 ( 4%) wall 34415 kB ( 2%) ggc
unaccounted todo : 0.25 ( 1%) usr 0.16 ( 5%) sys 0.39 ( 1%) wall 0 kB ( 0%) ggc
TOTAL : 22.71 3.29 26.03 2104543 kB
thanks veio
I haven't seen -ftime-report before. That actually gives some interesting info on the bottleneck.
phase opt and generate : 12.03 (53%) usr 1.17 (36%) sys 13.22 (51%)
Half the time is spent optimizing, which PCH won't solve. PCH is meant to prevent include files being compiled per translation unit. A unity build is essentially one large translation unit, so re-compiling headers should not be a bottleneck. Unity builds generally imply it will take longer to optimize though, since compiler optimization normally isn't linear with respect to translation unit size.
However, since optimizing is generally designed for non-unity builds, one possible optimization might be using -flto instead. GCC LTO can be parallelized by passing a thread argument, -flto=8. The speedup will most likely be less than threads though, for obvious reasons. FYI, you might also need to switch your linker to ld.gold.

Understand where compilation time is taken when using GCC

I have a project that makes massive use of templates. Recently the compilation time rose quite abruptly. I wonder if there's a way for seeing what classes / lines require the most time to be compiled by g++.
Here is some output from -ftime-report:
Execution times (seconds)
TOTAL : 0.30 0.05 0.37 9119 kB
Execution times (seconds)
garbage collection : 0.91 ( 6%) usr 0.00 ( 0%) sys 0.92 ( 5%) wall 0 kB ( 0%) ggc
callgraph construction: 0.23 ( 2%) usr 0.11 ( 3%) sys 0.37 ( 2%) wall 10652 kB ( 1%) ggc
callgraph optimization: 0.18 ( 1%) usr 0.12 ( 3%) sys 0.28 ( 2%) wall 11906 kB ( 2%) ggc
varpool construction : 0.04 ( 0%) usr 0.01 ( 0%) sys 0.08 ( 0%) wall 6984 kB ( 1%) ggc
cfg construction : 0.03 ( 0%) usr 0.00 ( 0%) sys 0.05 ( 0%) wall 644 kB ( 0%) ggc
cfg cleanup : 0.05 ( 0%) usr 0.02 ( 0%) sys 0.05 ( 0%) wall 7 kB ( 0%) ggc
trivially dead code : 0.05 ( 0%) usr 0.01 ( 0%) sys 0.12 ( 1%) wall 0 kB ( 0%) ggc
df scan insns : 0.37 ( 3%) usr 0.03 ( 1%) sys 0.43 ( 2%) wall 677 kB ( 0%) ggc
df live regs : 0.07 ( 0%) usr 0.01 ( 0%) sys 0.02 ( 0%) wall 0 kB ( 0%) ggc
df reg dead/unused notes: 0.08 ( 1%) usr 0.01 ( 0%) sys 0.08 ( 0%) wall 2755 kB ( 0%) ggc
register information : 0.05 ( 0%) usr 0.01 ( 0%) sys 0.05 ( 0%) wall 0 kB ( 0%) ggc
alias analysis : 0.01 ( 0%) usr 0.01 ( 0%) sys 0.01 ( 0%) wall 878 kB ( 0%) ggc
rebuild jump labels : 0.03 ( 0%) usr 0.01 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
preprocessing : 0.19 ( 1%) usr 0.44 (11%) sys 0.68 ( 4%) wall 5284 kB ( 1%) ggc
parser : 3.94 (28%) usr 1.43 (35%) sys 4.94 (27%) wall 355964 kB (48%) ggc
name lookup : 1.35 ( 9%) usr 0.88 (21%) sys 2.76 (15%) wall 64919 kB ( 9%) ggc
inline heuristics : 0.14 ( 1%) usr 0.03 ( 1%) sys 0.14 ( 1%) wall 0 kB ( 0%) ggc
integration : 0.02 ( 0%) usr 0.00 ( 0%) sys 0.02 ( 0%) wall 20 kB ( 0%) ggc
tree gimplify : 0.31 ( 2%) usr 0.07 ( 2%) sys 0.28 ( 2%) wall 24598 kB ( 3%) ggc
tree eh : 0.07 ( 0%) usr 0.02 ( 0%) sys 0.11 ( 1%) wall 7267 kB ( 1%) ggc
tree CFG construction : 0.04 ( 0%) usr 0.04 ( 1%) sys 0.11 ( 1%) wall 15754 kB ( 2%) ggc
tree CFG cleanup : 0.12 ( 1%) usr 0.00 ( 0%) sys 0.05 ( 0%) wall 3 kB ( 0%) ggc
tree find ref. vars : 0.03 ( 0%) usr 0.01 ( 0%) sys 0.02 ( 0%) wall 963 kB ( 0%) ggc
tree PHI insertion : 0.00 ( 0%) usr 0.01 ( 0%) sys 0.01 ( 0%) wall 351 kB ( 0%) ggc
tree SSA rewrite : 0.03 ( 0%) usr 0.01 ( 0%) sys 0.01 ( 0%) wall 4078 kB ( 1%) ggc
tree SSA other : 0.03 ( 0%) usr 0.06 ( 1%) sys 0.12 ( 1%) wall 1504 kB ( 0%) ggc
tree operand scan : 0.04 ( 0%) usr 0.02 ( 0%) sys 0.08 ( 0%) wall 10781 kB ( 1%) ggc
dominance computation : 0.15 ( 1%) usr 0.04 ( 1%) sys 0.15 ( 1%) wall 0 kB ( 0%) ggc
out of ssa : 0.09 ( 1%) usr 0.00 ( 0%) sys 0.02 ( 0%) wall 0 kB ( 0%) ggc
expand vars : 0.03 ( 0%) usr 0.00 ( 0%) sys 0.03 ( 0%) wall 1840 kB ( 0%) ggc
expand : 0.45 ( 3%) usr 0.04 ( 1%) sys 0.59 ( 3%) wall 37695 kB ( 5%) ggc
post expand cleanups : 0.08 ( 1%) usr 0.02 ( 0%) sys 0.06 ( 0%) wall 4542 kB ( 1%) ggc
varconst : 0.15 ( 1%) usr 0.03 ( 1%) sys 0.12 ( 1%) wall 3595 kB ( 0%) ggc
jump : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.04 ( 0%) wall 1904 kB ( 0%) ggc
mode switching : 0.01 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
integrated RA : 1.33 ( 9%) usr 0.09 ( 2%) sys 1.49 ( 8%) wall 18163 kB ( 2%) ggc
reload : 0.60 ( 4%) usr 0.10 ( 2%) sys 0.62 ( 3%) wall 8668 kB ( 1%) ggc
thread pro- & epilogue: 0.17 ( 1%) usr 0.00 ( 0%) sys 0.20 ( 1%) wall 11884 kB ( 2%) ggc
reg stack : 0.02 ( 0%) usr 0.00 ( 0%) sys 0.00 ( 0%) wall 0 kB ( 0%) ggc
final : 0.71 ( 5%) usr 0.10 ( 2%) sys 0.84 ( 5%) wall 6251 kB ( 1%) ggc
symout : 1.10 ( 8%) usr 0.16 ( 4%) sys 1.19 ( 6%) wall 100954 kB (14%) ggc
uninit var analysis : 0.03 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
early local passes : 0.00 ( 0%) usr 0.00 ( 0%) sys 0.01 ( 0%) wall 0 kB ( 0%) ggc
rest of compilation : 0.49 ( 3%) usr 0.06 ( 1%) sys 0.76 ( 4%) wall 19252 kB ( 3%) ggc
unaccounted todo : 0.43 ( 3%) usr 0.09 ( 2%) sys 0.55 ( 3%) wall 0 kB ( 0%) ggc
TOTAL : 14.26 4.11 18.52 742072 kB
Steven Watanabe's template profiler may help you get a per class/function instantiation count.
See Debugging GCC Compile Times for an actual link to this tool.
When I read that you have made massive use of templates in your project, my first suspicion was template instantiation, and after seeing the following information, my suspicion became stronger:
parser : ... (27%) wall 355964 kB (48%) ggc
name lookup : ... (15%) wall 64919 kB ( 9%) ggc
Since I cannot see the code (as you have not posted it), I can only suspect. My second suspicion is, you have not used explicit instantiation of the templates for the known types (which you will most certainly use); instead you depend on implicit instantiation and you are using templates from lots of .cpp files. If so, then that could be the major problem, because implicit instantiation causes the same templates to be instantiated many times, once for each translation unit. So if you have M templates and you are using them from N translation units (.cpp), then there will be M * N instantiations, instead of just M instantiations.
AFAIK, no such compilation switch does exist.
A more manual method can be to split between preprocessing and compilation (gcc -E, then gcc -c on preprocessed file) to guess where time is spent.
Another solution is to instrument your build environment to have compilation time per file. Note that I can only recommend to set up continuous integration to track such evolutions early (as soon as it pops up, you detect it without having to dig in the past what introduced the jump).
As a rule of thumb, you can check that only relevant headers are included (try to remove some) or could switch to precompiled headers.

`gprof` time spent in particular lines of code

I've been using the gprof profiler in conjunction with g++.
I have a function in my code which encapsulates several sections of behaviour which are related enough to the primary function that it would not make sense to split them off into their own functions.
I'd like to know how much time is spent in each of these areas of code.
So, if you imagine the code looks like
function(){
A
A
A
B
B
B
C
C
C
}
where A, B, and C represent particular sections of code I'm interested in, is there a way to get gprof to tell me how much time is spent working on those particular sections?
I know it's an old question, but I have found an interesting answer.
As Sam says, the -l option is only for old gcc compilers. But I have found that if you compile and link with -pg -fprofile-arcs -ftest-coverage and run the program, the result of gprof -l is very interesting.
Flat profile:
Each sample counts as 0.01 seconds.
% cumulative self self total
time seconds seconds calls Ts/call Ts/call name
13.86 0.26 0.26 main (ComAnalyste.c:450 # 804b315)
10.87 0.46 0.20 main (ComAnalyste.c:386 # 804b151)
7.07 0.59 0.13 main (ComAnalyste.c:437 # 804b211)
6.25 0.70 0.12 main (ComAnalyste.c:436 # 804b425)
4.89 0.79 0.09 main (ComAnalyste.c:283 # 804a3f4)
4.89 0.88 0.09 main (ComAnalyste.c:436 # 804b1e9)
4.08 0.96 0.08 main (ComAnalyste.c:388 # 804ad95)
3.81 1.03 0.07 main (ComAnalyste.c:293 # 804a510)
3.53 1.09 0.07 main (ComAnalyste.c:401 # 804af04)
3.26 1.15 0.06 main (ComAnalyste.c:293 # 804a4bf)
2.72 1.20 0.05 main (ComAnalyste.c:278 # 804a48d)
2.72 1.25 0.05 main (ComAnalyste.c:389 # 804adae)
2.72 1.30 0.05 main (ComAnalyste.c:406 # 804aecb)
2.45 1.35 0.05 main (ComAnalyste.c:386 # 804ad6d)
2.45 1.39 0.05 main (ComAnalyste.c:443 # 804b248)
2.45 1.44 0.05 main (ComAnalyste.c:446 # 804b2f4)
2.17 1.48 0.04 main (ComAnalyste.c:294 # 804a4e4)
2.17 1.52 0.04 main (ComAnalyste.c:459 # 804b43b)
1.63 1.55 0.03 main (ComAnalyste.c:442 # 804b22d)
1.63 1.58 0.03 main (ComAnalyste.c:304 # 804a56d)
1.09 1.60 0.02 main (ComAnalyste.c:278 # 804a3b3)
1.09 1.62 0.02 main (ComAnalyste.c:285 # 804a450)
1.09 1.64 0.02 main (ComAnalyste.c:286 # 804a470)
1.09 1.66 0.02 main (ComAnalyste.c:302 # 804acdf)
0.82 1.67 0.02 main (ComAnalyste.c:435 # 804b1d2)
0.54 1.68 0.01 main (ComAnalyste.c:282 # 804a3db)
0.54 1.69 0.01 main (ComAnalyste.c:302 # 804a545)
0.54 1.70 0.01 main (ComAnalyste.c:307 # 804a586)
0.54 1.71 0.01 main (ComAnalyste.c:367 # 804ac1a)
0.54 1.72 0.01 main (ComAnalyste.c:395 # 804ade6)
0.54 1.73 0.01 main (ComAnalyste.c:411 # 804aff8)
0.54 1.74 0.01 main (ComAnalyste.c:425 # 804b12a)
0.54 1.75 0.01 main (ComAnalyste.c:429 # 804b19f)
0.54 1.76 0.01 main (ComAnalyste.c:444 # 804b26f)
0.54 1.77 0.01 main (ComAnalyste.c:464 # 804b4a1)
0.54 1.78 0.01 main (ComAnalyste.c:469 # 804b570)
0.54 1.79 0.01 main (ComAnalyste.c:472 # 804b5b9)
0.27 1.80 0.01 main (ComAnalyste.c:308 # 804a5a3)
0.27 1.80 0.01 main (ComAnalyste.c:309 # 804a5a9)
0.27 1.81 0.01 main (ComAnalyste.c:349 # 804a974)
0.27 1.81 0.01 main (ComAnalyste.c:350 # 804a99c)
0.27 1.82 0.01 main (ComAnalyste.c:402 # 804af1d)
0.27 1.82 0.01 main (ComAnalyste.c:416 # 804b073)
0.27 1.83 0.01 main (ComAnalyste.c:417 # 804b0a1)
0.27 1.83 0.01 main (ComAnalyste.c:454 # 804b3ec)
0.27 1.84 0.01 main (ComAnalyste.c:461 # 804b44a)
0.27 1.84 0.01 main (ComAnalyste.c:462 # 804b458)
It says the time spent per line. It's a very interesting result.
I don't know the accuracy or the validity of that, but it's quite interesting.
Hope it helps
Here's a useful resource for you: gprof line by line profiling.
With older versions of the gcc compiler, the gprof -l argument specified line by line profiling.
However, newer versions of gcc use the gcov tool instead of gprof to display line by line profiling information.
If you are using linux, then you can use linux perf instead of gprof, as described here:
http://code.google.com/p/jrfonseca/wiki/Gprof2Dot#linux_perf
Typing perf report and selecting a function will allow you to get line-by-line information about where the CPU time is spent inside the function.