I have a huge data set consisting of (x, y) points that I need to present in a scatter plot.
To detect overlapping points and drop visually indistinguishable duplicates at the same position, I wrote the code below:
/*
 * Down-sample a scatter data set for display: snap every (key, value) point
 * to a grid whose cell is (m_downSampleRatio) pixels at the given resolution,
 * keep only the FIRST point that lands in each cell, and emit the surviving
 * points (sorted by cell coordinate, as before) via downSampledPlotReady.
 *
 * keys/values/pixmapIdx are parallel vectors; view gives the visible data
 * range and resolution the widget size in pixels.
 */
void DownSampler::makeDownSample(QVector<double> keys,QVector<double> values, QVector<int> pixmapIdx, QSize resolution, sViewSize view)
{
    m_calculating = true;
    // Grid cell size in data units (one cell = m_downSampleRatio pixels).
    const QSizeF stepValue( ( view.maxHorizontal - view.minHorizontal ) * m_downSampleRatio / (resolution.width() ),
                            ( view.maxVertical - view.minVertical ) * m_downSampleRatio / (resolution.height() ) );
    // QMap keeps cells ordered by (x, y), which preserves the original
    // output ordering of the emitted vectors.
    QMap<QPair<double, double>,int> downsampledMap;
    for(int index = 0 ; index < keys.size() ; index++)
    {
        // Snap to the grid using LOCAL variables: the original wrote the
        // rounded values back into 'keys'/'values', which detaches (deep
        // copies) the implicitly shared QVectors on the first write.
        const double key = round(keys.at(index) / stepValue.width()) * stepValue.width();
        const double value = round(values.at(index) / stepValue.height()) * stepValue.height();
        // Discard points outside the visible view.
        if(key < view.minHorizontal || key > view.maxHorizontal)
            continue;
        if(value < view.minVertical || value > view.maxVertical)
            continue;
        // First point in a cell wins; later duplicates are dropped, which
        // removes the invisible overlapping points.
        const QPair<double, double> cell = qMakePair(key, value);
        if(!downsampledMap.contains(cell))
            downsampledMap.insert(cell, pixmapIdx.at(index));
    }
    // Flatten the map into the three parallel output vectors; reserve to
    // avoid repeated reallocation.
    QVector<int> retPixmapIdx;
    QVector<double> retKey, retValue;
    retKey.reserve(downsampledMap.size());
    retValue.reserve(downsampledMap.size());
    retPixmapIdx.reserve(downsampledMap.size());
    for(QMap<QPair<double, double>,int>::const_iterator iter = downsampledMap.constBegin() ; iter != downsampledMap.constEnd() ; ++iter)
    {
        retKey.append(iter.key().first);
        retValue.append(iter.key().second);
        retPixmapIdx.append(iter.value());
    }
    emit downSampledPlotReady(retKey, retValue, retPixmapIdx);
    m_calculating = false;
}
As you can see, I first round every position to a step the size of one display pixel, then iterate over the resulting grid boxes and return the list of boxes.
With this code each box can contain many points, but we return just a single position, so overlapping points are removed.
This is code I wrote by hand, and I don't think it has the best possible performance.
My questions are:
1- Is there a library or algorithm that aggregates scatter data the way we need?
2- Can our source code be improved?
the point cloud library and Grid Voxel in 2D aspects can process and downsample point clouds perfectly.
Related
I am trying to apply a Kalman Filter in C++ with OpenCV in order to filter some tracks. The first step to make it work for me was to predict the points with the filter from a vector of Points2f.
My code is the following one :
// NOTE(review): everything below runs once per tracked point (inside the
// j-loop), so the filter is re-initialised for every measurement and its
// accumulated state is discarded each iteration -- predictions can never
// converge.  The initialisation belongs outside the loop, the initial state
// belongs in statePost (predict() overwrites statePre), and the measurement
// should be fed to kalmanFilter.correct().
cv::KalmanFilter kalmanFilter(4,2,0, CV_32F);
kalmanFilter.transitionMatrix = transitionMat;
for(int i = 0 ; i < oldTrackeables.size() ; i++)
for(int j = 0 ; j < oldTrackeables[i].getTrack().size() ; j++)
{
// Build the 2x1 measurement vector from the tracked point.
cv::Size msmtSize(2,1);
cv::Mat measurementMat(msmtSize, CV_32F);
measurementMat.setTo(cv::Scalar(0));
measurementMat.at<float>(0) = oldTrackeables[i].getTrack()[j].x;
measurementMat.at<float>(1) = oldTrackeables[i].getTrack()[j].y;
//Initialisation of the Kalman filter
// (Seeding statePre is ineffective: predict() recomputes it from statePost.)
kalmanFilter.statePre.at<float>(0) = (float) oldTrackeables[i].getTrack()[j].x;
kalmanFilter.statePre.at<float>(1) = (float) oldTrackeables[i].getTrack()[j].y;
kalmanFilter.statePre.at<float>(2) = (float) 2;
kalmanFilter.statePre.at<float>(3) = (float) 3;
cv::setIdentity(kalmanFilter.measurementMatrix);
cv::setIdentity(kalmanFilter.processNoiseCov, cv::Scalar::all(1e-4));
cv::setIdentity(kalmanFilter.measurementNoiseCov, cv::Scalar::all(.1));
cv::setIdentity(kalmanFilter.errorCovPost, cv::Scalar::all(.1));
//Prediction
cv::Mat prediction = kalmanFilter.predict();
// NOTE(review): these two copies undo the update step -- predict() already
// maintains statePost/errorCovPost internally, and the measurement never
// reaches the filter because correct(measurementMat) is never called.
kalmanFilter.statePre.copyTo(kalmanFilter.statePost);
kalmanFilter.errorCovPre.copyTo(kalmanFilter.errorCovPost);
cv::Point predictPt(prediction.at<float>(0), prediction.at<float>(1));
cv::Point Mc = oldTrackeables[i].getMassCenter();
// Draw the predicted position for visual debugging.
cv::circle(kalmat, predictPt, 16, cv::Scalar(0,255,0), 3, 2, 1);
std::cout<<"prediction : x = " << predictPt.x << " - y = " << predictPt.y <<std::endl;
std::cout<<"position captée : x = " << oldTrackeables[i].getTrack()[j].x << " - y = " << oldTrackeables[i].getTrack()[j].y << std::endl;
std::cout<<"size of frame : rows = " << frame.rows << " - width = " << frame.cols <<std::endl;
std::cout<<"size of kalmat : rows = " << kalmat.rows << " - width = " << kalmat.cols <<std::endl;
cv::imshow("kalmat", kalmat);
Where oldTrackeables[i].getTrack()[j] are just some Points2f from a vector.
The tracking is correct, but the Kalman filter does not give "correct" values for the prediction - For example, the program displays :
prediction : x = 0 - y = 0 -
position captée : x = 138.29 - y = 161.078 (position of the original point).
I've really been looking a lot for answers and trying many different ways to do it but I can't find anything that really helps me... The closer one I found was this one : http://answers.opencv.org/question/24865/why-kalman-filter-keeps-returning-the-same-prediction/ But it did not help me solve my problem...
If any of you has an element of answer of could help me understand the problem, I'd be very grateful.
Thank you.
First of all I would have moved all the init stuff outside the loop otherwise you will override the internal states in the filter. Also change the statePre to statPost
//Initialisation of the Kalman filter
// Corrected initialisation: done ONCE, outside the measurement loop, and
// written to statePost (the corrected state the filter carries forward)
// rather than statePre, which predict() overwrites on every call.
kalmanFilter.statePost.at<float>(0) = (float) 0;
kalmanFilter.statePost.at<float>(1) = (float) 0;
kalmanFilter.statePost.at<float>(2) = (float) 2;
kalmanFilter.statePost.at<float>(3) = (float) 3;
cv::setIdentity(kalmanFilter.measurementMatrix);
cv::setIdentity(kalmanFilter.processNoiseCov, cv::Scalar::all(1e-4));
cv::setIdentity(kalmanFilter.measurementNoiseCov,cv::Scalar::all(.1));
cv::setIdentity(kalmanFilter.errorCovPost, cv::Scalar::all(.1));
The part:
kalmanFilter.statePre.copyTo(kalmanFilter.statePost);
kalmanFilter.errorCovPre.copyTo(kalmanFilter.errorCovPost);
should be removed since this is done internally in the predict phase.
Finally as #Mozfox says, the correct phase is not present in the loop code you provided. Add:
kalmanFilter.predict(measurementMat);
I think you are missing correction phase for measurement computation.
Following are two characters outlines extracted using freetype and processed in some loops using the quadric bezier curve formula(helped by opensans regular ttf font). I am not sure if what i did is correct, the fonts generated by the c++ code are not identical with the official counterparts.
The code will get the outline of the letter, iterate over it, get two on points and one off point(bezier quadric control/end points), and calculate the resulting points using the conic bezier formula. The points are spewed on the console(debugging purpose). Using gnuplot to draw the resulting points, connecting each dot(with lines option in gnuplot).
This is the letter h and e:
http://imgur.com/JyHkZ7H,Aapr6zX
What is weird about the letter e is that the inner outline ends at a point outside the contour (it does not connect with the starting point). This will cause problems when triangulating the outline polygon.
Am I missing some option when loading the font, or when loading the letter in FreeType?
Below you can see the relevant portions of the code.
Opening the font:
FT_New_Face(ft, "OpenSans-Regular.ttf", 0, &this->face)
Creating the glyph:
FT_GlyphSlot g = this->face->glyph;
Creating face from letter:
for (p = text; *p; p++) {
/* Try to load and render the character */
if (FT_Load_Char(this->face, *p, FT_LOAD_RENDER))
continue;
Iterating over the Freetype generated outline:
// Walk each contour of the glyph outline and emit sampled points of every
// quadratic Bezier segment (on-point, off control point, on-point).
//
// FIX: the original copied the first tag into a char and incremented the
// VALUE ("tag++"), which steps through character codes instead of the tags
// array -- every tag after the first was garbage.  The tags array is now
// indexed directly by point index (the author's follow-up confirms this
// fixed the rendering).
int start_point = 0;
int end_point;
for(int current_contour = 0; current_contour < g->outline.n_contours; current_contour++){
    end_point = g->outline.contours[current_contour];
    for(int current_point = start_point; current_point <= end_point; current_point++){
        std::bitset<8> first_tags(g->outline.tags[current_point]);
        FT_Vector first_point = g->outline.points[current_point];
        current_point++;
        if(first_tags[0] == FT_CURVE_TAG_ON){
            // Wrap to the contour's start if the on-point was the contour's
            // last point (the original read past end_point here).
            int second_index = (current_point > end_point) ? start_point : current_point;
            std::bitset<8> second_tags(g->outline.tags[second_index]);
            FT_Vector second_point = g->outline.points[second_index];
            current_point++;
            if(second_tags[0] != FT_CURVE_TAG_ON){
                // Off-point: this is the control point of a quadratic
                // segment; the segment's end wraps to the contour start if
                // we ran off the end of the contour.
                int third_index = (current_point > end_point) ? start_point : current_point;
                FT_Vector third_point = g->outline.points[third_index];
                // Sample the quadratic Bezier at 101 parameter values.
                for(double t = 0; t <= 1; t+=0.01){
                    FT_Vector letter_point;
                    letter_point.x = (1-t)*(1-t)*first_point.x + 2*(1-t)*t*second_point.x + t*t*third_point.x;
                    letter_point.y = (1-t)*(1-t)*first_point.y + 2*(1-t)*t*second_point.y + t*t*third_point.y;
                    if(current_letter == 1){
                        std::cout << letter_point.x << " " << letter_point.y << std::endl;
                    }
                }
                // Step back so this segment's end point is reused as the
                // next segment's start point.
                current_point--;
            }
        }
    }
    start_point = end_point+1;
}
Thanks.
Accessing the tags array by key fixed most of the problems
Here is my update method that currently cycles through my 6 sprites for a walking character, it counts 0,1,2,3,4,5 and then resets back to 0.
The task is to have it to cycle forwards and then backwards 0,1,2,3,4,5,4,3,2,1,0,1,2... etc
I've tried to implement several counting methods to count up and count down given certain conditions, but they appear to fight and loop between frame 4/5.
Is there a quick solution? or would anyone be able to point me in the direction of a solution please :)
// Advance the walking animation: every TICKS_PER_SECOND / playerSheetLength
// ticks, step to the next sprite (wrapping past the last one) and point the
// source rectangle at that frame's cell in the horizontal sprite strip.
void SpriteGame::Update(int tickTotal, int tickDelta){
    const int frameInterval = TICKS_PER_SECOND / playerSheetLength;
    // Not yet time for the next frame -- nothing to do.
    if ( tickTotal < this->playerLastFrameChange + frameInterval )
        return;
    this->playerLastFrameChange = tickTotal;
    // Step forward and wrap back to frame 0 past the end of the sheet.
    this->playerFrame = this->playerFrame + 1;
    if (this->playerFrame >= this->playerSheetLength)
        this->playerFrame = 0;
    // Each frame occupies one widthOfSprite-wide square cell.
    const int cellLeft = this->playerFrame * widthOfSprite;
    this->playerSourceRect->left = cellLeft;
    this->playerSourceRect->top = 0;
    this->playerSourceRect->right = cellLeft + widthOfSprite;
    this->playerSourceRect->bottom = widthOfSprite;
}
Implementing the abs() method worked, counting 0,1,2,3,4,5,4,3,2,1,0,1,2... etc.
// playerFrame is initialised to -4 at the top of the .cpp; it counts
// -4..5 forever, and abs() maps that onto 4,3,2,1,0,1,2,3,4,5 so the
// animation ping-pongs through the sheet.
this->playerFrame = this->playerFrame + 1; // keep counting while <= 5 (sheet length)
if (this->playerFrame >= this->playerSheetLength)
{
    this->playerFrame = -4; // restart the -4..5 cycle
}
// Select the cell for |playerFrame| in the horizontal sprite strip.
this->playerSourceRect->left = (abs(playerFrame)) * widthOfSprite;
this->playerSourceRect->top = 0;
this->playerSourceRect->right = (abs(playerFrame)+1) * widthOfSprite;
this->playerSourceRect->bottom = widthOfSprite; // FIX: was missing its terminating ';'
A fairly simple way to get it to count up and down continuously would be to run your count from -4 up to 5 (always incrementing). When it goes past 5, set it back to -4.
To get the actual sprite index from that count, just take the absolute of it (abs()). That will give you the positive equivalent of any negative values, but leave the positive values unchanged.
How about sth like this:
// 'direction' must survive between updates -- make it a class member or,
// as here, a static; the original's plain local was re-initialised to
// "forward" on every call.
static char direction = 1; //initialize forward
// Reverse at either END of the sheet BEFORE stepping.  The original flipped
// after the increment and tested ">= playerSheetLength", which let
// playerFrame reach playerSheetLength -- one past the last sprite index.
if((this->playerFrame >= this->playerSheetLength - 1 && direction > 0) ||
   (this->playerFrame <= 0 && direction < 0))
    direction *= -1;
this->playerFrame += direction;
To stick with the arithmetic approach you've started with you need something like this pseudocode
// Pseudocode: ping-pong the frame counter with an incrementer whose sign
// flips at either end.  The direction is reversed BEFORE stepping, so
// currentFrame always stays inside [minFrame, maxFrame].
if( /*it's time to update frame*/ )
{
if( currentFrame >= maxFrame ||
currentFrame <= minFrame )
{
incrementer *= -1;
}
currentFrame += incrementer;
// Then calculate the next frame boundary based on the frame number
}
Or if you kept a collection of rectangles in a vector, you could simply iterate to end, then reverse iterate to the beginning, and so on.
Google std::vector, std::vector::begin(), std::vector::end(), std::vector::rbegin(), std::vector::rend().
Quick example:
// Keep one source rectangle per animation frame; walk the vector forward,
// then backward with the reverse iterators, to ping-pong the animation.
vector<rect> vec;
.
.
.
// forward pass: frames 0 .. n-1
for(vector<rect>::iterator iter = vec.begin(); iter != vec.end(); ++iter)
{
....
}
// backward pass: frames n-1 .. 0
for(vector<rect>::reverse_iterator iter = vec.rbegin(); iter != vec.rend(); ++iter)
{
....
}
I'm doing online destructive clustering (clusters replace clustered objects) on a list of class instances (stl::list).
Background
My list of current percepUnits is: stl::list<percepUnit> units; and for each iteration I get a new list of input percepUnits stl::list<percepUnit> scratch; that need to be clustered with the units.
I want to maintain a fixed number of percepUnits (so units.size() is constant), so for each new scratch percepUnit I need to merge it with the nearest percepUnit in units. Following is a code snippet that builds a list (dists) of structures (percepUnitDist) that contain pointers to each pair of items in scratch and units percepDist.scratchUnit = &(*scratchUnit); and percepDist.unit = &(*unit); and their distance. Additionally, for each item in scratch I keep track of which item in units has the least distance minDists.
// For every scratch percepUnit:
// Build 'dists', caching the distance from every scratch unit to every
// existing unit, and 'minDists', the smallest distance seen for each
// scratch unit.  The raw pointers taken below stay valid because std::list
// never relocates its elements.
// NOTE(review): dists holds |scratch| x |units| entries -- O(n*m) memory.
for (scratchUnit = scratch.begin(); scratchUnit != scratch.end(); scratchUnit++) {
float minDist=2025.1172; // This is the max possible distance in unnormalized CIELuv, and much larger than the normalized dist.
// For every percepUnit:
for (unit = units.begin(); unit != units.end(); unit++) {
// compare pairs
float dist = featureDist(*scratchUnit, *unit, FGBG);
//cout << "distance: " << dist << endl;
// Put pairs in a structure that caches their distances
percepUnitDist percepDist;
percepDist.scratchUnit = &(*scratchUnit); // address of where scratchUnit points to.
percepDist.unit = &(*unit);
percepDist.dist = dist;
// Figure out the percepUnit that is closest to this scratchUnit.
if (dist < minDist)
minDist = dist;
dists.push_back(percepDist); // append dist struct
}
minDists.push_back(minDist); // append the min distance to the nearest percepUnit for this particular scratchUnit.
}
So now I just need to loop through the percepUnitDist items in dists and match the distances with the minimum distances to figure out which percepUnit in scratch should be merged with which percepUnit in units. The merging process mergePerceps() creates a new percepUnit which is a weighted average of the "parent" percepUnits in scratch and units.
Question
I want to replace the instance in the units list with the new percepUnit constructed by mergePerceps(), but I would like to do so in the context of looping through the percepUnitDists. This is my current code:
// Loop through dists and merge all the closest pairs.
// For each cached (scratchUnit, unit, dist) entry, merge the pair whose
// distance equals some scratchUnit's recorded minimum distance.
// NOTE(review): the condition comment below claims "the closest cluster has
// not already been merged", but the code never checks any flag on
// distIter->unit -- two scratch units can merge into (and overwrite) the
// same unit.  Matching by float equality (*minDistsIter == distIter->dist)
// can also pair a minimum with the wrong scratchUnit when two distances
// happen to be identical.  Confirm whether this is intended.
// Loop through all dists
for (distIter = dists.begin(); distIter != dists.end(); distIter++) {
// Loop through all minDists for each scratchUnit.
for (minDistsIter = minDists.begin(); minDistsIter != minDists.end(); minDistsIter++) {
// if this is the closest cluster, and the closest cluster has not already been merged, and the scratch has not already been merged.
if (*minDistsIter == distIter->dist and not distIter->scratchUnit->remove) {
percepUnit newUnit;
mergePerceps(*(distIter->scratchUnit), *(distIter->unit), newUnit, FGBG);
*(distIter->unit) = newUnit; // replace the cluster with the new merged version.
distIter->scratchUnit->remove = true;
}
}
}
I thought that I could replace the instance in units via the percepUnitDist pointer with the new percepUnit instance using *(distIter->unit) = newUnit;, but that does not seem to be working as I'm seeing a memory leak, implying the instances in the units are not getting replaced.
How do I delete the percepUnit in the units list and replace it with a new percepUnit instance such that the new unit is located in the same location?
EDIT1
Here is the percepUnit class. Note the cv::Mat members. Following is the mergePerceps() function and the mergeImages() function on which it depends:
// Function to construct an accumulation.
// Centres 'scratch' and 'unit' on a canvas sized to the larger of the two,
// weights them, and sums them into 'merged'.  maskOrImage selects a
// single-channel ("mask") or three-channel ("image") output; FGBG is unused
// here and kept only for interface compatibility.
//
// FIX: the original scaled 'scratch' and 'unit' IN PLACE ("scratch *=
// scratchWeight"), which -- as its own comment admitted -- corrupted the
// caller's images, because cv::Mat arguments share pixel data with the
// caller.  The weights are now applied to the padded copies instead.
void clustering::mergeImages(Mat &scratch, Mat &unit, cv::Mat &merged, const string maskOrImage, const string FGBG, const float scratchWeight, const float unitWeight) {
    int width, height, type=CV_8UC3;
    Mat scratchImagePad, unitImagePad;
    // use the resolution and aspect of the largest of the pair.
    if (unit.cols > scratch.cols)
        width = unit.cols;
    else
        width = scratch.cols;
    if (unit.rows > scratch.rows)
        height = unit.rows;
    else
        height = scratch.rows;
    if (maskOrImage == "mask")
        type = CV_8UC1; // single channel mask
    else if (maskOrImage == "image")
        type = CV_8UC3; // three channel image
    else
        cout << "maskOrImage is not 'mask' or 'image'\n";
    scratchImagePad = Mat(height, width, type, Scalar::all(0));
    unitImagePad = Mat(height, width, type, Scalar::all(0));
    // copy the (unweighted) inputs into the centres of the padded canvases.
    scratch.copyTo(scratchImagePad(Rect((scratchImagePad.cols-scratch.cols)/2,
                                        (scratchImagePad.rows-scratch.rows)/2,
                                        scratch.cols,
                                        scratch.rows)));
    unit.copyTo(unitImagePad(Rect((unitImagePad.cols-unit.cols)/2,
                                  (unitImagePad.rows-unit.rows)/2,
                                  unit.cols,
                                  unit.rows)));
    // Weight the padded COPIES (the surrounding zeros are unaffected) and
    // sum them; the caller's Mats are left untouched.
    scratchImagePad *= scratchWeight;
    unitImagePad *= unitWeight;
    merged = scratchImagePad + unitImagePad;
}
// Merge two perceps and return a new percept to replace them.
// Builds 'mergedUnit' as a weighted average of 'scratch' and 'unit': the
// image members are combined on a common canvas by mergeImages(), and the
// scalar members (position, size, area, Luv colour) are linearly blended
// with the same weights.
// NOTE(review): 'scratch' and 'unit' are taken BY VALUE, but percepUnit's
// implicit copy shares cv::Mat pixel data with the originals; since
// mergeImages() scales its inputs in place, the caller's images are
// modified through these "copies".  Confirm before relying on the inputs
// after this call.
void clustering::mergePerceps(percepUnit scratch, percepUnit unit, percepUnit &mergedUnit, const string FGBG) {
Mat accumulation;
Mat accumulationMask;
Mat meanColour;
int x, y, w, h, area;
float l,u,v;
int numMerges=0;
std::vector<float> featuresVar; // Normalized, Sum, Variance.
//float featuresVarMin, featuresVarMax; // min and max variance accross all features.
float scratchWeight, unitWeight;
// The new input (scratch) dominates the blend; foreground percepts keep
// more of the existing cluster.
if (FGBG == "FG") {
// foreground percepts don't get merged as much.
scratchWeight = 0.65;
unitWeight = 1-scratchWeight;
} else {
scratchWeight = 0.85;
unitWeight = 1-scratchWeight;
}
// Images TODO remove the meanColour if needbe.
mergeImages(scratch.image, unit.image, accumulation, "image", FGBG, scratchWeight, unitWeight);
mergeImages(scratch.mask, unit.mask, accumulationMask, "mask", FGBG, scratchWeight, unitWeight);
mergeImages(scratch.meanColour, unit.meanColour, meanColour, "image", "FG", scratchWeight, unitWeight); // merge images
// Position and size.
x = (scratch.x1*scratchWeight) + (unit.x1*unitWeight);
y = (scratch.y1*scratchWeight) + (unit.y1*unitWeight);
w = (scratch.w*scratchWeight) + (unit.w*unitWeight);
h = (scratch.h*scratchWeight) + (unit.h*unitWeight);
// area
area = (scratch.area*scratchWeight) + (unit.area*unitWeight);
// colour
l = (scratch.l*scratchWeight) + (unit.l*unitWeight);
u = (scratch.u*scratchWeight) + (unit.u*unitWeight);
v = (scratch.v*scratchWeight) + (unit.v*unitWeight);
// Number of merges
// Track how many source patches have been folded into this cluster.
if (scratch.numMerges < 1 and unit.numMerges < 1) { // both units are patches
numMerges = 1;
} else if (scratch.numMerges < 1 and unit.numMerges >= 1) { // unit A is a patch, B a percept
numMerges = unit.numMerges + 1;
} else if (scratch.numMerges >= 1 and unit.numMerges < 1) { // unit A is a percept, B a patch.
numMerges = scratch.numMerges + 1;
cout << "merged scratch??" <<endl;
// TODO this may be an impossible case.
} else { // both units are percepts
numMerges = scratch.numMerges + unit.numMerges;
cout << "Merging two already merged Percepts" <<endl;
// TODO this may be an impossible case.
}
// Create unit.
mergedUnit = percepUnit(accumulation, accumulationMask, x, y, w, h, area); // time is the earliest value in times?
mergedUnit.l = l; // members not in the constructor.
mergedUnit.u = u;
mergedUnit.v = v;
mergedUnit.numMerges = numMerges;
mergedUnit.meanColour = meanColour;
mergedUnit.pActivated = unit.pActivated; // new clusters retain parent's history of activation.
mergedUnit.scratch = false;
mergedUnit.habituation = unit.habituation; // we inherit the habituation of the cluster we merged with.
}
EDIT2
Changing the copy and assignment operators had performance side-effects and did not seem to resolve the problem. So I've added a custom function to do the replacement, which just like the copy operator makes copies of each member and make's sure those copies are deep. The problem is that I still end up with a leak.
So I've changed this line: *(distIter->unit) = newUnit;
to this: (*(distIter->unit)).clone(newUnit)
Where the clone method is as follows:
// Deep Copy of members
// Field-by-field replacement used instead of operator=() when overwriting a
// unit in the 'units' list.  cv::Mat assignment only copies the header and
// shares pixel data, so the four Mat members are clone()d to get fully
// independent buffers; every other member is a plain value copy.
void percepUnit::clone(const percepUnit &source) {
// Deep copy of Mats
this->image = source.image.clone();
this->mask = source.mask.clone();
this->alphaImage = source.alphaImage.clone();
this->meanColour = source.meanColour.clone();
// shallow copies of everything else
this->alpha = source.alpha;
this->fadingIn = source.fadingIn;
this->fadingHold = source.fadingHold;
this->fadingOut = source.fadingOut;
this->l = source.l;
this->u = source.u;
this->v = source.v;
this->x1 = source.x1;
this->y1 = source.y1;
this->w = source.w;
this->h = source.h;
this->x2 = source.x2;
this->y2 = source.y2;
this->cx = source.cx;
this->cy = source.cy;
this->numMerges = source.numMerges;
this->id = source.id;
this->area = source.area;
this->features = source.features;
this->featuresNorm = source.featuresNorm;
this->remove = source.remove;
this->fgKnockout = source.fgKnockout;
this->colourCalculated = source.colourCalculated;
this->normalized = source.normalized;
this->activation = source.activation;
this->activated = source.activated;
this->pActivated = source.pActivated;
this->habituation = source.habituation;
this->scratch = source.scratch;
this->FGBG = source.FGBG;
}
And yet, I still see a memory increase. The increase does not happen if I comment out that single replacement line. So I'm still stuck.
EDIT3
I can prevent memory from increasing if I disable the cv::Mat cloning code in the function above:
// Deep Copy of members
// Debugging variant of percepUnit::clone(): the cv::Mat deep copies are
// commented out (and an earlier experiment that release()d the Mats first
// is kept for reference), so this->image/mask/alphaImage/meanColour are
// left UNCHANGED.  With the Mat cloning disabled the observed memory growth
// stops, which is the clue discussed in EDIT3/EDIT4.
void percepUnit::clone(const percepUnit &source) {
/* try releasing Mats first?
// No effect on memory increase, but the refCount is decremented.
this->image.release();
this->mask.release();
this->alphaImage.release();
this->meanColour.release();*/
/* Deep copy of Mats
this->image = source.image.clone();
this->mask = source.mask.clone();
this->alphaImage = source.alphaImage.clone();
this->meanColour = source.meanColour.clone();*/
// shallow copies of everything else
this->alpha = source.alpha;
this->fadingIn = source.fadingIn;
this->fadingHold = source.fadingHold;
this->fadingOut = source.fadingOut;
this->l = source.l;
this->u = source.u;
this->v = source.v;
this->x1 = source.x1;
this->y1 = source.y1;
this->w = source.w;
this->h = source.h;
this->x2 = source.x2;
this->y2 = source.y2;
this->cx = source.cx;
this->cy = source.cy;
this->numMerges = source.numMerges;
this->id = source.id;
this->area = source.area;
this->features = source.features;
this->featuresNorm = source.featuresNorm;
this->remove = source.remove;
this->fgKnockout = source.fgKnockout;
this->colourCalculated = source.colourCalculated;
this->normalized = source.normalized;
this->activation = source.activation;
this->activated = source.activated;
this->pActivated = source.pActivated;
this->habituation = source.habituation;
this->scratch = source.scratch;
this->FGBG = source.FGBG;
}
EDIT4
While I still can't explain this issue, I did notice another hint. I realized that this leak can also be stopped if I don't normalize those features I use to cluster via featureDist() (but continue to clone cv::Mats). The really odd thing is that I rewrote that code entirely and still the problem persists.
Here is the featureDist function:
float clustering::featureDist(percepUnit unitA, percepUnit unitB, const string FGBG) {
float distance=0;
if (FGBG == "BG") {
for (unsigned int i=0; i<unitA.featuresNorm.rows; i++) {
distance += pow(abs(unitA.featuresNorm.at<float>(i) - unitB.featuresNorm.at<float>(i)),0.5);
//cout << "unitA.featuresNorm[" << i << "]: " << unitA.featuresNorm[i] << endl;
//cout << "unitB.featuresNorm[" << i << "]: " << unitB.featuresNorm[i] << endl;
}
// for FG, don't use normalized colour features.
// TODO To include the area use i=4
} else if (FGBG == "FG") {
for (unsigned int i=4; i<unitA.features.rows; i++) {
distance += pow(abs(unitA.features.at<float>(i) - unitB.features.at<float>(i)),0.5);
}
} else {
cout << "FGBG argument was not FG or BG, returning 0." <<endl;
return 0;
}
return pow(distance,2);
}
Features used to be a vector of floats, and thus the normalization code was as follows:
// Min-max normalize every feature across the union of 'scratch' and
// 'units', writing the results into each unit's featuresNorm.
// (Historical version: 'features' was still a std::vector<float> here.)
void clustering::normalize(list<percepUnit> &scratch, list<percepUnit> &units) {
    list<percepUnit>::iterator unit;
    list<percepUnit*>::iterator unitPtr;
    vector<float> min,max;
    list<percepUnit*> masterList; // list of pointers.
    // generate pointers so both lists can be walked as one
    for (unit = scratch.begin(); unit != scratch.end(); unit++)
        masterList.push_back(&(*unit)); // add pointer to where unit points to.
    for (unit = units.begin(); unit != units.end(); unit++)
        masterList.push_back(&(*unit)); // add pointer to where unit points to.
    // Guard: front() on an empty list is undefined behaviour.
    if (masterList.empty())
        return;
    int numFeatures = masterList.front()->features.size(); // all percepts have the same number of features.
    min.resize(numFeatures); // allocate for the number of features we have.
    max.resize(numFeatures);
    // calculate min and max for each feature.
    for (int i=0; i<numFeatures; i++) {
        min[i] = masterList.front()->features[i]; // starting point.
        max[i] = min[i];
        for (unitPtr = masterList.begin(); unitPtr != masterList.end(); unitPtr++) {
            if ((*unitPtr)->features[i] < min[i])
                min[i] = (*unitPtr)->features[i];
            if ((*unitPtr)->features[i] > max[i])
                max[i] = (*unitPtr)->features[i];
        }
    }
    // Normalize features according to min/max.
    for (int i=0; i<numFeatures; i++) {
        const float range = max[i] - min[i];
        for (unitPtr = masterList.begin(); unitPtr != masterList.end(); unitPtr++) {
            // Guard against division by zero when a feature is constant
            // across all units (the original produced NaN/inf here).
            (*unitPtr)->featuresNorm[i] = (range != 0.0f) ? ((*unitPtr)->features[i]-min[i]) / range : 0.0f;
            (*unitPtr)->normalized = true;
        }
    }
}
I changed the features type to a cv::Mat so I could use the opencv normalization function, so I rewrote the normalization function as follows:
// Min-max normalize every feature across the union of 'units' and
// 'scratch' using cv::normalize, writing results into each unit's
// featuresNorm.  For each feature index a row vector of that feature's
// value for every unit is gathered, normalized to [0, 1] in place, then
// scattered back in the same order.
void clustering::normalize(list<percepUnit> &scratch, list<percepUnit> &units) {
    // Guard: the original dereferenced units.begin() unconditionally, which
    // is undefined behaviour when the list is empty.
    if (units.empty() && scratch.empty())
        return;
    list<percepUnit>::iterator unit;
    // All percepts share the same feature count; read it from whichever
    // list is non-empty.
    const int numFeatures = (units.empty() ? scratch.begin() : units.begin())->features.rows;
    const int numUnits = static_cast<int>(units.size() + scratch.size());
    // One row per pass: the current feature's value for every unit.
    Mat featureMat = Mat(1, numUnits, CV_32FC1, Scalar(0));
    // For each feature
    for (int i=0; i<numFeatures; i++) {
        // gather: j is the unit index, i is the feature index.
        int j=0;
        float value;
        for (unit = units.begin(); unit != units.end(); unit++) {
            value = unit->features.at<float>(i);
            featureMat.at<float>(j) = value;
            j++;
        }
        for (unit = scratch.begin(); unit != scratch.end(); unit++) {
            value = unit->features.at<float>(i);
            featureMat.at<float>(j) = value;
            j++;
        }
        // Normalize this featureMat in place
        cv::normalize(featureMat, featureMat, 0, 1, NORM_MINMAX);
        // scatter the normalized values back, visiting the units in the
        // same order as the gather above.
        j=0;
        for (unit = units.begin(); unit != units.end(); unit++) {
            value = featureMat.at<float>(j);
            unit->featuresNorm.at<float>(i) = value;
            j++;
        }
        for (unit = scratch.begin(); unit != scratch.end(); unit++) {
            value = featureMat.at<float>(j);
            unit->featuresNorm.at<float>(i) = value;
            j++;
        }
    }
}
I can't understand what the interaction between mergePerceps and normalization is, especially since normalization is an entirely rewritten function.
Update
Massif and my /proc memory reporting don't agree. Massif says there is no effect of normalization on memory usage, only commenting out the percepUnit::clone() operation bypasses the leak.
Here is all the code, in case the interaction is somewhere else I am missing.
Here is another version of the same code with the dependence on OpenCV GPU removed, to facilitate testing...
It was recommended by Nghia (on the opencv forum) that I try and make the percepts a constant size. Sure enough, if I fix the dimensions and type of the cv::Mat members of percepUnit, then the leak disappears.
So it seems to me this is a bug in OpenCV that affects calling clone() and copyTo() on Mats of different sizes that are class members. So far I have been unable to reproduce it in a simple program. The leak does seem small enough that it may be the headers leaking, rather than the underlying image data.
I've been working on this for several weeks but have been unable to get my algorithm working properly, and I'm at my wits' end. Here's an illustration of what I have achieved:
If everything were working, I would expect a perfect circle/oval at the end.
My sample points (in white) are recalculated every time a new control point (in yellow) is added. At 4 control points everything looks perfect; as I add a 5th on top of the 1st things still look alright, but then on the 6th it starts to go off to the side, and on the 7th it jumps up to the origin!
Below I'll post my code, where calculateWeightForPointI contains the actual algorithm. And for reference, here is the information I'm trying to follow. I'd be very grateful if someone could take a look for me.
// Evaluate an order-4 (cubic) B-spline through 'controls', writing 21
// evenly spaced samples of the curve into 'samples' via the Cox-de Boor
// basis functions (calculateWeightForPointI).
void updateCurve(const std::vector<glm::vec3>& controls, std::vector<glm::vec3>& samples)
{
    int subCurveOrder = 4; // = k = I want to break my curve into to cubics
    const int numControls = static_cast<int>(controls.size());
    // De boor 1st attempt -- need at least k control points.
    if(numControls >= subCurveOrder)
    {
        createKnotVector(subCurveOrder, numControls);
        samples.clear();
        for(int steps=0; steps<=20; steps++)
        {
            // use steps to get a 0-1 range value for progression along the curve
            // then get that value into the range [k-1, n+1]
            // k-1 = subCurveOrder-1
            // n+1 = always the number of total control points
            float t = ( steps / 20.0f ) * ( numControls - (subCurveOrder-1) ) + subCurveOrder-1;
            // The basis test in calculateWeightForPointI is half-open
            // (knot(i) <= t < knot(i+1)), so at the exact end of the domain
            // every weight evaluates to 0 and the final sample collapses to
            // the origin.  Nudge the last parameter just inside the domain.
            if(steps == 20)
                t -= 0.0001f;
            glm::vec3 newPoint(0,0,0);
            // Weighted sum of all control points.
            for(int i=1; i <= numControls; i++)
            {
                float weightForControl = calculateWeightForPointI(i, subCurveOrder, numControls, t);
                newPoint += weightForControl * controls.at(i-1);
            }
            samples.push_back(newPoint);
        }
    }
}
//i = the weight we're looking for, i should go from 1 to n+1, where n+1 is equal to the total number of control points.
//k = curve order = power/degree +1. eg, to break whole curve into cubics use a curve order of 4
//cps = number of total control points
//t = current step/interp value
// Cox-de Boor recursion computing the basis function N_{i,k}(t).  Note the
// base case uses the HALF-OPEN interval [knot(i), knot(i+1)), so a t equal
// to the last knot of the domain makes every basis function return 0.
float calculateWeightForPointI( int i, int k, int cps, float t )
{
//test if we've reached the bottom of the recursive call
if( k == 1 )
{
if( t >= knot(i) && t < knot(i+1) )
return 1;
else
return 0;
}
// Blend the two order-(k-1) basis functions; a zero denominator marks a
// repeated knot and that term is defined to be 0.
float numeratorA = ( t - knot(i) );
float denominatorA = ( knot(i + k-1) - knot(i) );
float numeratorB = ( knot(i + k) - t );
float denominatorB = ( knot(i + k) - knot(i + 1) );
float subweightA = 0;
float subweightB = 0;
if( denominatorA != 0 )
subweightA = numeratorA / denominatorA * calculateWeightForPointI(i, k-1, cps, t);
if( denominatorB != 0 )
subweightB = numeratorB / denominatorB * calculateWeightForPointI(i+1, k-1, cps, t);
return subweightA + subweightB;
}
//returns the knot value at the passed in index
//if i = 1 and we want Xi then we have to remember to index with i-1
// (vector::at() throws std::out_of_range on a bad index, which is handy
// while debugging.)
float knot(int indexForKnot)
{
// When getting the index for the knot function i remember to subtract 1 from i because of the difference caused by us counting from i=1 to n+1 and indexing a vector from 0
return knotVector.at(indexForKnot-1);
}
//calculate the whole knot vector
// Rebuilds the uniform knot vector 0, 1, ..., curveOrderK+numControlPoints-1.
void createKnotVector(int curveOrderK, int numControlPoints)
{
    // FIX: this is called again every time a control point is added, but the
    // original only push_back'd -- stale knots from earlier calls stayed at
    // the front of knotVector and corrupted every later basis evaluation,
    // which is why the curve degraded as the 6th and 7th points were added.
    knotVector.clear();
    int knotSize = curveOrderK + numControlPoints;
    for(int count = 0; count < knotSize; count++)
    {
        knotVector.push_back(count);
    }
}
Your algorithm seems to work for any inputs I tried it on. Your problem might be a that a control point is not where it is supposed to be, or that they haven't been initialized properly. It looks like there are two control-points, half the height below the bottom left corner.