OpenCV 'cvPoint' and 'CV_FONT_HERSHEY_SIMPLEX' not declared in this scope - C++

I'm trying to get LSD-SLAM (the version by Kevin George) to build.
But I got stuck because OpenCV doesn't want to be cooperative: when running catkin_make, it doesn't recognize the following two symbols.
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp: In function ‘void lsd_slam::printMessageOnCVImage(cv::Mat&, std::__cxx11::string, std::__cxx11::string)’:
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp:52:28: error: ‘cvPoint’ was not declared in this scope
cv::putText(image, line2, cvPoint(10,image.rows-5),
^~~~~~~
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp:52:28: note: suggested alternative: ‘cvRound’
cv::putText(image, line2, cvPoint(10,image.rows-5),
^~~~~~~
cvRound
and
/home/adas/ros_workspace/src/src/lsd_slam/lsd_slam_core/src/util/globalFuncs.cpp:53:6: error: ‘CV_FONT_HERSHEY_SIMPLEX’ was not declared in this scope
CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
In the case of cvPoint, catkin_make suggests replacing cvPoint with cvRound, which doesn't work: I then get a new error that no function matching those argument types can be found.
Here is the full source code in case it helps.
/**
* This file is part of LSD-SLAM.
*
* Copyright 2013 Jakob Engel <engelj at in dot tum dot de> (Technical University of Munich)
* For more information see <http://vision.in.tum.de/lsdslam>
*
* LSD-SLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* LSD-SLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with LSD-SLAM. If not, see <http://www.gnu.org/licenses/>.
*/
#include "util/globalFuncs.h"
#include "util/SophusUtil.h"
#include "opencv2/opencv.hpp"
#include "DataStructures/Frame.h"
namespace lsd_slam
{

SE3 SE3CV2Sophus(const cv::Mat &R, const cv::Mat &t)
{
    Sophus::Matrix3f sR;
    Sophus::Vector3f st;
    for(int i=0;i<3;i++)
    {
        sR(0,i) = R.at<double>(0,i);
        sR(1,i) = R.at<double>(1,i);
        sR(2,i) = R.at<double>(2,i);
        st[i] = t.at<double>(i);
    }
    return SE3(toSophus(sR.inverse()), toSophus(st));
}

void printMessageOnCVImage(cv::Mat &image, std::string line1, std::string line2)
{
    for(int x=0;x<image.cols;x++)
        for(int y=image.rows-30; y<image.rows;y++)
            image.at<cv::Vec3b>(y,x) *= 0.5;

    cv::putText(image, line2, cvPoint(10,image.rows-5),
        CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
    cv::putText(image, line1, cvPoint(10,image.rows-18),
        CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
}

cv::Mat getDepthRainbowPlot(Frame* kf, int lvl)
{
    return getDepthRainbowPlot(kf->idepth(lvl), kf->idepthVar(lvl), kf->image(lvl),
        kf->width(lvl), kf->height(lvl));
}

cv::Mat getDepthRainbowPlot(const float* idepth, const float* idepthVar, const float* gray, int width, int height)
{
    cv::Mat res = cv::Mat(height,width,CV_8UC3);
    if(gray != 0)
    {
        cv::Mat keyFrameImage(height, width, CV_32F, const_cast<float*>(gray));
        cv::Mat keyFrameImage8u;
        keyFrameImage.convertTo(keyFrameImage8u, CV_8UC1);
        cv::cvtColor(keyFrameImage8u, res, cv::COLOR_GRAY2RGB);
    }
    else
        fillCvMat(&res,cv::Vec3b(255,170,168));

    for(int i=0;i<width;i++)
        for(int j=0;j<height;j++)
        {
            float id = idepth[i + j*width];

            if(id >=0 && idepthVar[i + j*width] >= 0)
            {
                // rainbow between 0 and 4
                float r = (0-id) * 255 / 1.0; if(r < 0) r = -r;
                float g = (1-id) * 255 / 1.0; if(g < 0) g = -g;
                float b = (2-id) * 255 / 1.0; if(b < 0) b = -b;

                uchar rc = r < 0 ? 0 : (r > 255 ? 255 : r);
                uchar gc = g < 0 ? 0 : (g > 255 ? 255 : g);
                uchar bc = b < 0 ? 0 : (b > 255 ? 255 : b);

                res.at<cv::Vec3b>(j,i) = cv::Vec3b(255-rc,255-gc,255-bc);
            }
        }
    return res;
}

cv::Mat getVarRedGreenPlot(const float* idepthVar, const float* gray, int width, int height)
{
    float* idepthVarExt = (float*)Eigen::internal::aligned_malloc(width*height*sizeof(float));
    memcpy(idepthVarExt,idepthVar,sizeof(float)*width*height);

    for(int i=2;i<width-2;i++)
        for(int j=2;j<height-2;j++)
        {
            if(idepthVar[(i) + width*(j)] <= 0)
                idepthVarExt[(i) + width*(j)] = -1;
            else
            {
                float sumIvar = 0;
                float numIvar = 0;
                for(int dx=-2; dx <=2; dx++)
                    for(int dy=-2; dy <=2; dy++)
                    {
                        if(idepthVar[(i+dx) + width*(j+dy)] > 0)
                        {
                            float distFac = (float)(dx*dx+dy*dy)*(0.075*0.075)*0.02;
                            float ivar = 1.0f/(idepthVar[(i+dx) + width*(j+dy)] + distFac);
                            sumIvar += ivar;
                            numIvar += 1;
                        }
                    }
                idepthVarExt[(i) + width*(j)] = numIvar / sumIvar;
            }
        }

    cv::Mat res = cv::Mat(height,width,CV_8UC3);
    if(gray != 0)
    {
        cv::Mat keyFrameImage(height, width, CV_32F, const_cast<float*>(gray));
        cv::Mat keyFrameImage8u;
        keyFrameImage.convertTo(keyFrameImage8u, CV_8UC1);
        cv::cvtColor(keyFrameImage8u, res, cv::COLOR_GRAY2RGB);
    }
    else
        fillCvMat(&res,cv::Vec3b(255,170,168));

    for(int i=0;i<width;i++)
        for(int j=0;j<height;j++)
        {
            float idv = idepthVarExt[i + j*width];
            if(idv > 0)
            {
                float var = sqrt(idv);
                var = var*60*255*0.5 - 20;
                if(var > 255) var = 255;
                if(var < 0) var = 0;

                res.at<cv::Vec3b>(j,i) = cv::Vec3b(0,255-var, var);
            }
        }

    Eigen::internal::aligned_free((void*)idepthVarExt);
    return res;
}
}
Here is also some information about what I have installed that I think might be helpful:
OpenCV 4.1.1-pre
ROS Melodic
Ubuntu 18.04
CMake 3.10.2
catkin_tools 0.4.5
Python 3.6.8
Hope someone out there is able to help me.

Don't mix the obsolete C api with the C++ api.
Use cv::Point instead of cvPoint, and cv::FONT_HERSHEY_SIMPLEX instead of CV_FONT_HERSHEY_SIMPLEX, e.g.:
cv::putText(image, line1, cv::Point(10,image.rows-18),
cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200,200,250), 1, 8);
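For reference, the whole function ported to the C++-only API might look like the sketch below (same logic as the original; cv::LINE_8 stands in for the literal 8 line type, which still compiles but is clearer):
#include <opencv2/opencv.hpp>

// Sketch of printMessageOnCVImage using only the C++ API.
void printMessageOnCVImage(cv::Mat &image, std::string line1, std::string line2)
{
    // Darken a 30-pixel strip at the bottom so the text stays readable.
    for (int x = 0; x < image.cols; x++)
        for (int y = image.rows - 30; y < image.rows; y++)
            image.at<cv::Vec3b>(y, x) *= 0.5;

    cv::putText(image, line2, cv::Point(10, image.rows - 5),
                cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200, 200, 250), 1, cv::LINE_8);
    cv::putText(image, line1, cv::Point(10, image.rows - 18),
                cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(200, 200, 250), 1, cv::LINE_8);
}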

Related

Panorama to Tiny Planet in OpenCV C++

I tried to convert a panorama to a tiny planet using C++ and OpenCV, but the resulting image is noisy. I am not really sure which part I did wrong; I think it has something to do with color.
Tutorial I referred to
http://codeofthedamned.com/index.php/the-little-planet-effect
Panorama source
Tiny image result
#import <opencv2/opencv.hpp>
#import <opencv2/imgcodecs/ios.h>
#import "OpenCVWrapper.h"

using namespace cv;

@implementation OpenCVWrapper

+ (UIImage*)createTinyPlanetFromImage: (UIImage*)image {
    Mat pano;
    UIImageToMat(image, pano);
    Mat grayMat;
    RenderProjection(pano, 1000.0, grayMat);
    return MatToUIImage(grayMat);
}

void RenderProjection(Mat &pano, long len, Mat &output) {
    const double k_pi = 3.1415926535897932384626433832795;
    const double k_pi_inverse = 0.31830988618379067153776752674503;
    output.create(len, len, CV_16UC3);
    long half_len = len / 2;
    cv::Size sz = pano.size();
    for (long indexX = 0; indexX < len; ++indexX) {
        for (long indexY = 0; indexY < len; ++indexY) {
            double sphereX = (indexX - half_len) * 10.0 / len;
            double sphereY = (indexY - half_len) * 10.0 / len;
            double Qx, Qy, Qz;
            if (GetIntersection(sphereX, sphereY, Qx, Qy, Qz)) {
                double theta = std::acos(Qz);
                double phi = std::atan2(Qy, Qx) + k_pi;
                theta = theta * k_pi_inverse;
                phi = phi * (0.5 * k_pi_inverse);
                double Sx = min(sz.width - 2.0, sz.width * phi);
                double Sy = min(sz.height - 2.0, sz.height * theta);
                output.at<Vec3s>(int(indexY), int(indexX)) = BilinearSample(pano, Sx, Sy);
            }
        }
    }
}

bool GetIntersection(double u, double v, double &x, double &y, double &z) {
    double Nx = 0.0;
    double Ny = 0.0;
    double Nz = 1.0;
    double dir_x = u - Nx;
    double dir_y = v - Ny;
    double dir_z = -1.0 - Nz;
    double a = (dir_x * dir_x) + (dir_y * dir_y) + (dir_z * dir_z);
    double b = (dir_x * Nx) + (dir_y * Ny) + (dir_z * Nz);
    b *= 2;
    double d = b * b;
    double q = -0.5 * (b - std::sqrt(d));
    double t = q / a;
    x = (dir_x * t) + Nx;
    y = (dir_y * t) + Ny;
    z = (dir_z * t) + Nz;
    return true;
}

Vec3s BilinearSample(Mat &image, double x, double y) {
    Vec3s c00 = image.at<Vec3s>(int(y), int(x));
    Vec3s c01 = image.at<Vec3s>(int(y), int(x) + 1);
    Vec3s c10 = image.at<Vec3s>(int(y) + 1, int(x));
    Vec3s c11 = image.at<Vec3s>(int(y) + 1, int(x) + 1);
    double X0 = x - floor(x);
    double X1 = 1.0 - X0;
    double Y0 = y - floor(y);
    double Y1 = 1.0 - Y0;
    double w00 = X0 * Y0;
    double w01 = X1 * Y0;
    double w10 = X0 * Y1;
    double w11 = X1 * Y1;
    short r = short(c00[2] * w00 + c01[2] * w01 + c10[2] * w10 + c11[2] * w11);
    short g = short(c00[1] * w00 + c01[1] * w01 + c10[1] * w10 + c11[1] * w11);
    short b = short(c00[0] * w00 + c01[0] * w01 + c10[0] * w10 + c11[0] * w11);
    return make_BGR(b, g, r);
}

Vec3s make_BGR(short blue, short green, short red) {
    Vec3s result;
    result[0] = blue;
    result[1] = green;
    result[2] = red;
    return result;
}

@end
The problem was solved when I replaced UIImageToMat(image, pano); and MatToUIImage(grayMat); with the code below, and we can then remove the header #import <opencv2/imgcodecs/ios.h>:
static void UIImageToMat2(UIImage *image, cv::Mat &mat) {
    assert(image.size.width > 0 && image.size.height > 0);
    assert(image.CGImage != nil || image.CIImage != nil);

    // Create a pixel buffer.
    NSInteger width = image.size.width;
    NSInteger height = image.size.height;
    cv::Mat mat8uc4 = cv::Mat((int)height, (int)width, CV_8UC4);

    // Draw all pixels to the buffer.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (image.CGImage) {
        // Render using Core Graphics.
        CGContextRef contextRef = CGBitmapContextCreate(mat8uc4.data, mat8uc4.cols, mat8uc4.rows, 8, mat8uc4.step, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
        CGContextDrawImage(contextRef, CGRectMake(0, 0, width, height), image.CGImage);
        CGContextRelease(contextRef);
    } else {
        // Render using Core Image.
        static CIContext* context = nil; // I do not like that this declaration contains 'static', but it is for performance.
        if (!context) {
            context = [CIContext contextWithOptions:@{ kCIContextUseSoftwareRenderer: @NO }];
        }
        CGRect bounds = CGRectMake(0, 0, width, height);
        [context render:image.CIImage toBitmap:mat8uc4.data rowBytes:mat8uc4.step bounds:bounds format:kCIFormatRGBA8 colorSpace:colorSpace];
    }
    CGColorSpaceRelease(colorSpace);

    // Adjust byte order of pixels (note: rows = height, cols = width).
    cv::Mat mat8uc3 = cv::Mat((int)height, (int)width, CV_8UC3);
    cv::cvtColor(mat8uc4, mat8uc3, cv::COLOR_RGBA2BGR);

    mat = mat8uc3;
}
and
static UIImage *MatToUIImage2(cv::Mat &mat) {
    // Create a pixel buffer.
    assert(mat.elemSize() == 1 || mat.elemSize() == 3);
    cv::Mat matrgb;
    if (mat.elemSize() == 1) {
        cv::cvtColor(mat, matrgb, cv::COLOR_GRAY2RGB);
    } else if (mat.elemSize() == 3) {
        cv::cvtColor(mat, matrgb, cv::COLOR_BGR2RGB);
    }

    // Change the image format.
    NSData *data = [NSData dataWithBytes:matrgb.data length:(matrgb.elemSize() * matrgb.total())];
    CGColorSpaceRef colorSpace;
    if (matrgb.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(matrgb.cols, matrgb.rows, 8, 8 * matrgb.elemSize(), matrgb.step.p[0], colorSpace, kCGImageAlphaNone | kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault);
    UIImage *image = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return image;
}

Hough Circular Transform

I'm trying to implement the Hough transform using the gradient direction. I know there is an implementation in OpenCV, but I want to do it myself.
I'm using Sobel to get the X and Y gradients. Then for every pixel:
magnitude --> sqrt(sobelX^2 + sobelY^2)
direction --> atan2(sobelY, sobelX) * 180/PI
If the magnitude is higher than 220 (so almost black), this pixel is an edge.
The direction is then used in the circle equation.
But the results are not acceptable. Any help?
I know there are cv::polar and cv::cartToPolar, but I want to optimize the code so that all equations are calculated on the fly, with no extra loops.
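For clarity, here is a minimal sketch of just that per-pixel step (assuming the Sobel output is CV_32F, so it has to be read with at<float>):
#include <opencv2/opencv.hpp>
#include <cmath>

// Sketch: magnitude and direction maps computed from CV_32F Sobel output.
void gradientMaps(const cv::Mat& gray, cv::Mat& magnitude, cv::Mat& thetaDeg)
{
    cv::Mat sobelX, sobelY;
    cv::Sobel(gray, sobelX, CV_32F, 1, 0);
    cv::Sobel(gray, sobelY, CV_32F, 0, 1);
    magnitude.create(gray.size(), CV_32F);
    thetaDeg.create(gray.size(), CV_32F);
    for (int y = 0; y < gray.rows; ++y)
        for (int x = 0; x < gray.cols; ++x)
        {
            float gx = sobelX.at<float>(y, x);
            float gy = sobelY.at<float>(y, x);
            magnitude.at<float>(y, x) = std::sqrt(gx * gx + gy * gy);
            thetaDeg.at<float>(y, x) = std::atan2(gy, gx) * 180.0f / static_cast<float>(CV_PI);
        }
}
My full attempt follows: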
cv::Mat sobelX, sobelY;
Sobel(mat, sobelX, CV_32F, 1, 0, kernelSize, 1, 0, cv::BORDER_REPLICATE);
Sobel(mat, sobelY, CV_32F, 0, 1, kernelSize, 1, 0, cv::BORDER_REPLICATE);
//cv::Canny(mat,mat,100,200,kernelSize,false);
debug::showImage("sobelX", sobelX);
debug::showImage("SobelY", sobelY);
debug::showImage("MAT", mat);

cv::Mat magnitudeMap, angleMap;
magnitudeMap = cv::Mat::zeros(mat.rows, mat.cols, mat.type());
angleMap = cv::Mat::zeros(mat.rows, mat.cols, mat.type());

std::vector<cv::Mat> hough_spaces(max);
for (int i = 0; i < max; ++i)
{
    hough_spaces[i] = cv::Mat::zeros(mat.rows, mat.cols, mat.type());
}

for (int x = 0; x < mat.rows; ++x)
{
    for (int y = 0; y < mat.cols; ++y)
    {
        const float magnitude = sqrt(sobelX.at<uchar>(x,y)*sobelX.at<uchar>(x,y) + sobelY.at<uchar>(x,y)*sobelY.at<uchar>(x,y));
        const float theta = atan2(sobelY.at<uchar>(x,y), sobelX.at<uchar>(x,y)) * 180/CV_PI;
        magnitudeMap.at<uchar>(x,y) = magnitude;
        if (magnitude > 225) //mat.at<const uchar>(x,y) == 255)
        {
            for (int radius = min; radius < max; ++radius)
            {
                const int a = x - radius * cos(theta); //lookup::cosArray[static_cast<int>(theta)]; //+ 0.5f;
                const int b = y - radius * sin(theta); //lookup::sinArray[static_cast<int>(theta)]; //+ 0.5f;
                if (a >= 0 && a < hough_spaces[radius].rows && b >= 0 && b < hough_spaces[radius].cols) {
                    hough_spaces[radius].at<uchar>(a,b) += 10;
                }
            }
        }
    }
}
debug::showImage("magnitude", magnitudeMap);

for (int radius = min; radius < max; ++radius)
{
    double min_f, max_f;
    cv::Point min_loc, max_loc;
    cv::minMaxLoc(hough_spaces[radius], &min_f, &max_f, &min_loc, &max_loc);
    if (max_f >= treshold)
    {
        circles.emplace_back(cv::Point3f(max_loc.x, max_loc.y, radius));
        // debug::showImage(std::to_string(radius).c_str(), hough_spaces[radius]);
    }
}
circles.shrink_to_fit();

Different results between self-made bgr2hsv and opencv bgr2hsv

I implemented a bgr2hsv function by accessing pixels with OpenCV, C++.
I just coded it from a bgr2hsv algorithm I found on the Internet.
Then I compared my bgr2hsv() to the cvtColor() results.
The result images have slightly different colors even though the original image was the same. I tried to see why they differ, but I couldn't find the cause.
Could you look at the source code and result images?
Here's the code.
//self-made bgr2hsv
double b, g, r;
double bb, gg, rr;
double tmax, tmin;
double h = 0, s = 0, v = 0;
double del, delB, delG, delR;

Mat image = imread("lena.jpg", 1);
Mat clone1 = image.clone();
Mat img;
image.convertTo(img, CV_64F);

for (int y = 0; y < img.rows; y++)
{
    for (int x = 0; x < img.cols; x++)
    {
        b = image.at<Vec3b>(y, x)[0];
        g = image.at<Vec3b>(y, x)[1];
        r = image.at<Vec3b>(y, x)[2];

        bb = b / 255;
        gg = g / 255;
        rr = r / 255;

        tmax = _max(bb, gg, rr);
        tmin = _min(bb, gg, rr);

        v = tmax;
        del = tmax - tmin;

        if (del == 0) {
            h = 0;
            s = 0;
        }
        else {
            s = del / tmax;
            delB = ((tmax - b) / 6 + del / 2) / del;
            delG = ((tmax - g) / 6 + del / 2) / del;
            delR = ((tmax - r) / 6 + del / 2) / del;

            if (b == tmax) {
                h = (2 / 3) + delG - delR;
            }
            if (g == tmax) {
                h = (1 / 3) + delR - delB;
            }
            if (r == tmax) {
                h = delB - delG;
            }
            if (h < 0) h += 1;
            if (h > 1) h -= 1;
        }

        img.at<Vec3d>(y, x)[0] = h;
        img.at<Vec3d>(y, x)[1] = s;
        img.at<Vec3d>(y, x)[2] = v;
    }
}

//bgr2hsv with cvtColor
cvtColor(image, clone1, CV_BGR2HSV);
imwrite("implemented_hsv.jpg", clone1);
imwrite("bgr2hsv.jpg", img);

//show images
imshow("bgr2hsv", img);
imshow("implemented_hsv", clone1);
waitKey(0);
And results are here.
I wouldn't suggest grabbing something from the internet and expecting it to give you the correct result unless you understand what's going on. Instead of using this, why not just use the formula from the OpenCV docs?
For an example of this particular conversion, see my answer here. It uses the exact formula OpenCV mentions in the docs linked above for BGR to HSV conversion. It's in Python and not C++, but Python is fairly easy to read anyway.
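For illustration, a rough C++ sketch of the documented formula might look like this (my own paraphrase, not OpenCV's implementation; it packs H/2, S*255, V*255 into 8-bit channels the way cvtColor does for CV_8U images):
#include <opencv2/opencv.hpp>
#include <algorithm>

// Sketch: BGR -> HSV per the formula in the OpenCV docs, for CV_8UC3 input.
cv::Mat bgr2hsvManual(const cv::Mat& bgr)
{
    CV_Assert(bgr.type() == CV_8UC3);
    cv::Mat hsv(bgr.size(), CV_8UC3);
    for (int y = 0; y < bgr.rows; ++y)
    {
        for (int x = 0; x < bgr.cols; ++x)
        {
            cv::Vec3b p = bgr.at<cv::Vec3b>(y, x);
            double b = p[0] / 255.0, g = p[1] / 255.0, r = p[2] / 255.0;
            double v = std::max({b, g, r});
            double delta = v - std::min({b, g, r});
            double s = (v != 0.0) ? delta / v : 0.0;
            double h = 0.0;
            if (delta != 0.0)
            {
                if (v == r)      h = 60.0 * (g - b) / delta;
                else if (v == g) h = 120.0 + 60.0 * (b - r) / delta;
                else             h = 240.0 + 60.0 * (r - g) / delta;
            }
            if (h < 0.0) h += 360.0;
            // 8-bit packing used by OpenCV: hue is halved to fit in [0,180).
            hsv.at<cv::Vec3b>(y, x) = cv::Vec3b(
                cv::saturate_cast<uchar>(h / 2.0),
                cv::saturate_cast<uchar>(s * 255.0),
                cv::saturate_cast<uchar>(v * 255.0));
        }
    }
    return hsv;
}
Note that the integer divisions (2 / 3) and (1 / 3) in the question's code evaluate to 0 in C++, which alone would explain part of the difference.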

Camera pose estimation from essential matrix

I'm trying to estimate the camera motion from a pair of images. I found the essential matrix E and decomposed it into its rotation and translation elements.
Here is the C++ code:
cv::SVD svd(E);
cv::Matx33d W{0, -1, 0, 1, 0, 0, 0, 0, 1};
cv::Mat_<double> R = svd.u * cv::Mat(W) * svd.vt;
cv::Mat_<double> t = svd.u.col(2);

if (!infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
    t = -svd.u.col(2);
    if (!posEstimator.infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
        R = svd.u * cv::Mat(W.t()) * svd.vt;
        t = svd.u.col(2);
        if (!infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
            t = -svd.u.col(2);
            if (!infrontOfBothCameras(inliers[0], inliers[1], R, t)) {
                std::cout << "Incorrect SVD decomposition" << std::endl;
            }
        }
    }
}
The function infrontOfBothCameras checks whether the points are in front of both cameras:
bool infrontOfBothCameras(std::vector<cv::Point2f>& points1, std::vector<cv::Point2f>& points2, cv::Mat_<double>& R, cv::Mat_<double>& t) {
    cv::Mat r1 = R.row(0);
    cv::Mat r2 = R.row(1);
    cv::Mat r3 = R.row(2);

    for (size_t i = 0; i < points1.size(); ++i) {
        cv::Matx13d uv{ points2[i].x, points2[i].y, 1 };
        double z = (r1 - points2[i].x * r3).dot(t.t()) / ((r1 - points2[i].x * r3).dot(cv::Mat_<double>(uv)));
        cv::Matx31d point3d_first{ points1[i].x * z, points1[i].y * z, z };
        cv::Mat_<double> point3d_second = R.t() * (cv::Mat_<double>(point3d_first) - t);
        if (point3d_first(2) < 0 || point3d_second(2) < 0) {
            return false;
        }
    }
    return true;
}
Afterwards I wish to estimate the new pose of the camera. How can I use t and R for that?
For example, I have the old pose of the camera, old_pose = (0, 0, 0), and I try to calculate the new pose:
new_pose = old_pose + R * t
Is it correct?
I believe it should be:
new_pose = R*(old_pose-t);
The rest looks ok, but I haven't checked every little detail.
If you want a reference to compare to, you can look at:
https://github.com/MasteringOpenCV/code/blob/master/Chapter4_StructureFromMotion/FindCameraMatrices.cpp
Specifically functions DecomposeEtoRandT and FindCameraMatrices
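As a minimal sketch of that update (assuming R and t are the 3x3 rotation and 3x1 translation from the decomposition above, and the pose is the camera position as a 3x1 vector):
#include <opencv2/opencv.hpp>

// Sketch: move a camera position by the recovered relative motion,
// following the convention new_pose = R * (old_pose - t).
cv::Mat_<double> updatePose(const cv::Mat_<double>& old_pose, // 3x1 position
                            const cv::Mat_<double>& R,        // 3x3 rotation
                            const cv::Mat_<double>& t)        // 3x1 translation
{
    return R * (old_pose - t);
}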

cvGetHistValue_1D is deprecated. What is to be used instead?

According to the latest OpenCV documentation (OpenCV 2.4.5), cvGetHistValue_1D has been deprecated from the imgproc module and is now part of the legacy module.
I would like to know what should be used instead of cvGetHistValue_1D if I do not plan to use the legacy module.
My previous code is below; it needs to be rewritten without the use of cvGetHistValue_1D:
CvHistogram *hist = cvCreateHist(1, &numBins, CV_HIST_ARRAY, ranges, 1);
cvClearHist(hist);
cvCalcHist(&oDepth, hist);
cvNormalizeHist(hist, 1.0f);

float *cumHist = new float[numBins];
cumHist[0] = *cvGetHistValue_1D(hist, 0);
for (int i = 1; i < numBins; i++)
{
    cumHist[i] = cumHist[i-1] + *cvGetHistValue_1D(hist, i);
    if (cumHist[i] > 0.95)
    {
        oMaxDisp = i;
        break;
    }
}
I am assuming you are interested in using the C++ API. Matrix element access is easily accomplished using cv::Mat::at().
Your code might then look like this:
cv::Mat image; //Already in memory
size_t oMaxDisp = 0;
cv::Mat hist;

//Setup the histogram parameters
const int channels = 0;
const int numBins = 256;
const float rangevals[2] = {0.f, 256.f};
const float* ranges = rangevals;

cv::calcHist(&image, 1, &channels, cv::noArray(), hist, 1, &numBins, &ranges);
cv::normalize(hist, hist, 1, 0, cv::NORM_L1);

float cumHist[numBins];
float sum = 0.f;
for (size_t i = 0; i < numBins; ++i)
{
    float val = hist.at<float>(i);
    sum += val;
    cumHist[i] = sum;
    if (cumHist[i] > 0.95)
    {
        oMaxDisp = i;
        break;
    }
}
As a side note, it's a good idea not to use new unless it is necessary.
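For example, the cumulative buffer can be a std::vector, which manages its own memory (a sketch reusing hist, numBins, and oMaxDisp from the snippet above):
#include <vector>

// Same cumulative scan, with std::vector owning the buffer instead of new[].
std::vector<float> cumHist(numBins, 0.f);
float sum = 0.f;
for (int i = 0; i < numBins; ++i)
{
    sum += hist.at<float>(i);
    cumHist[i] = sum;
    if (cumHist[i] > 0.95f)
    {
        oMaxDisp = i;
        break;
    }
}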