I'm using OpenCV 2.4.8.2 on Mac OS 10.9.5.
I have the following snippet of code:
static void compute_weights(const vector<Mat>& images, vector<Mat>& weights)
{
    weights.clear();
    for (int i = 0; i < images.size(); i++) {
        Mat image = images[i];
        Mat mask = Mat::zeros(image.size(), CV_32F);
        int x_start = (i == 0) ? 0 : image.cols/2;
        int y_start = 0;
        int width = image.cols/2;
        int height = image.rows;
        Mat roi = mask(Rect(x_start, y_start, width, height)); // set ROI
        roi.setTo(1);
        weights.push_back(mask);
    }
}
static void blend(const vector<Mat>& inputImages, Mat& outputImage)
{
    int maxPyrIndex = 6;
    vector<Mat> weights;
    compute_weights(inputImages, weights);

    // Find the fused pyramid:
    vector<Mat> fused_pyramid;
    for (int i = 0; i < inputImages.size(); i++) {
        Mat image = inputImages[i];
        // Build Gaussian pyramid for the weights
        vector<Mat> weight_gaussian_pyramid;
        buildPyramid(weights[i], weight_gaussian_pyramid, maxPyrIndex);
        // Build Laplacian pyramid for the original image
        Mat float_image;
        inputImages[i].convertTo(float_image, CV_32FC3, 1.0/255.0);
        vector<Mat> orig_gaussian_pyramid;
        vector<Mat> orig_laplacian_pyramid;
        buildPyramid(float_image, orig_gaussian_pyramid, maxPyrIndex);
        for (int j = 0; j < orig_gaussian_pyramid.size() - 1; j++) {
            Mat sized_up;
            pyrUp(orig_gaussian_pyramid[j+1], sized_up, Size(orig_gaussian_pyramid[j].cols, orig_gaussian_pyramid[j].rows));
            orig_laplacian_pyramid.push_back(orig_gaussian_pyramid[j] - sized_up);
        }
        // The last Laplacian layer is the same as the Gaussian layer
        orig_laplacian_pyramid.push_back(orig_gaussian_pyramid[orig_gaussian_pyramid.size()-1]);
        // Multiply the Laplacian of the original with the Gaussian weights
        vector<Mat> convolved;
        for (int j = 0; j < maxPyrIndex + 1; j++) {
            // Replicate the weight pyramid level across 3 channels
            vector<Mat> gaussian_3d_vec;
            for (int k = 0; k < 3; k++) {
                gaussian_3d_vec.push_back(weight_gaussian_pyramid[j]);
            }
            Mat gaussian_3d;
            merge(gaussian_3d_vec, gaussian_3d);
            //Mat convolved_result = weight_gaussian_pyramid[j].clone();
            Mat convolved_result = gaussian_3d.clone();
            multiply(gaussian_3d, orig_laplacian_pyramid[j], convolved_result);
            convolved.push_back(convolved_result);
        }
        if (i == 0) {
            fused_pyramid = convolved;
        } else {
            for (int j = 0; j < maxPyrIndex + 1; j++) {
                fused_pyramid[j] += convolved[j];
            }
        }
    }

    // Blending: collapse the fused pyramid
    for (int i = (int)fused_pyramid.size()-1; i > 0; i--) {
        Mat sized_up;
        pyrUp(fused_pyramid[i], sized_up, Size(fused_pyramid[i-1].cols, fused_pyramid[i-1].rows));
        fused_pyramid[i-1] += sized_up;
    }
    Mat final_color_bgr;
    fused_pyramid[0].convertTo(final_color_bgr, CV_32F, 255);
    final_color_bgr.copyTo(outputImage);
    imshow("final", outputImage);
    waitKey(0);
    imwrite(outputImagePath, outputImage);
}
This code does some basic pyramid blending for 2 images. The key issue is with imshow and imwrite in the last lines: they give me drastically different results. I apologize for posting such long/messy code, but I'm afraid the difference comes from some other part of the code that subsequently affects imshow and imwrite.
The first image shows the result from imwrite and the second shows the result from imshow, based on the code given. I'm quite confused about why this is the case.
I also noticed that when I do these:
Mat float_image;
inputImages[i].convertTo(float_image, CV_32FC3, 1.0/255.0);
imshow("float image", float_image);
imshow("orig image", image);
They show exactly the same thing; both display the same picture as the original RGB image (in image).
IMWRITE functionality
By default, imwrite expects 8-bit input. From the OpenCV documentation: "Only 8-bit (or 16-bit unsigned (CV_16U) in case of PNG, JPEG 2000, and TIFF) single-channel or 3-channel (with 'BGR' channel order) images can be saved using this function."
So whatever format you feed to imwrite, it is stored as CV_8U data with a range of 0 (black) to 255 (white), in BGR channel order.
IMSHOW - problem
Now look at your function's last conversion: fused_pyramid[0].convertTo(final_color_bgr, CV_32F, 255); fused_pyramid is already of Mat type 21 (CV_32FC3, 3-channel floating point). You convert it to floating point with a scale factor of 255, and that scale factor is what causes the problem at imshow: for floating-point images, imshow maps 0.0 (black) to 1.0 (white) onto the display range, so values scaled up to 0-255 saturate to white. To visualize the result, you can feed in fused_pyramid[0] directly without conversion, since it is already floating point scaled between 0.0 (black) and 1.0 (white).
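A minimal sketch of the fix (assuming fused_pyramid[0] holds CV_32FC3 values in the 0.0-1.0 range, and reusing outputImagePath from your code):
imshow("final", fused_pyramid[0]); // imshow maps float 0.0-1.0 to the full display range
Mat out8u;
fused_pyramid[0].convertTo(out8u, CV_8UC3, 255.0); // scale to 0-255 only when saving
imwrite(outputImagePath, out8u); // imwrite expects 8-bit data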
Hope it helps.
Related
I am learning image processing with OpenCV in C++. To implement a basic down-sampling algorithm I need to work at the pixel level, removing rows and columns. However, when I assign values with mat.at<>(i,j), other values get assigned instead, things like 1e-38.
Here is the code :
Mat src, dst;
src = imread("diw3.jpg", CV_32F); // src is a 479x359 grayscale image
// dst will contain src low-pass-filtered; I checked by displaying it, and it works fine
Mat kernel;
kernel = Mat::ones(3, 3, CV_32F) / (float)(9);
filter2D(src, dst, -1, kernel, Point(-1, -1), 0, BORDER_DEFAULT);
// Now I try to remove half the rows/columns; the result is stored in downsampled
Mat downsampled = Mat::zeros(240, 180, CV_32F);
for (int i = 0; i < downsampled.rows; i++) {
    for (int j = 0; j < downsampled.cols; j++) {
        downsampled.at<uchar>(i,j) = dst.at<uchar>(2*i, 2*j);
    }
}
Since I read here (OpenCV outputing odd pixel values) that for cout I needed to cast, I added an (int) cast before dst.at<uchar>, but that does not work either.
The second argument to cv::imread is cv::ImreadModes, so the line:
src = imread("diw3.jpg", CV_32F);
is not correct; it should probably be:
cv::Mat src_8u = imread("diw3.jpg", cv::IMREAD_GRAYSCALE);
src_8u.convertTo(src, CV_32FC1);
which will read the image as an 8-bit grayscale image and then convert it to floating-point values.
The loop should look something like this:
Mat downsampled = Mat::zeros(240, 180, CV_32FC1);
for (int i = 0; i < downsampled.rows; i++) {
    for (int j = 0; j < downsampled.cols; j++) {
        downsampled.at<float>(i,j) = dst.at<float>(2*i, 2*j);
    }
}
Note that the argument to cv::Mat::zeros is CV_32FC1 (1 channel of 32-bit floating-point values), so the Mat::at<float> method should be used.
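The 1e-38-style values come from a mismatch between the template argument and the Mat depth: cv::Mat::at<>() performs no conversion, it simply reinterprets the bytes at the computed offset (in release builds; debug builds may trigger an assertion). A minimal sketch of the mismatch:
Mat m = Mat::ones(1, 1, CV_32FC1); // one float pixel holding 1.0f (bytes 00 00 80 3F)
uchar wrong = m.at<uchar>(0, 0);   // reads only the first byte of the float: 0
float right = m.at<float>(0, 0);   // template matches the depth: 1.0f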
I am building a scanner feature for my app and binarizing the photo of the document with OpenCV:
// convert to grayscale
cv::Mat converted, blurred, blackAndWhite;
converted = cv::Mat(inputMatrix.rows, inputMatrix.cols, CV_8UC1);
cv::cvtColor(inputMatrix, converted, CV_BGR2GRAY);
// remove noise
cv::GaussianBlur(converted, blurred, cv::Size(3,3), 0);
// adaptive threshold on the blurred image
cv::adaptiveThreshold(blurred, blackAndWhite, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 15, 9);
The result is okay, but scans from other scanner apps are much better, especially for very small text:
Processed with opencv
Scanned With DropBox
What can I do, to improve my result?
Maybe the apps are using anti-aliasing to make their binarized output look nicer. To obtain a similar effect, I first tried binarizing the image, but the result didn't look very nice with all the jagged edges. Then I applied pyramid upsampling followed by downsampling to the result, and the output was better; the sketch below shows the idea.
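The upsample/downsample smoothing on its own is just a couple of lines (a minimal sketch, assuming bw holds the jagged 8-bit binarized image):
Mat smoothed;
pyrUp(bw, smoothed);         // 2x upsample with Gaussian interpolation
pyrDown(smoothed, smoothed); // downsample back; hard edges pick up anti-aliased gray levels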
I didn't use adaptive thresholding, however. I segmented the text-like regions and processed only those regions, then pasted them together to form the final images. It is a kind of local thresholding using the Otsu method or k-means (using combinations of thr_roi_otsu, thr_roi_kmeans and proc_parts in the code further below). Below are some results.
Apply Otsu threshold to all text regions, then upsample followed by downsample:
Some text:
Full image:
Upsample input image, apply Otsu threshold to individual text regions, downsample the result:
Some text:
Full image:
/*
apply Otsu threshold to the region in mask
*/
Mat thr_roi_otsu(Mat& mask, Mat& im)
{
    Mat bw = Mat::ones(im.size(), CV_8U) * 255;
    vector<unsigned char> pixels(countNonZero(mask));
    int index = 0;
    for (int r = 0; r < mask.rows; r++)
    {
        for (int c = 0; c < mask.cols; c++)
        {
            if (mask.at<unsigned char>(r, c))
            {
                pixels[index++] = im.at<unsigned char>(r, c);
            }
        }
    }
    // threshold pixels
    threshold(pixels, pixels, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    // paste pixels
    index = 0;
    for (int r = 0; r < mask.rows; r++)
    {
        for (int c = 0; c < mask.cols; c++)
        {
            if (mask.at<unsigned char>(r, c))
            {
                bw.at<unsigned char>(r, c) = pixels[index++];
            }
        }
    }
    return bw;
}

/*
apply k-means to the region in mask
*/
Mat thr_roi_kmeans(Mat& mask, Mat& im)
{
    Mat bw = Mat::ones(im.size(), CV_8U) * 255;
    vector<float> pixels(countNonZero(mask));
    int index = 0;
    for (int r = 0; r < mask.rows; r++)
    {
        for (int c = 0; c < mask.cols; c++)
        {
            if (mask.at<unsigned char>(r, c))
            {
                pixels[index++] = (float)im.at<unsigned char>(r, c);
            }
        }
    }
    // cluster pixels by gray level
    int k = 2;
    Mat data(pixels.size(), 1, CV_32FC1, &pixels[0]);
    vector<float> centers;
    vector<int> labels(countNonZero(mask));
    kmeans(data, k, labels, TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0), k, KMEANS_PP_CENTERS, centers);
    // examine cluster centers to see which label corresponds to dark pixels
    int label0 = centers[0] > centers[1] ? 1 : 0;
    // paste pixels
    index = 0;
    for (int r = 0; r < mask.rows; r++)
    {
        for (int c = 0; c < mask.cols; c++)
        {
            if (mask.at<unsigned char>(r, c))
            {
                bw.at<unsigned char>(r, c) = labels[index++] != label0 ? 255 : 0;
            }
        }
    }
    return bw;
}

/*
apply procfn to each connected component in the mask,
then paste the results to form the final image
*/
Mat proc_parts(Mat& mask, Mat& im, Mat (procfn)(Mat&, Mat&))
{
    Mat tmp = mask.clone();
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(tmp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    Mat byparts = Mat::ones(im.size(), CV_8U) * 255;
    for (int idx = 0; idx >= 0; idx = hierarchy[idx][0])
    {
        Rect rect = boundingRect(contours[idx]);
        Mat msk = mask(rect);
        Mat img = im(rect);
        // process the rect
        Mat roi = procfn(msk, img);
        // paste it to the final image
        roi.copyTo(byparts(rect));
    }
    return byparts;
}

int _tmain(int argc, _TCHAR* argv[])
{
    Mat im = imread("1.jpg", 0);
    // detect text regions
    Mat morph;
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
    morphologyEx(im, morph, CV_MOP_GRADIENT, kernel, Point(-1, -1), 1);
    // prepare a mask for text regions
    Mat bw;
    threshold(morph, bw, 0, 255, THRESH_BINARY | THRESH_OTSU);
    morphologyEx(bw, bw, CV_MOP_DILATE, kernel, Point(-1, -1), 10);

    Mat bw2x, im2x;
    pyrUp(bw, bw2x);
    pyrUp(im, im2x);

    // apply Otsu threshold to all text regions, then upsample followed by downsample
    Mat otsu1x = thr_roi_otsu(bw, im);
    pyrUp(otsu1x, otsu1x);
    pyrDown(otsu1x, otsu1x);
    // apply k-means to all text regions, then upsample followed by downsample
    Mat kmeans1x = thr_roi_kmeans(bw, im);
    pyrUp(kmeans1x, kmeans1x);
    pyrDown(kmeans1x, kmeans1x);
    // upsample input image, apply Otsu threshold to all text regions, downsample the result
    Mat otsu2x = thr_roi_otsu(bw2x, im2x);
    pyrDown(otsu2x, otsu2x);
    // upsample input image, apply k-means to all text regions, downsample the result
    Mat kmeans2x = thr_roi_kmeans(bw2x, im2x);
    pyrDown(kmeans2x, kmeans2x);

    // apply Otsu threshold to individual text regions, then upsample followed by downsample
    Mat otsuparts1x = proc_parts(bw, im, thr_roi_otsu);
    pyrUp(otsuparts1x, otsuparts1x);
    pyrDown(otsuparts1x, otsuparts1x);
    // apply k-means to individual text regions, then upsample followed by downsample
    Mat kmeansparts1x = proc_parts(bw, im, thr_roi_kmeans);
    pyrUp(kmeansparts1x, kmeansparts1x);
    pyrDown(kmeansparts1x, kmeansparts1x);
    // upsample input image, apply Otsu threshold to individual text regions, downsample the result
    Mat otsuparts2x = proc_parts(bw2x, im2x, thr_roi_otsu);
    pyrDown(otsuparts2x, otsuparts2x);
    // upsample input image, apply k-means to individual text regions, downsample the result
    Mat kmeansparts2x = proc_parts(bw2x, im2x, thr_roi_kmeans);
    pyrDown(kmeansparts2x, kmeansparts2x);

    return 0;
}
I would like to know how to remove the black border from the following frame in OpenCV using C++.
Original Image
Result
Any help would be really appreciated.
To remove some non-black noise I recommend using cv::threshold and morphological closing. Then you can crop away the border rows and columns that contain (for example) less than 5% non-black pixels.
I tried the following code and it works for your example:
int main()
{
    const int threshVal = 20;
    const float borderThresh = 0.05f; // 5%

    cv::Mat img = cv::imread("img.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat thresholded;
    cv::threshold(img, thresholded, threshVal, 255, cv::THRESH_BINARY);
    cv::morphologyEx(thresholded, thresholded, cv::MORPH_CLOSE,
        cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)),
        cv::Point(-1, -1), 2, cv::BORDER_CONSTANT, cv::Scalar(0));
    cv::imshow("thresholded", thresholded);

    cv::Point tl, br;
    for (int row = 0; row < thresholded.rows; row++)
    {
        if (cv::countNonZero(thresholded.row(row)) > borderThresh * thresholded.cols)
        {
            tl.y = row;
            break;
        }
    }
    for (int col = 0; col < thresholded.cols; col++)
    {
        if (cv::countNonZero(thresholded.col(col)) > borderThresh * thresholded.rows)
        {
            tl.x = col;
            break;
        }
    }
    for (int row = thresholded.rows - 1; row >= 0; row--)
    {
        if (cv::countNonZero(thresholded.row(row)) > borderThresh * thresholded.cols)
        {
            br.y = row;
            break;
        }
    }
    for (int col = thresholded.cols - 1; col >= 0; col--)
    {
        if (cv::countNonZero(thresholded.col(col)) > borderThresh * thresholded.rows)
        {
            br.x = col;
            break;
        }
    }

    cv::Rect roi(tl, br);
    cv::Mat cropped = img(roi);
    cv::imwrite("cropped.jpg", cropped);
    return 0;
}
Please note that in order to get the best results on all your samples you may need to adjust the parameters threshVal and borderThresh.
You may also want to read some good tutorials about thresholding and morphological transformations.
This builds on akarsakov's answer. His code crops out the black parts of the input image, but it writes the cropped image in grayscale. If you are after colour, try changing and adding these lines.
#include "opencv2/opencv.hpp"
using namespace cv;
// Read your input image
Mat img = imread("img.jpg");
// Prepare new grayscale image
Mat input_img_gray;
// Convert to img to Grayscale
cvtColor (img, input_img_gray, CV_RGB2GRAY);
Mat thresholded;
// Threshold uses grayscale image
threshold(input_img_gray, thresholded, threshVal, 255, cv::THRESH_BINARY);
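Then find tl and br from thresholded exactly as in akarsakov's answer, and take the final crop from the colour img rather than the grayscale one (a sketch reusing his variables):
Rect roi(tl, br);
// crop the original colour image so the output keeps its colours
Mat cropped = img(roi);
imwrite("cropped.jpg", cropped);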
I'd recommend ticking akarsakov's answer because it definitely works. This is just for anyone looking to output a coloured image :)
After much searching, I just realised that there is very little on the web about how to access a pixel's intensity value in OpenCV for a grayscale image.
Most online results are about how to access the BGR values of a colour image, like this one: Accessing certain pixel RGB value in openCV
image.at<> is usually shown for 3 channels, namely BGR. Out of curiosity, is there a similar OpenCV method for accessing a certain pixel value of a grayscale image?
You can use image.at<uchar>(j,i) to access a pixel value of a grayscale image.
The cv::Mat::at<>() function works for every type of image, whether it is a single-channel or multi-channel image. The type of the returned value just depends on the template argument provided to the function.
The value of a grayscale image pixel can be accessed like this:
//For 8-bit grayscale image.
unsigned char value = image.at<unsigned char>(row, column);
Make sure to use the correct data type for the template argument, matching the image depth (8U, 16U, 32F, etc.).
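For example (a sketch; img8u, img16u, img32f and img8uc3 stand for hypothetical Mats of the stated types):
uchar v8 = img8u.at<uchar>(row, col);     // CV_8UC1
ushort v16 = img16u.at<ushort>(row, col); // CV_16UC1
float v32 = img32f.at<float>(row, col);   // CV_32FC1
Vec3b bgr = img8uc3.at<Vec3b>(row, col);  // CV_8UC3: all three channels at once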
For IplImage* image, you can use
uchar intensity = CV_IMAGE_ELEM(image, uchar, y, x);
For Mat image, you can use
uchar intensity = image.at<uchar>(y, x);
For example, you can use this to build and display the intensity histogram of a grayscale image:
int main()
{
    // load a grayscale image (the path is illustrative)
    Mat image = imread("input.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    // count how many pixels fall into each of the 256 intensity bins
    int histogram[256] = {0};
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++)
            histogram[(int)image.at<uchar>(y,x)]++;
    for (int i = 0; i < 256; i++)
        cout << histogram[i] << " ";
    // draw the histogram
    int hist_w = 512; int hist_h = 400;
    int bin_w = cvRound((double)hist_w/256);
    Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255));
    // find the maximum intensity element from the histogram
    int max = histogram[0];
    for (int i = 1; i < 256; i++) {
        if (max < histogram[i]) {
            max = histogram[i];
        }
    }
    // normalize the histogram between 0 and histImage.rows
    for (int i = 0; i < 256; i++) {
        histogram[i] = ((double)histogram[i]/max)*histImage.rows;
    }
    // draw the intensity line for the histogram
    for (int i = 0; i < 256; i++)
    {
        line(histImage, Point(bin_w*i, hist_h),
             Point(bin_w*i, hist_h - histogram[i]),
             Scalar(0,0,0), 1, 8, 0);
    }
    // display the histogram
    namedWindow("Intensity Histogram", CV_WINDOW_AUTOSIZE);
    imshow("Intensity Histogram", histImage);
    namedWindow("Image", CV_WINDOW_AUTOSIZE);
    imshow("Image", image);
    waitKey();
    return 0;
}
I'm working on a face recognition project and I am having problems when projecting onto the PCA subspace.
When I pass a vector of Mats containing the resized images to my function, I project them and then reconstruct them to verify that it's working well, but all I get in the "Cam" window is a grey image (all the same colour).
I don't know what I am doing wrong.
This is the function:
void doPCA(const vector<Mat>& images)
{
    int nEigens = images.size() - 1;
    Mat data(images.size(), images[0].rows*images[0].cols, images[0].type());
    for (int i = 0; i < images.size(); i++)
    {
        Mat aux = data.row(i);
        images[i].reshape(1,1).copyTo(aux);
    }
    PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, nEigens);

    // Project images
    Mat dataprojected(data.rows, nEigens, CV_32FC1);
    for (int i = 0; i < images.size(); i++)
    {
        pca.project(data.row(i), dataprojected.row(i));
    }

    // Back-project to reconstruct images
    Mat datareconstructed(data.rows, data.cols, data.type());
    for (int i = 0; i < images.size(); i++)
    {
        pca.backProject(dataprojected.row(i), datareconstructed.row(i));
    }

    for (int i = 0; i < images.size(); i++)
    {
        imshow("Cam", datareconstructed.row(i).reshape(1, images[0].rows));
        waitKey();
    }
}
I think this post is a duplicate of:
PCA + SVM using C++ Syntax in OpenCV 2.2
Ah, I have found the error in your code. When you create the data matrix you do:
images[i].reshape(1,1).copyTo(aux);
You have to use convertTo to convert the data into the correct type and copy it to your data matrix:
images[i].reshape(1,1).convertTo(aux, CV_32FC1, 1/255.);
Then the normalized eigenvectors should be ok. And don't forget to normalize the values between 0 and 255 before displaying them; you can use cv::normalize to do this. Here's a simple function for turning a Mat into displayable grayscale:
Mat toGrayscale(const Mat& src) {
    Mat srcnorm;
    cv::normalize(src, srcnorm, 0, 255, NORM_MINMAX, CV_8UC1);
    return srcnorm;
}
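With that helper, the display loop from the question becomes (a sketch reusing datareconstructed from doPCA):
for (int i = 0; i < images.size(); i++)
{
    imshow("Cam", toGrayscale(datareconstructed.row(i).reshape(1, images[0].rows)));
    waitKey();
}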
You may want to look at the example in my blog:
http://bytefish.de/blog/pca_in_opencv#simple_example