I have an 800x800 image which is broken down into 16 blocks of 200x200.
(you can see the previous post here)
These blocks are stored as: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now, I want to be able to get the same images/blocks back, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
{
Mat mf( i,j, CV_32F, pdata + 200 );
imshow( "Display window", mf );
waitKey(0);
}
}
So, the problem is that I am receiving an
OpenCV Error: Assertion failed
in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800,800,CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
{
Mat mf( i,j, CV_32F, pdata + 200 );
Rect roi(j,i,200,200);
mf.copyTo( big(roi) );
}
}
imwrite( "testing" , big );
This gives me:
OpenCV Error: Assertion failed (!fixedSize()) in release
at the line mf.copyTo( big(roi) );
First, you need to know where your subimages are located within the big image. To do this, you can save the rect of each subimage into a vector<Rect> smallImageRois;
Then you can either use pointers (keep in mind that the subimages are not continuous) or simply copyTo the correct place:
Have a look:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
Mat3b img = imread("path_to_image");
resize(img, img, Size(800, 800));
Mat grayImg;
cvtColor(img, grayImg, COLOR_BGR2GRAY);
grayImg.convertTo(grayImg, CV_32F);
int N = 4;
if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
{
// Error
return -1;
}
Size graySize = grayImg.size();
Size smallSize(grayImg.cols / N, grayImg.rows / N);
vector<Mat> smallImages;
vector<Rect> smallImageRois;
for (int i = 0; i < graySize.height; i += smallSize.height)
{
for (int j = 0; j < graySize.width; j += smallSize.width)
{
Rect rect = Rect(j, i, smallSize.width, smallSize.height);
smallImages.push_back(grayImg(rect));
smallImageRois.push_back(rect);
}
}
// Option 1. Using pointer to subimage data.
Mat big1(800, 800, CV_32F);
int big1step = big1.step1();
float* pbig1 = big1.ptr<float>(0);
for (size_t idx = 0; idx < smallImages.size(); ++idx)
{
float* pdata = (float*)smallImages[idx].data;
int step = smallImages[idx].step1();
Rect roi = smallImageRois[idx];
for (int i = 0; i < smallSize.height; ++i)
{
for (int j = 0; j < smallSize.width; ++j)
{
pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
}
}
}
// Option 2. Using copyTo
Mat big2(800, 800, CV_32F);
for (size_t idx = 0; idx < smallImages.size(); ++idx)
{
smallImages[idx].copyTo(big2(smallImageRois[idx]));
}
return 0;
}
For concatenating the sub-images into a single square image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat> &patches) {
assert(patches.size() > 0);
// make it square
const int patch_width = patches[0].cols;
const int patch_height = patches[0].rows;
const int patch_stride = (int)ceil(sqrt((double)patches.size())); // needs <cmath>
Mat image = Mat::zeros(patch_stride * patch_height, patch_stride * patch_width, patches[0].type());
for (size_t i = 0, iend = patches.size(); i < iend; i++) {
Mat &patch = patches[i];
const int offset_x = (i % patch_stride) * patch_width;
const int offset_y = (i / patch_stride) * patch_height;
// copy the patch to the output image
patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
}
return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a square image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = {Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255)};
// fill vector with circles of different colours
for(int i = 0; i < 16; i++) {
Mat patch = Mat::zeros(100,100, CV_32FC3);
circle(patch, Point(50,50), 40, colours[i % 3], -1);
patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
This will produce a 4x4 grid of circles in the three alternating colours.
Print the values of i and j before creating Mat mf and I believe you will soon find the error.
Hint 1: i and j will both be 0 the first time through, so Mat mf(i, j, ...) constructs an empty 0x0 matrix, which is what trips the assertion in imshow.
Hint 2: Use copyTo() with a ROI, like:
cv::Rect roi(0,0,200,200);
src.copyTo(dst(roi));
Edit:
Hint 3: Try to avoid such pointer fiddling; you will get into trouble, especially if you ignore the step (as you seem to do).
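If you really must use pointers, here is a minimal sketch of step-aware access, using the question's subImages (idx is a hypothetical block index); each subimage is a view into the parent Mat, so its rows are not contiguous:
// Read one 200x200 CV_32F subimage without assuming continuity;
// ptr<float>(r) accounts for the parent image's row step.
Mat &sub = subImages[idx];
for (int r = 0; r < sub.rows; ++r)
{
    const float *row = sub.ptr<float>(r);
    for (int c = 0; c < sub.cols; ++c)
    {
        float value = row[c];
        // ... use value ...
    }
}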
Related
I am trying to convert an RGB image to grayscale using the average method, but the output I get is different from the desired output. I take the image and read its RGB values, compute the average, and store it in another array of the same size as the image. Finally, I convert the array to a Mat and display the image.
Input image:
Desired output:
My output:
int main()
{
Mat image;
image =imread("<image_path>");
int rows=image.rows;
int cols=image.cols;
int myArray[rows][cols];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
myArray[i][j] = 0;
}
}
uint8_t* pixelPtr = (uint8_t*)image.data;
int cn = image.channels();
Scalar_<uint8_t> bgrPixel;
for(int i = 0; i < rows; i++)
{
for(int j = 0; j < cols; j++)
{
bgrPixel.val[0] = pixelPtr[i*image.cols*cn + j*cn + 0]; // B
bgrPixel.val[1] = pixelPtr[i*image.cols*cn + j*cn + 1]; // G
bgrPixel.val[2] = pixelPtr[i*image.cols*cn + j*cn + 2]; // R
int average = (bgrPixel.val[0]+bgrPixel.val[1]+bgrPixel.val[2])/3;
myArray[i][j]=average;
}
}
Mat averaged_image(Size(rows, cols), CV_8UC3, myArray, Mat::AUTO_STEP);
imwrite("<path to save the image>",averaged_image);
imshow("averaged_image",averaged_image);
waitKey(0);
return 0;
}
When creating Mat averaged_image,
Mat averaged_image(Size(rows, cols), CV_8UC3, myArray, Mat::AUTO_STEP);
you need to use CV_32S, not CV_8UC3, because each array element is not three chars but one 32-bit int.
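For illustration, a minimal sketch of the corrected construction, reusing rows, cols and myArray from the question (the Mat(rows, cols, ...) constructor also sidesteps the Size(width, height) argument-order pitfall in the original call); the convertTo is needed because imwrite and imshow expect 8-bit data here:
// One 32-bit int per pixel; Mat(rows, cols, ...) takes rows first.
Mat averaged_image(rows, cols, CV_32S, myArray, Mat::AUTO_STEP);
Mat display;
averaged_image.convertTo(display, CV_8U); // averages already fit in 0..255
imwrite("<path to save the image>", display);
imshow("averaged_image", display);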
You can also use the function cvtColor:
cv::Mat gray;
cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
Bonus: this function does the correct weighting of the channels (Y = 0.299 R + 0.587 G + 0.114 B), because simple averaging may not be the right thing to do.
My code shows me values that are not accurate, and I am not sure what else to try. My goal is to get the values of y (the rows) so that I can read the image and put it into an array. I've looked at examples, and Stack Overflow is literally my last option.
#include<iostream>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
int main()
{
Mat Rgb;
Mat Grey;
Mat image;
//Mat Histogram;
Rgb = imread("license.jpg", WINDOW_AUTOSIZE);
cvtColor(Rgb, Grey, cv::COLOR_BGR2GRAY);
threshold(Grey, image, 150, 250, THRESH_BINARY);
int histogram[255];
for (int i = 0; i < 255; i++)
{
histogram[i] = 0;
}
for (int y = 0; y < image.rows; y++)
//for (int x = 0; x < image.cols; x++)
histogram[(int)image.at<uchar>(y)]++;
//histogram[(int)image.at<uchar>(y, x)]++;
for (int i = 0; i < 255; i++)
cout << histogram[i] << " ";
// draw the histograms
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound((double)hist_w / 255);
Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255, 255, 255));
int max = histogram[0];
for (int i = 1; i < 256; i++) {
if (max < histogram[i]) {
max = histogram[i];
}
}
for (int i = 0; i < 255; i++) {
histogram[i] = ((double)histogram[i] / max)*histImage.rows;
}
for (int i = 0; i < 255; i++)
{
line(histImage, Point(bin_w*(i), hist_h),
Point(bin_w*(i), hist_h - histogram[i]),
Scalar(0, 0, 0), 1, 8, 0);
}
imshow("Image", image);
waitKey(0);
cv::destroyAllWindows();
return 0;
}
The results have numbers like 319 and other values, and I am only expecting to get 0 or 255.
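For reference, a minimal corrected sketch of the counting loop, assuming the thresholded image is CV_8U: the array needs 256 bins so that any 8-bit value fits, and the commented-out column loop must be restored so that every pixel is visited. Note that the bins hold pixel counts, which is why large values such as 319 appear; it is the pixel values that stay at 0 or at the maxval passed to threshold (250 in the code above, not 255). Incidentally, imread takes IMREAD_* flags, not WINDOW_AUTOSIZE.
int histogram[256] = { 0 }; // one bin per possible 8-bit value
for (int y = 0; y < image.rows; y++)
{
    for (int x = 0; x < image.cols; x++)
    {
        histogram[image.at<uchar>(y, x)]++;
    }
}
// For the binary image above, only bins 0 and 250 will be non-zero.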
I want to use the connected-component algorithm for object detection. I can run this algorithm on the full image, but I want to apply it to just a part of the image. For example, my image is 760*520 and I want to run the algorithm on a square region (350, 270, 60, 60). This is a part of my code:
Mat image;
Mat stat, centroid;
int threshval = 100;
static void on_trackbar(int, void*) {
    Mat bw = threshval < 128 ? (image < threshval) : (image > threshval);
    Mat labelImage(image.size(), CV_32S);
    int nLabels = connectedComponentsWithStats(bw, labelImage, stat, centroid, 8);
    std::vector<Vec3b> colors(nLabels);
    colors[0] = Vec3b(0, 0, 0); // Background
    for (int label = 1; label < nLabels; ++label) {
        colors[label] = Vec3b((rand() & 255), (rand() & 255), (rand() & 255));
    }
    Mat dst(image.size(), CV_8UC3);
    for (int r = 0; r < dst.rows; ++r) {
        for (int c = 0; c < dst.cols; ++c) {
            int label = labelImage.at<int>(r, c);
            Vec3b &pixel = dst.at<Vec3b>(r, c);
            pixel = colors[label];
        }
    }
    imshow("Connected Components", dst);
}
Apart from using image(cv::Rect(350, 270, 60, 60)) instead of image, do you have any ideas to help me? I'm a beginner in OpenCV and C++. Thanks a lot...
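For what it's worth, here is a minimal sketch of restricting the algorithm to that region, reusing image and threshval from the code above; the ROI is just a view, so no copy is needed:
cv::Rect region(350, 270, 60, 60);
cv::Mat roi = image(region); // view into the full image, no copy
cv::Mat bw = threshval < 128 ? (roi < threshval) : (roi > threshval);
cv::Mat labels, stats, centroids;
int nLabels = cv::connectedComponentsWithStats(bw, labels, stats, centroids, 8);
// Coordinates in stats and centroids are relative to the ROI; add region.x
// and region.y to map them back into the full 760*520 image.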
I have some problems when using the Stitcher class.
First, I use the ORB feature finder because it's faster than SURF, but it's still slow.
Second, the Stitcher class's accuracy is too low.
Third, how can I get more performance out of the Stitcher class?
Additionally, how can I determine the direction between two images?
This is my code.
Thank you.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching.hpp"
#include "opencv2/features2d.hpp"
using namespace cv;
using namespace std;
void overlayImage(const cv::Mat &background, const cv::Mat &foreground, cv::Mat &output, cv::Point2i location);
int main(int argc, char* argv[])
{
Mat first;
Mat second;
Mat m_first;
Mat m_second;
vector<Mat> images;
// vector<Mat> re_images;
Mat panorama;
Mat result;
unsigned long t;
t = getTickCount();
first = imread(argv[1], CV_LOAD_IMAGE_COLOR);
second = imread(argv[2], CV_LOAD_IMAGE_COLOR);
//Mat m_first = Mat::zeros( first.size(), first.type() );
//Mat m_second = Mat::zeros( second.size(), second.type() );
/*
for( int y = 0; y < first.rows; y++ ) {
for( int x = 0; x < first.cols; x++ ) {
for( int c = 0; c < 3; c++ ) {
m_first.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( 1.2*( first.at<Vec3b>(y,x)[c] ) + 20 );
}
}
}
for( int y = 0; y < second.rows; y++ ){
for( int x = 0; x < second.cols; x++ ) {
for( int c = 0; c < 3; c++ ) {
m_second.at<Vec3b>(y,x)[c] =
saturate_cast<uchar>( 1.2*( second.at<Vec3b>(y,x)[c] ) + 20 );
}
}
}
*/
//imwrite("first.png", m_first);
//imwrite("second.png", m_second);
resize(first, m_first, Size(640, 480));
resize(second, m_second, Size(640, 480));
images.push_back(m_first);
images.push_back(m_second);
Stitcher stitcher = Stitcher::createDefault(false);
//Stitcher::Status status = stitcher.stitch(imgs, pano);
//stitcher.setWarper(new PlaneWarper());
stitcher.setWarper(new SphericalWarper());
// stitcher.setWarper(new CylindricalWarper());
stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder(Size(3,1),1500));
// stitcher.setRegistrationResol(0.6);
// stitcher.setSeamEstimationResol(0.1);
// stitcher.setCompositingResol(0.5);
//stitcher.setPanoConfidenceThresh(1);
stitcher.setWaveCorrection(true);
stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
stitcher.setFeaturesMatcher(new detail::BestOf2NearestMatcher(false,0.3));
stitcher.setBundleAdjuster(new detail::BundleAdjusterRay());
stitcher.setBlender(new detail::MultiBandBlender());
stitcher.stitch(images, panorama);
printf("%.2lf sec \n", (getTickCount() - t) / getTickFrequency() );
Rect rect(panorama.cols / 2 - 320, panorama.rows / 2 - 240, 640, 480);
Mat subimage = panorama(rect);
Mat car = imread("car.png");
overlayImage(subimage, car, result, cv::Point(320 - (car.cols / 2), 240 - (car.rows / 2 )));
imshow("panorama", result);
// resize(panorama, result, Size(640, 480));
imwrite("result.jpg", result);
waitKey(0);
return 0;
}
void overlayImage(const cv::Mat &background, const cv::Mat &foreground, cv::Mat &output, cv::Point2i location)
{
background.copyTo(output);
// start at the row indicated by location, or at row 0 if location.y is negative.
for(int y = std::max(location.y , 0); y < background.rows; ++y)
{
int fY = y - location.y; // because of the translation
// we are done if we have processed all rows of the foreground image.
if(fY >= foreground.rows)
break;
// start at the column indicated by location,
// or at column 0 if location.x is negative.
for(int x = std::max(location.x, 0); x < background.cols; ++x)
{
int fX = x - location.x; // because of the translation.
// we are done with this row if the column is outside of the foreground image.
if(fX >= foreground.cols)
break;
// determine the opacity of the foreground pixel, using its fourth (alpha) channel.
double opacity =
((double)foreground.data[fY * foreground.step + fX * foreground.channels() + 3])
/ 255.;
// and now combine the background and foreground pixel, using the opacity,
// but only if opacity > 0.
for(int c = 0; opacity > 0 && c < output.channels(); ++c)
{
unsigned char foregroundPx =
foreground.data[fY * foreground.step + fX * foreground.channels() + c];
unsigned char backgroundPx =
background.data[y * background.step + x * background.channels() + c];
output.data[y*output.step + output.channels()*x + c] =
backgroundPx * (1.-opacity) + foregroundPx * opacity;
}
}
}
}
The FAST feature detector is faster than SURF and ORB. Moreover, finding 1500 features in a 640*480 picture takes too much time; 300 features is enough. So you can use this instead:
stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder(Size(3,1), 300));
The Stitcher class is very slow, so I suggest you implement the stitching pipeline yourself: detect features, compute descriptors, match them, find the homography, and then build a mask and warp; see the sketch below.
I don't understand your last question, "How can I determine the direction between two images?" What do you mean exactly?
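To illustrate, here is a minimal sketch of that manual pipeline, written against the OpenCV 3.x API (ORB::create rather than the older OrbFeaturesFinder); the file names, the 0.75 ratio-test threshold, and the canvas size are illustrative assumptions, not taken from the question:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
int main()
{
    Mat first = imread("first.jpg");
    Mat second = imread("second.jpg");
    // 1. Detect keypoints and compute descriptors.
    Ptr<ORB> orb = ORB::create(300);
    vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    orb->detectAndCompute(first, noArray(), kp1, desc1);
    orb->detectAndCompute(second, noArray(), kp2, desc2);
    // 2. Match descriptors and keep the good matches (Lowe's ratio test).
    BFMatcher matcher(NORM_HAMMING);
    vector<vector<DMatch> > knnMatches;
    matcher.knnMatch(desc2, desc1, knnMatches, 2);
    vector<Point2f> ptsSecond, ptsFirst;
    for (size_t i = 0; i < knnMatches.size(); ++i)
    {
        if (knnMatches[i].size() == 2 &&
            knnMatches[i][0].distance < 0.75f * knnMatches[i][1].distance)
        {
            ptsSecond.push_back(kp2[knnMatches[i][0].queryIdx].pt);
            ptsFirst.push_back(kp1[knnMatches[i][0].trainIdx].pt);
        }
    }
    if (ptsSecond.size() < 4)
        return -1; // not enough matches to estimate a homography
    // 3. Homography mapping the second image into the first image's frame.
    Mat H = findHomography(ptsSecond, ptsFirst, RANSAC);
    // 4. Warp the second image onto a wider canvas, then paste the first.
    Mat panorama;
    warpPerspective(second, panorama, H, Size(first.cols + second.cols, first.rows));
    first.copyTo(panorama(Rect(0, 0, first.cols, first.rows)));
    imwrite("panorama.jpg", panorama);
    return 0;
}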
Hi everyone, I tried using k-means clustering to group objects, so that I can use this clustering method to detect objects. I get output, but there are two problems: it is too slow (how can I solve this?), and the output window is as shown in the link below. Three output images are displayed instead of one; how can I solve this? I don't know where exactly the error lies.
http://tinypic.com/view.php?pic=30bd7dc&s=8#.VgkSIPmqqko
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main( )
{
Mat src = imread( "Light.jpg", 0 );
// imshow("fff",src);
// cvtColor(src,src,COLOR_BGR2GRAY);
Mat dst;
// pyrDown(src,src,Size( src.cols/2, src.rows/2 ),4);
// src=dst;
resize(src,src,Size(128,128),0,0,1);
Mat samples(src.rows * src.cols, 3, CV_32F);
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
// for( int z = 0; z < 3; z++)
samples.at<float>(y + x*src.rows) = src.at<uchar>(y,x);
cout<<"aaa"<<endl;
int clusterCount = 15;
Mat labels;
int attempts = 2;
Mat centers;
cout<<"aaa"<<endl;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers );
Mat new_image( src.size(), src.type() );
cout<<"aaa"<<endl;
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
{
int cluster_idx = labels.at<int>(y + x*src.rows,0);
new_image.at<uchar>(y,x) = centers.at<float>(cluster_idx,0);
//new_image.at<Vec3b>(y,x)[1] = centers.at<float>(cluster_idx, 1);
// new_image.at<Vec3b>(y,x)[2] = centers.at<float>(cluster_idx, 2);
}
imshow( "clustered image", new_image );
waitKey( 0 );
}
In your initial code, you have to change the intermediate Mat samples from 3 columns to 1 column (one feature per pixel) if you use grayscale images.
In addition, if you change the memory ordering so that pixels are visited row-major, matching how Mat stores its data, it might be faster (changed to (y*src.cols + x, 0) in both places):
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctime> // for clock()
using namespace cv;
using namespace std;
int main( )
{
clock_t start = clock();
Mat src = imread( "Light.jpg", 0 );
Mat dst;
resize(src,src,Size(128,128),0,0,1);
Mat samples(src.rows * src.cols, 1, CV_32F);
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
samples.at<float>(y*src.cols + x, 0) = src.at<uchar>(y,x);
int clusterCount = 15;
Mat labels;
int attempts = 2;
Mat centers;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers );
Mat new_image( src.size(), src.type() );
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
{
int cluster_idx = labels.at<int>(y*src.cols + x,0);
new_image.at<uchar>(y,x) = centers.at<float>(cluster_idx,0);
}
imshow( "clustered image", new_image );
clock_t end = clock();
std::cout << "time: " << (end - start)/(float)CLOCKS_PER_SEC << std::endl;
waitKey( 0 );
}