I am trying to convert the code from that second answer into C++. What I did is not giving me the expected result. Here is my code:
{
    Mat img = imread("messi5.jpg");
    int level_n = 2;
    Mat p = Mat::zeros(img.cols*img.rows, 3, CV_32F);
    vector<Mat> bgr;
    cv::split(img, bgr);
    // Divide each pixel color with 127 for level 2
    for (int i = 0; i < img.cols*img.rows; i++) {
        p.at<float>(i,0) = bgr[0].data[i] / 127.0;
        p.at<float>(i,1) = bgr[1].data[i] / 127.0;
        p.at<float>(i,2) = bgr[2].data[i] / 127.0;
    }
    vector<Mat> Img2 = p[bgr];
    Mat out;
    cv::transform(img, out, p);
    imshow("output", out);
}
What I don't understand is how to put these colours, which I divided by 127, back into a matrix. Where am I going wrong?
The other way I am trying is:
vector<Mat> bgr;
Mat blue, green, red;
cv::split(img, bgr);
blue = bgr[0] / 127.0;
if (blue > 128)
{
    blue = 255;
}
else
{
    blue = 0;
}
and the same for red and green.
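For reference, the element-wise comparison this second attempt is aiming for is usually written with cv::threshold rather than a plain if (a Mat cannot be used directly in an if condition); a minimal sketch, assuming img is already loaded:
std::vector<cv::Mat> bgr;
cv::split(img, bgr);
for (int k = 0; k < 3; k++)
    // element-wise threshold: pixels above 127 become 255, the rest 0
    cv::threshold(bgr[k], bgr[k], 127, 255, cv::THRESH_BINARY);
cv::Mat quantized;
cv::merge(bgr, quantized);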
Why not just do it in place (for level 2):
Mat img = imread("messi5.jpg");

for (int i = 0; i < img.rows; i++)
    for (int j = 0; j < img.cols; j++) {
        cv::Vec3b p = img.at<cv::Vec3b>(i,j);
        for (int k = 0; k < img.channels(); k++)
            p[k] = p[k] > 127 ? 255 : 0;
        img.at<cv::Vec3b>(i,j) = p;
    }

// do whatever you want with processed image img
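If more than two levels are needed later (the question declares level_n), one way to generalise the same idea is a lookup table; a minimal sketch, assuming uniform bands and level_n >= 2 (for level_n = 2 it reproduces the 0/255 mapping above):
int level_n = 2;                               // number of output levels per channel
int band = 256 / level_n;                      // width of each input band
cv::Mat lut(1, 256, CV_8U);
for (int v = 0; v < 256; v++) {
    int idx = v / band;                        // which band this value falls into
    if (idx > level_n - 1) idx = level_n - 1;
    lut.at<uchar>(0, v) = cv::saturate_cast<uchar>(idx * 255 / (level_n - 1));
}
cv::Mat quantized;
cv::LUT(img, lut, quantized);                  // the table is applied to every channel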
How to get the brightness and contrast of a range of pixels and apply it as a gradient to another image in OpenCV C++?
I've tried to do that with the code below, but I didn't get a good result.
I want to apply the brightness and contrast of the second part of image1 to the first part of image2 as a gradient; the final goal is to stitch the two images.
Mat image = imread("1.jpg" );
Mat image2 = imread("2.jpg" );
const int darkness_threshold = 128;
cv::Mat hsv;
cvtColor(image, hsv, COLOR_BGR2HSV);
const auto result = cv::mean(hsv);
cv::Mat hsv2;
cvtColor(image2, hsv2, COLOR_BGR2HSV);
const auto result2 = cv::mean(hsv2);
cout<<"resultat1: "<<result<<endl;
cout<<"resultat2: "<<result2<<endl;
Mat new_image = Mat::zeros( image.size(), image.type() );
double alpha = result2[0] - result[0];
double beta = result2[2] - result[2];
for (int y = 0; y < image.rows; y++) {
    for (int x = 0; x < image.cols; x++) {
        for (int c = 0; c < image.channels(); c++) {
            new_image.at<Vec3b>(y,x)[c] =
                saturate_cast<uchar>(alpha * image.at<Vec3b>(y,x)[c] + beta);
        }
    }
}
namedWindow("New Image",WINDOW_NORMAL);
imshow("New Image", new_image);
namedWindow("Image2",WINDOW_NORMAL);
imshow("Image2", image2);
imwrite("imgs1.jpg",new_image);
imwrite("imgs2.jpg",image2);
waitKey();
return 0;
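For what it's worth, a common way to express the brightness/contrast transfer described above is to match the mean and standard deviation of the two regions' grey levels, rather than subtracting HSV means; a minimal sketch under that assumption (the helper name matchBrightnessContrast is illustrative, not from the original post, and whole images stand in for the exact sub-regions):
#include <opencv2/opencv.hpp>

// Hypothetical helper: rescales src so its grey-level mean and stddev match ref's.
cv::Mat matchBrightnessContrast(const cv::Mat& src, const cv::Mat& ref)
{
    cv::Mat srcGray, refGray;
    cv::cvtColor(src, srcGray, cv::COLOR_BGR2GRAY);
    cv::cvtColor(ref, refGray, cv::COLOR_BGR2GRAY);

    cv::Scalar srcMean, srcStd, refMean, refStd;
    cv::meanStdDev(srcGray, srcMean, srcStd);
    cv::meanStdDev(refGray, refMean, refStd);

    // new = alpha * old + beta, chosen so that the mean and stddev of src match ref
    double alpha = (srcStd[0] > 1e-6) ? refStd[0] / srcStd[0] : 1.0;
    double beta  = refMean[0] - alpha * srcMean[0];

    cv::Mat out;
    src.convertTo(out, -1, alpha, beta);  // same gain/offset applied to all channels
    return out;
}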
I would like to know how to remove the black border from the following frame in OpenCV using C++
Original Image
Result
Any help would be really appreciated.
To remove some non-black noise I recommend using cv::threshold and morphological closing. Then you can simply trim away border rows and columns until you reach one that contains (for example) more than 5% non-black pixels.
I tried the following code and it works for your example:
int main()
{
    const int threshVal = 20;
    const float borderThresh = 0.05f; // 5%

    cv::Mat img = cv::imread("img.jpg", cv::IMREAD_GRAYSCALE);

    cv::Mat thresholded;
    cv::threshold(img, thresholded, threshVal, 255, cv::THRESH_BINARY);
    cv::morphologyEx(thresholded, thresholded, cv::MORPH_CLOSE,
        cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)),
        cv::Point(-1, -1), 2, cv::BORDER_CONSTANT, cv::Scalar(0));

    cv::imshow("thresholded", thresholded);

    cv::Point tl, br;
    for (int row = 0; row < thresholded.rows; row++)
    {
        if (cv::countNonZero(thresholded.row(row)) > borderThresh * thresholded.cols)
        {
            tl.y = row;
            break;
        }
    }
    for (int col = 0; col < thresholded.cols; col++)
    {
        if (cv::countNonZero(thresholded.col(col)) > borderThresh * thresholded.rows)
        {
            tl.x = col;
            break;
        }
    }
    for (int row = thresholded.rows - 1; row >= 0; row--)
    {
        if (cv::countNonZero(thresholded.row(row)) > borderThresh * thresholded.cols)
        {
            br.y = row;
            break;
        }
    }
    for (int col = thresholded.cols - 1; col >= 0; col--)
    {
        if (cv::countNonZero(thresholded.col(col)) > borderThresh * thresholded.rows)
        {
            br.x = col;
            break;
        }
    }

    cv::Rect roi(tl, br);
    cv::Mat cropped = img(roi);
    cv::imwrite("cropped.jpg", cropped);
    return 0;
}
Please note that in order to get the best results on all your samples you may need to adjust some parameters: threshVal and borderThresh.
Also you may want to read good tutorials about thresholding and morphology transformations.
Building on akarsakov's answer: it will crop out the black parts of the input image, but it writes the cropped image in grayscale. If you are after colour, try changing and adding these lines.
#include "opencv2/opencv.hpp"
using namespace cv;
// Read your input image
Mat img = imread("img.jpg");
// Prepare new grayscale image
Mat input_img_gray;
// Convert img to grayscale (imread loads BGR, so use CV_BGR2GRAY)
cvtColor (img, input_img_gray, CV_BGR2GRAY);
Mat thresholded;
// Threshold uses grayscale image
threshold(input_img_gray, thresholded, threshVal, 255, cv::THRESH_BINARY);
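After those changes, the border search still runs on thresholded exactly as in akarsakov's answer; only the final crop is taken from the colour img. A minimal sketch of the closing lines, assuming tl and br were found the same way as above:
cv::Rect roi(tl, br);
cv::Mat cropped = img(roi);           // img is still the colour image here
cv::imwrite("cropped.jpg", cropped);  // the written file keeps its colours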
I'd recommend ticking akarsakov's answer because it definitely works. This is just for anyone looking to output a coloured image :)
I want to blend two images like the multiply blending mode in Photoshop, and I want to do the same in OpenCV using C++ for my app. I have visited this link many times and tried to understand it each time, but I didn't get it. I searched a lot but didn't find what I want other than this, and that one is a bit strange because it involves a lot of conversion from IplImage to other types. Any help, guide, idea, or example related to OpenCV is welcome. I have gone through addWeighted, but I think it is quite different from multiply blending.
The formula I saw here is:
Target * Blend
and below is what I tried:
Mat img1 = imread("E:\\img.jpg");
Mat img2 = Mat (img1.size(),img1.type());
vector<Mat> colors_1;
split(img2, colors_1);
colors_1[0] = 113;
colors_1[1] = 221;
colors_1[2] = 216;
merge(colors_1,img2);
Mat result(img1.size(), CV_32F);
for (int i = 0; i < img1.size().height; ++i) {
    for (int j = 0; j < img1.size().width; ++j) {
        for (int rgb = 0; rgb <= img1.channels(); rgb++) {
            float target = float(img1.at<uchar>(i, j)) / 255;
            float blend = float(img2.at<uchar>(i, j)) / 255;
            result.at<float>(i, j) = target * blend;
        }
    }
}
The result is in grayscale and it doesn't look right.
Thank you
You are not accessing the image channels correctly. Moreover, you do not need to store the result in a float image; uchar is fine. Also, your loop over the RGB channels should end when rgb < img1.channels().
Try this code:
cv::Mat img1 = cv::imread("E:\\img.jpg");
cv::Mat img2 = cv::Mat (img1.size(),img1.type());
std::vector<cv::Mat> colors_1;
cv::split(img2, colors_1);
colors_1[0] = 113;
colors_1[1] = 221;
colors_1[2] = 216;
cv::merge(colors_1,img2);
cv::Mat result(img1.size(), CV_8UC3);
for (int i = 0; i < img1.rows; ++i) {
    for (int j = 0; j < img1.cols; ++j) {
        for (int c = 0; c < img1.channels(); c++) {
            uchar target = img1.at<uchar>(i, 3*j+c);
            uchar blend = img2.at<uchar>(i, 3*j+c);
            result.at<uchar>(i, 3*j+c) = cv::saturate_cast<uchar>(target * blend / 255.);
        }
    }
}
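As a side note (not part of the answer above), the same Target * Blend formula can also be written without explicit loops, because cv::multiply accepts a scale factor; a minimal sketch, assuming img1 and img2 are both CV_8UC3 and the same size:
cv::Mat result;
// per-element product scaled by 1/255, i.e. (target * blend) / 255 for each channel
cv::multiply(img1, img2, result, 1.0 / 255.0);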
I am attempting to use OpenCV to grab frames from a webcam and display them in a window using SFML.
VideoCapture returns frames in OpenCV's Mat format. To display the frames, SFML requires a 1D array of pixels in its uint8 format, which (as far as I can tell) is interchangeable with uchar. This array is expected to represent 32 bits per pixel RGBA.
So, I have a uchar array, and I'm looping over the Mat data and copying each pixel:
VideoCapture cap(0);
Mat frame;
cap >> frame;
uchar* camData = new uchar[640*480*4];
uchar* pixelPtr = frame.data;
for (int i = 0; i < frame.rows; i++)
{
    for (int j = 0; j < frame.cols; j++)
    {
        camData[i*frame.cols + j + 2] = pixelPtr[i*frame.cols + j + 0]; // B
        camData[i*frame.cols + j + 1] = pixelPtr[i*frame.cols + j + 1]; // G
        camData[i*frame.cols + j + 0] = pixelPtr[i*frame.cols + j + 2]; // R
        camData[i*frame.cols + j + 3] = 255;
    }
}
img.LoadFromPixels(640, 480, camData); //Load pixels into SFML Image object for display
Unfortunately, this doesn't quite work. Something in that loop is wrong, as the resulting image when I load and display camData is scrambled.
As far as I can discern, either my math in the loop is wrong so the pixels are being assigned wrong, or the Mat data is in some format other than BGR.
Any ideas?
OpenCV can do all the work for you:
VideoCapture cap(0);
Mat frame;
cap >> frame;
uchar* camData = new uchar[frame.total()*4];
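// continuousRGBA below is just a header over camData (no copy), so cvtColor writes straight into the SFML buffer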
Mat continuousRGBA(frame.size(), CV_8UC4, camData);
cv::cvtColor(frame, continuousRGBA, CV_BGR2RGBA, 4);
img.LoadFromPixels(frame.cols, frame.rows, camData);
I like the accepted answer better but this snippet helps you understand what's going on.
for (int i = 0; i < srcMat.rows; i++) {
    for (int j = 0; j < srcMat.cols; j++) {
        int srcIndex = (i * srcMat.cols + j) * 3;  // 3 bytes per BGR source pixel
        int dstIndex = (i * srcMat.cols + j) * 4;  // 4 bytes per RGBA destination pixel
        // copy while converting from BGR to RGBA order
        dstRGBA[dstIndex + 0] = srcMat.data[srcIndex + 2];
        dstRGBA[dstIndex + 1] = srcMat.data[srcIndex + 1];
        dstRGBA[dstIndex + 2] = srcMat.data[srcIndex + 0];
        dstRGBA[dstIndex + 3] = 255;               // the source has no alpha channel
    }
}
The following code worked for me:
VideoCapture capture(0);
Mat mat_frame;
capture >> mat_frame; // get a new frame from camera

// Be sure that we are dealing with the RGB colour space...
Mat rgbFrame(mat_frame.rows, mat_frame.cols, CV_8UC3);
cvtColor(mat_frame, rgbFrame, CV_BGR2RGB);

// ...now convert it to RGBA, adding a constant alpha channel
Mat alpha(rgbFrame.rows, rgbFrame.cols, CV_8UC1, Scalar(255));
Mat newSrc(rgbFrame.rows, rgbFrame.cols, CV_8UC4);
Mat in[] = { rgbFrame, alpha };           // source channels 0-2 are RGB, channel 3 is alpha
int from_to[] = { 0,0, 1,1, 2,2, 3,3 };
mixChannels(in, 2, &newSrc, 1, from_to, 4);
The result (newSrc) is an RGBA image with a fully opaque alpha channel.
I have a process that detects similar images using SURF, and I want to add a check to know which images are real camera photos and which ones are vectorial images such as logos or map screenshots.
Examples:
Photo: http://images.gta-travel.com/HH/Images/J/TYO/TYO-NEW3-8.jpg
Logo: http://estaticos.transhotel.com/img/fotos/hoteles/000137/hft000137578_005.jpg
Logo: http://live.viajesurbis.com/vuweb/content/fichashotel/13127/HOTEL_13127_2.jpg
I tried looking at the grey histogram (and the colour histograms), but nothing gives me enough information to tell which ones are vectorial and which are not.
OK, solved it. The code below cleans the histogram, converts the image to greyscale, and counts the distinct grey levels. Maybe in the future I will test whether working with the per-channel histograms improves the algorithm.
CvHistogram* wImage::getHistogram() {
    IplImage* gray = cvCreateImage(cvGetSize(this->image), 8, 1);
    CvHistogram* hist;
    int hist_size = 256;
    float range[] = {0, 256};
    float* ranges[] = {range};

    cvCvtColor(this->image, gray, CV_RGB2GRAY);
    hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1);
    cvCalcHist(&gray, hist, 0, NULL);
    cvReleaseImage(&gray); // the grayscale copy is no longer needed
    return hist;
}

bool wImage::isVectorial() {
    CvHistogram* hist = this->getHistogram();

    int height = 240;
    float max_value = 0, min_value = 0;
    cvGetMinMaxHistValue(hist, &min_value, &max_value);

    int total = 0;
    int colors = 0;
    float value;
    int normalized;

    for (int i = 0; i < 256; i++) {
        value = cvQueryHistValue_1D(hist, i);
        normalized = cvRound(value * height / max_value);
        if (normalized < 2 || normalized > 230) {
            continue;
        }
        colors++;
        total += normalized;
    }
    cvReleaseHist(&hist); // free the histogram allocated in getHistogram()

    if ((total < 500 && colors < 100) || (total < 1000 && colors < 85)) {
        return true;
    }
    return false;
}
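For anyone on a newer OpenCV, the same distinct-grey-level count can be written against the C++ API; a minimal sketch (the helper name looksVectorial is an assumption, and the cut-offs 2, 230, 500, 1000, 100 and 85 are simply kept from the code above):
bool looksVectorial(const cv::Mat& bgr)
{
    cv::Mat gray, hist;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);

    // 256-bin grey-level histogram, equivalent to the cvCalcHist call above
    int channels[] = {0};
    int histSize[] = {256};
    float range[] = {0, 256};
    const float* ranges[] = {range};
    cv::calcHist(&gray, 1, channels, cv::Mat(), hist, 1, histSize, ranges);

    double max_value = 0;
    cv::minMaxLoc(hist, nullptr, &max_value);

    const int height = 240;
    int total = 0, colors = 0;
    for (int i = 0; i < 256; i++) {
        int normalized = cvRound(hist.at<float>(i) * height / max_value);
        if (normalized < 2 || normalized > 230)
            continue;
        colors++;
        total += normalized;
    }
    return (total < 500 && colors < 100) || (total < 1000 && colors < 85);
}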