I have a problem creating a trackbar to adjust the brightness of the displayed picture.
This is my code (the part involved with brightness):
int brightness_value = 25; //global values
const int max_value = 255;
int main()
{
Mat brightImage;
srcImage.copyTo(brightImage);
namedWindow("Bright Image");
moveWindow("Bright Image", 300, 600);
createTrackbar("Brightness", "Bright Image", &brightness_value, max_value);
for (int i = 0; i < brightImage.rows; i++)
{
for (int j = 0; j < brightImage.cols; j++)
{
Vec3b pixelColor;
pixelColor = brightImage.at<Vec3b>(Point(j, i));
for (int k = 0; k < 3; k++) //vector with 3 byte entries
{
if (pixelColor[k] + getTrackbarPos("Brightness", "Bright Image") > 255)
pixelColor[k] = 255;
else
pixelColor[k] += getTrackbarPos("Brightness", "Bright Image");
brightImage.at<Vec3b>(Point(j, i)) = pixelColor;
}
}
}
imshow("Bright Image", brightImage);
waitKey(0);
return 0;
}
This way the brightness of the image is adjusted only once, when the program starts. But when I want to change it with the trackbar nothing happens. Where is the problem, how should I do it so the brightness will change every time I move the trackbar?
Thanks for any help :)
And that's the result: (on the left original image, on the right with changed brightness)
createTrackbar takes a pointer to a callback function which is called whenever the trackbar position changes. In this callback you should redraw your image with the new brightness level and refresh the window with imshow. The callback also receives a void* data pointer - it lets you pass any data you need when redrawing the image; in this case it should be a pointer to the output image (and probably also a pointer to the source image - you should always add the new brightness level to the original image, not to the already modified one):
struct Params {
cv::Mat* src;
cv::Mat* dest;
};
void makeBrightness(int pos, void* data) {
Params* params = (Params*)data;
for (int i = 0; i < params->src->rows; i++) {
for (int j = 0; j < params->src->cols; j++) {
Vec3b pixelColor;
pixelColor = params->src->at<Vec3b>(Point(j, i));
for (int k = 0; k < 3; k++) {
if (pixelColor[k] + pos > 255)
pixelColor[k] = 255;
else
pixelColor[k] += pos;
params->dest->at<Vec3b>(Point(j, i)) = pixelColor;
}
}
}
imshow("Bright Image", *(params->dest));
}
int main()
{
int brightness_value = 25; //global values
const int max_value = 255;
Mat srcImage = cv::imread("D:/lena.jpg");
Mat brightImage;
srcImage.copyTo(brightImage);
namedWindow("Bright Image");
moveWindow("Bright Image", 300, 600);
Params params;
params.src = &srcImage;
params.dest = &brightImage;
createTrackbar("Brightness", "Bright Image", &brightness_value, max_value, makeBrightness, ¶ms);
makeBrightness(brightness_value, ¶ms); // for first painting your image
waitKey(0);
return 0;
}
I would like to obtain the RGB values for any pixel I choose, or in a loop. This is currently how I achieve it:
Vec3b color = img.at<Vec3b>(Point(i, j));
and for the loop
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
Vec3b color = img.at<Vec3b>(Point(i,j));
img.at<Vec3b>(Point(i, j)) = color;
}
}
But when I apply the Canny/cvtColor functions it all messes up: an "Unhandled exception at memory location" error pops up.
I ran more tests and found that the loop works fine along the height, but along the width it only works up to (1/3) of the actual width. Most likely this has to do with Vec3b.
One suggested solution was
unsigned char color = img.at<unsigned char>(Point(i,j));
or
Vec<uchar, 3> color = img2.at <uchar>(Point(i,j));
But in the uchar case, how can I obtain the individual RGB values from color, and how do I set color back to the pixel?
FULL CODE:
int main() {
VideoCapture cap("Assets/test2.mp4");
if (!cap.isOpened()) {
std::cout << "Problem in reading" << std::endl;
return -1;
}
while (1) {
Mat frame;
cap >> frame;
if (frame.empty()) {
break;
}
frame = Imgfn(frame);
imshow("FRAME", frame);
char c = (char)waitKey(100);
if (c == 27) {
break;
}
}
cap.release();
destroyAllWindows();
return 0;
}
Mat Imgfn(Mat img) {
int width = img.size().width , height = img.size().height;
cvtColor(img, img, COLOR_BGR2GRAY);
GaussianBlur(img, img, Size(3,3),0,0);
Canny(img, img, 50, 150);
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
Vec3b color = img.at<Vec3b>(Point(i, j));
color[0] = 0; color[1] = 255; color[2] = 0;
img.at<Vec3b>(Point(i, j)) = color;
// so **Obtain, select, set** color
}
}
return img;
}
[About the mp4][1]
[1]: https://i.stack.imgur.com/9hOBV.png
EDIT - FOUND THE SOLUTION - from one of the answers suggested below. The problem is that when cvtColor converts to gray, it brings the image down from 3 channels to just 1 (Mat::channels() can be used to check), so a simple uchar access works fine. If 3 RGB channels are required, one could simply write their own RGB-to-gray function.
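For illustration only (my sketch, not from the original post): this shows how the channel count changes across the pipeline above and which access type then applies. Converting back to 3 channels with COLOR_GRAY2BGR is just one possible way to get Vec3b access again after Canny:
#include <opencv2/opencv.hpp>
using namespace cv;
// Sketch: after cvtColor/Canny the frame is CV_8UC1, so use uchar;
// converting back to BGR restores 3 channels, so Vec3b works again.
Mat markEdgesGreen(Mat frame) {                    // 'frame' is a BGR input frame
    Mat edges;
    cvtColor(frame, edges, COLOR_BGR2GRAY);        // 3 channels -> 1 channel
    Canny(edges, edges, 50, 150);
    CV_Assert(edges.channels() == 1);              // single channel: uchar access
    Mat out;
    cvtColor(edges, out, COLOR_GRAY2BGR);          // back to 3 channels
    for (int i = 0; i < out.cols; i++) {
        for (int j = 0; j < out.rows; j++) {
            if (edges.at<uchar>(Point(i, j)) > 0)              // edge pixel?
                out.at<Vec3b>(Point(i, j)) = Vec3b(0, 255, 0); // paint it green
        }
    }
    return out;
}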
Currently, how do you choose the type, such as { Vec3b, unsigned char, ... }?
If you access pixels of a Mat whose type you know, you already know the element type at that point, so just use it.
Otherwise, if you are in a situation where you don't know the pixel type of the Mat, you can use Mat::depth(), Mat::channels(), etc. to determine the type.
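As a rough sketch (the function name and the handled cases are illustrative, not from the original answer), dispatching on Mat::type() could look like this:
#include <opencv2/opencv.hpp>
#include <iostream>
// Sketch: read one pixel from a Mat whose type is only known at runtime.
void printPixel(const cv::Mat& m, int row, int col) {
    switch (m.type()) {
    case CV_8UC1:   // e.g. grayscale / Canny output
        std::cout << (int)m.at<uchar>(row, col) << std::endl;
        break;
    case CV_8UC3: { // e.g. BGR image from imread
        cv::Vec3b p = m.at<cv::Vec3b>(row, col);
        std::cout << (int)p[0] << " " << (int)p[1] << " " << (int)p[2] << std::endl;
        break;
    }
    case CV_32FC1:  // e.g. float image
        std::cout << m.at<float>(row, col) << std::endl;
        break;
    default:
        std::cout << "unhandled type " << m.type() << std::endl;
    }
}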
I want to move through an image and take a 5x5 grid centered on each pixel in the image. I then want to sum that grid and compare it to a threshold.
int main()
{
Mat element = getStructuringElement(MORPH_RECT, Size(7, 7));
Mat im = imread("blob.png", IMREAD_GRAYSCALE);
bool fromCenter = false;
namedWindow("Crop frame", WINDOW_NORMAL);
Rect2d r = selectROI("Crop frame", im, fromCenter);
im = im(r);
erode(im, im, element);
Mat clone = im;
int sectionSize = 4;
int width = im.cols - sectionSize/2;
int height = im.rows - sectionSize/2;
int sum = 0;
int counter = 0;
for (int i = sectionSize/2; i < width; i++) {
for (int j = sectionSize/2; j < height; j++) {
Rect rect = Rect(i, j, sectionSize, sectionSize);
rect -= Point(rect.width / 2, rect.height / 2);
Mat temp = im(rect);
for (int x = 0; x < temp.cols; x++) {
for (int y = 0; y < temp.rows; y++) {
int pixelValue = (int)temp.at<uchar>(y, x);
sum += pixelValue;
}
}
cout << sum << endl;
if (sum > 3800) {
clone.at<uchar>(j, i) = 255;
}
else {
clone.at<uchar>(j, i) = 0;
}
namedWindow("erode", WINDOW_NORMAL);
imshow("erode", clone);
waitKey(1);
sum = 0;
}
}
}
I am getting fluctuations in the pixel sum based on where I select my ROI in the image, even when both selections are over white space. Also, my pixel sum changes when I change the value of the clone pixel in this section of the code, which I do not understand at all:
if (sum > 3800) {
clone.at<uchar>(j, i) = 255;
}
else {
clone.at<uchar>(j, i) = 0;
}
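A detail worth knowing here (my note, not part of the original post): Mat clone = im; only copies the Mat header, so clone and im share the same pixel buffer; writing 255/0 into clone therefore also changes the data that later iterations read from im, which would affect the sums. A minimal sketch of the difference between a header copy and a deep copy:
#include <opencv2/opencv.hpp>
#include <iostream>
int main() {
    cv::Mat im(3, 3, CV_8UC1, cv::Scalar(100));
    cv::Mat shallow = im;          // header copy: shares the same pixel data
    cv::Mat deep    = im.clone();  // deep copy: owns its own pixel data
    shallow.at<uchar>(0, 0) = 255; // also visible through 'im'
    deep.at<uchar>(0, 0)    = 0;   // does not affect 'im'
    std::cout << (int)im.at<uchar>(0, 0) << std::endl; // prints 255
    return 0;
}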
I want to make a negative transformation of an image, which should be a very simple program.
But when I run the program, even though I want to transform all of the pixels in the image, only 1/3 of them are processed. I'm not sure where it goes wrong; I followed all the code from the book, but the result is different.
I think there is something wrong with the columns, but when I replace I.cols in the negativeImage function with the actual width of the image, the output stays the same: only 1/3 of the image is processed. If I use 3 times I.cols, all of the pixels in the image are processed.
vector<uchar> getNegativeLUT() {
vector<uchar> LUT(256, 0);
for (int i = 0; i < 256; ++i)
LUT[i] = (uchar)(255 - i);
return LUT;
}
void negativeImage(Mat& I) {
vector<uchar> LUT = getNegativeLUT();
for (int i = 0; i < I.rows; ++i) {
for (int j = 0; j < I.cols; ++j) {
I.at<uchar>(i, j) = LUT[I.at<uchar>(i, j)];
//stack overflow
}
}
}
int main() {
Mat image = imread("1.png");
Mat processed_image2 = image.clone();
negativeImage(processed_image2);
printf("%d", image.cols);
imshow("Input Image", image);
imshow("Negative Image", processed_image2);
waitKey(0);
return 0;
}
Output Image
You need to use the correct type with the at<> operator. Your PNG image has to be converted to 8UC1 before you can use the uchar type to access each pixel. I suppose your image has 3 channels, so you only iterate over 1/3 of the image. Also, I suggest you use the ptr<> operator in the rows loop and then access each pixel as an array element:
Mat M;
cvtColor(I, M, CV_BGR2GRAY);
// M is CV_8UC1 type
for(int i = 0; i < M.rows; i++)
{
uchar* p = M.ptr<uchar>(i);
for(int j = 0; j < M.cols; j++)
{
p[j] = LUT[p[j]];
}
}
EDIT: you should use cv::LUT instead of doing it yourself.
cv::Mat lut(1, 256, CV_8UC1);
for( int i = 0; i < 256; ++i)
{
lut.at<uchar>(0,i) = uchar(255-i);
}
cv::Mat result;
cv::LUT(M, lut, result);
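As a side note (my sketch, not from the original answer): cv::LUT applies a single-channel 256-entry table to every channel of a multi-channel image, so the negative could also be computed directly on the 3-channel image loaded in main, without converting to gray first:
cv::Mat negative;
cv::LUT(image, lut, negative);  // 'image' is the 3-channel BGR Mat from imread
cv::imshow("Negative Image", negative);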
I have an 800x800 image which is broken down into 16 blocks of 200x200.
(you can see the previous post here)
These blocks are: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now, I want to be able to get the same images/blocks back again, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
{
Mat mf( i,j, CV_32F, pdata + 200 );
imshow( "Display window", mf );
waitKey(0);
}
}
So, the problem is that I am receiving an
OpenCV Error: Assertion failed
in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800,800,CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i+= 200 )
{
for( int j = 0; j < OriginalImgSize.width - 4; j+= 200, Idx++ )
{
Mat mf( i,j, CV_32F, pdata + 200 );
Rect roi(j,i,200,200);
mf.copyTo( big(roi) );
}
}
imwrite( "testing" , big );
This gives me:
OpenCV Error: Assertion failed (!fixedSize()) in release
in mf.copyTo( big(roi) );.
First, you need to know where your subimages go in the big image. To do this, you can save the rect of each subimage in a vector<Rect> smallImageRois;
Then you can use pointers (keep in mind that the subimages are not continuous), or simply use copyTo to the correct place.
Have a look:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
Mat3b img = imread("path_to_image");
resize(img, img, Size(800, 800));
Mat grayImg;
cvtColor(img, grayImg, COLOR_BGR2GRAY);
grayImg.convertTo(grayImg, CV_32F);
int N = 4;
if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
{
// Error
return -1;
}
Size graySize = grayImg.size();
Size smallSize(grayImg.cols / N, grayImg.rows / N);
vector<Mat> smallImages;
vector<Rect> smallImageRois;
for (int i = 0; i < graySize.height; i += smallSize.height)
{
for (int j = 0; j < graySize.width; j += smallSize.width)
{
Rect rect = Rect(j, i, smallSize.width, smallSize.height);
smallImages.push_back(grayImg(rect));
smallImageRois.push_back(rect);
}
}
// Option 1. Using pointer to subimage data.
Mat big1(800, 800, CV_32F);
int big1step = big1.step1();
float* pbig1 = big1.ptr<float>(0);
for (int idx = 0; idx < smallImages.size(); ++idx)
{
float* pdata = (float*)smallImages[idx].data;
int step = smallImages[idx].step1();
Rect roi = smallImageRois[idx];
for (int i = 0; i < smallSize.height; ++i)
{
for (int j = 0; j < smallSize.width; ++j)
{
pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
}
}
}
// Option 2. Using copyTo
Mat big2(800, 800, CV_32F);
for (int idx = 0; idx < smallImages.size(); ++idx)
{
smallImages[idx].copyTo(big2(smallImageRois[idx]));
}
return 0;
}
For concatenating the sub-images into a single square image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat> &patches) {
assert(patches.size() > 0);
// make it square
const int patch_width = patches[0].cols;
const int patch_height = patches[0].rows;
const int patch_stride = ceil(sqrt(patches.size()));
Mat image = Mat::zeros(patch_stride * patch_height, patch_stride * patch_width, patches[0].type());
for (size_t i = 0, iend = patches.size(); i < iend; i++) {
Mat &patch = patches[i];
const int offset_x = (i % patch_stride) * patch_width;
const int offset_y = (i / patch_stride) * patch_height;
// copy the patch to the output image
patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
}
return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a square image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = {Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255)};
// fill vector with circles of different colours
for(int i = 0; i < 16; i++) {
Mat patch = Mat::zeros(100,100, CV_32FC3);
circle(patch, Point(50,50), 40, colours[i % 3], -1);
patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
Will produce the following image
Print the values of i and j before creating Mat mf and I believe you will soon be able to find the error.
Hint 1: i and j will be 0 the first time.
Hint 2: Use copyTo() with a ROI, like:
cv::Rect roi(0,0,200,200);
src.copyTo(dst(roi));
Edit:
Hint 3: Try not to do such pointer fiddling; you will get into trouble, especially if you ignore the step (as you seem to do).
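If raw pointer access is still wanted, a minimal sketch (the function and names are illustrative, not from the original answer) is to fetch a fresh row pointer with ptr<float>() for every row, which already accounts for the Mat's step, even for non-continuous ROIs:
#include <opencv2/opencv.hpp>
// Sketch: fill a CV_32F sub-image row by row; ptr<float>(r) respects the step.
void fillBlock(cv::Mat& block, float value) {
    CV_Assert(block.type() == CV_32F);
    for (int r = 0; r < block.rows; ++r) {
        float* row = block.ptr<float>(r);   // valid row start, step-aware
        for (int c = 0; c < block.cols; ++c)
            row[c] = value;
    }
}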
I'm new to OpenCV and C++. I would like to change the pixel values of an image I loaded and display that new image in another window to compare the results (just visually). However, when I run the code, I get two original images. This means that either my for loop isn't doing what it's supposed to do (which I doubt, since it makes sense to me) or the pixel values are lost and not saved into the new image. I read a previous post that said I should include the statement img.at<Vec3b>(Point(x,y)) = color after working with each pixel, to set it in the altered image.
Could somebody please tell me what I'm doing wrong?
Thank you
cv::Mat img = cv::imread("12.jpg", CV_LOAD_IMAGE_COLOR);
// start of pixel navigation
Mat navigateImage(Mat) {
for(int x = 0; x > img.rows; x++)
{
for(int y = 0; y > img.cols; y++){
Vec3b color = img.at<Vec3b>(Point(x,y));
if ( color[0] > 10 && color [1] > 10 && color[2]> 10 )
{
color[0] = 0 ;
color[1] = 0;
color[2] = 0;
img.at<Vec3b>(Point(x,y)) = color;
}
else
{
color.val[0] = 255 ;
color.val[1] = 255;
color.val[2] = 255;
img.at<Vec3b>(Point(x,y)) = color;
}
}
}
return img;
}
// end of pixel navigation
int main( int argc, char** argv )
{
if(! img.data){
cout << "could not open or find the image" << endl;
return -1;}
Mat newImage = navigateImage(img);
cv::imshow( " Original", img);
cv::imshow( " Altered ", newImage);
cv::waitKey(0);
return 0;
}
(1). Firstly,
for(int x = 0; x > img.rows; x++)
and
for(int y = 0; y > img.cols; y++)
should be
for(int x = 0; x < img.cols; x++)
and
for(int y = 0; y < img.rows; y++)
respectively.
Since you never enter the loop because of this mistake, both images are the same.
(2). Secondly,
Mat navigateImage(Mat)
should be
Mat navigateImage(Mat img)
(3). Thirdly, put
cv::Mat img = cv::imread("12.jpg", CV_LOAD_IMAGE_COLOR);
in main function.
(4). Lastly,
replace,
Mat newImage = navigateImage(img);
by
Mat newImage = navigateImage(img.clone());
otherwise, both images will be the same.
CORRECTED CODE -
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
// start of pixel navigation
Mat navigateImage(Mat img) {
for(int x = 0; x < img.cols; x++)
{
for(int y = 0; y < img.rows; y++){
Vec3b color = img.at<Vec3b>(Point(x,y));
if ( color[0] > 10 && color [1] > 10 && color[2]> 10 )
{
color[0] = 0 ;
color[1] = 0;
color[2] = 0;
img.at<Vec3b>(Point(x,y)) = color;
}
else
{
color.val[0] = 255 ;
color.val[1] = 255;
color.val[2] = 255;
img.at<Vec3b>(Point(x,y)) = color;
}
}
}
return img;
}
// end of pixel navigation
int main( int argc, char** argv )
{
Mat img = cv::imread("12.png", CV_LOAD_IMAGE_COLOR);
if(! img.data){
cout << "could not open or find the image" << endl;
return -1;
}
Mat newImage = navigateImage(img.clone());
cv::imshow( " Original", img);
cv::imshow( " Altered ", newImage);
cv::waitKey(0);
return 0;
}