Hello, I have a map_server that reads in a PGM file, but it renders a vertically flipped image of it on a QImage.
Here is my code.
int width = msg->info.width;
int height = msg->info.height;
const std::vector<int8_t>& data (msg->data);
unsigned char *p, *q;
if( mapImage == NULL ) {
lineImage = new unsigned char[ width * 3 ];
mapImage = new QImage( width, height, QImage::Format_RGB888 );
}
if( mapImage != NULL ) {
for( int i = 0; i < height; i++ ) {
q = lineImage;
p = ( unsigned char * )&data[ i * width ];
for( int j = 0; j < width; j++ ) {
*q++ = *p;
*q++ = *p;
*q++ = *p;
p++;
}
memcpy( mapImage->scanLine( i ), lineImage, width * 3 );
}
}
printf( "Map received!\n" );
The "for loop" for height takes in from "0" till the limit(height) and I can assume that the picture it reads in the limit, till "0".
I can't provide Images due to the reputation. But I still hope I could garner some help in this...
Thanks!
JeremyEthanKoh.
When converting between JPG and BMP, scan lines are inverted in Y. This is specific to the BMP format. It seems that your QImage is an RGB 24-bit bitmap and you write directly into its pixel map line by line. Just reverse the scanlines in Y:
if( mapImage != NULL ) {
for( int i = 0; i < height; i++ ) {
q = lineImage;
p = ( unsigned char * )&data[ i * width ];
for( int j = 0; j < width; j++ ) {
*q++ = *p;
*q++ = *p;
*q++ = *p;
p++;
}
memcpy( mapImage->scanLine( height-i-1 ), lineImage, width * 3 );
}
}
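As an alternative, if you prefer to keep the original copy loop untouched, Qt can flip the finished image for you; a one-line sketch, assuming Qt 4/5's QImage::mirrored(), which returns a flipped copy:
// after filling mapImage in the original top-to-bottom order,
// replace it with a vertically mirrored copy
*mapImage = mapImage->mirrored( false, true ); // horizontal = false, vertical = true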
Related
I'm learning some basic image processing and using a grayscale BMP file to work through some algorithms, but I'd like to convert my code to output color BMP files instead of grayscale ones. I'm using the EasyBMP library and have the following to read in and write to my BMP file:
bool Image::readFromBMPFile(const std::string & inputFileName){
bool success = true;
// use BMP object to read image
BMP inputImage;
success = inputImage.ReadFromFile(inputFileName.c_str() );
if( success ){
// allocate memory for image (deleting old, if exists)
m_numRows = inputImage.TellHeight();
m_numCols = inputImage.TellWidth();
if( m_pixels != NULL ){
// deallocate old memory
delete [] m_pixels;
}
m_pixels = new double[m_numRows * m_numCols];
// copy pixels
for( int r = 0; r < m_numRows; ++r ){
for( int c = 0; c < m_numCols; ++c ){
RGBApixel pixelVal = inputImage.GetPixel(c, r);
double val = (double) pixelVal.Blue + (double) pixelVal.Green + (double) pixelVal.Red;
val = (val / 3.0 + 0.5);
m_pixels[r * m_numCols + c] = val;
}
}
}
return success;
}
bool Image::writeToBMPFile(const std::string & outputFileName){
bool success = true;
if( m_pixels != NULL ){
// create bitmap image
BMP outputImage;
outputImage.SetSize(m_numCols, m_numRows);
outputImage.SetBitDepth( 24 );
double maxVal = m_pixels[0];
double minVal = m_pixels[0];
// Maximum and minimum values
for( int i = 1; i < m_numRows * m_numCols; ++i ){
if( m_pixels[i] > maxVal ){
maxVal = m_pixels[i];
}
if( m_pixels[i] <= minVal ){
minVal = m_pixels[i];
}
}
for( int r = 0; r < m_numRows; ++r ){
for( int c = 0; c < m_numCols; ++c ){
// get pixel value and clamp between 0 and 255
double val = 255.0 * (m_pixels[r * m_numCols + c] - minVal) / (maxVal - minVal);
if( val < 0 ){
val = 0;
}
if( val > 255 ){
val = 255;
}
// set output color based on mapping
RGBApixel pixelVal;
pixelVal.Blue = (int)val;
pixelVal.Green = (int)val;
pixelVal.Red = (int)val;
outputImage.SetPixel(c, r, pixelVal);
}
}
// write to file
success = outputImage.WriteToFile( outputFileName.c_str() );
} else {
success = false;
}
return success;
}
What steps should I take to make my program compatible with RGB images?
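One possible direction (only a sketch of the idea; the interleaved three-doubles-per-pixel layout is my own assumption, not something EasyBMP requires): store three values per pixel instead of one averaged gray value, and copy each channel separately when reading and writing. The read side could look roughly like this:
// assumption: m_pixels now holds 3 doubles per pixel, interleaved as R, G, B,
// indexed as (r * m_numCols + c) * 3 + channel
m_pixels = new double[m_numRows * m_numCols * 3];
for( int r = 0; r < m_numRows; ++r ){
for( int c = 0; c < m_numCols; ++c ){
RGBApixel pixelVal = inputImage.GetPixel(c, r);
int base = (r * m_numCols + c) * 3;
m_pixels[base + 0] = (double) pixelVal.Red;
m_pixels[base + 1] = (double) pixelVal.Green;
m_pixels[base + 2] = (double) pixelVal.Blue;
}
}
// writeToBMPFile would then fill pixelVal.Red/Green/Blue from the three stored
// values instead of reusing one gray value for all of them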
I tried to implement a Gaussian blur operation using C++ (OpenCV).
This is the code
int mask [3][3] = {1 ,2 ,1 ,
2 ,3 ,2 ,
1 ,2 ,1 };
int getPixel ( unsigned char * arr , int col , int row ) {
int sum = 0;
for ( int j = -1; j <=1; j ++) {
for ( int i = -1; i <=1; i ++) {
int color = arr [( row + j ) * width + ( col + i ) ];
sum += color * mask [ i +1][ j +1];
}
}
return sum /15;
}
void h_blur ( unsigned char * arr , unsigned char * result) {
int offset = 2 *width ;
for ( int row =2; row < height -3; row ++) {
for ( int col =2; col < width -3; col ++) {
result [ offset + col ] = getPixel ( arr , col , row ) ;
}
offset += width ;
}
}
int main(int argc, char** argv)
{
starttime = getTickCount();
image_input = cvLoadImage("test.jpg", CV_LOAD_IMAGE_UNCHANGED);
width = image_input->width;
height = image_input->height;
widthStep = image_input->widthStep;
channels = image_input->nChannels;
IplImage* image_output = cvCreateImage(cvGetSize(image_input),IPL_DEPTH_8U,channels);
unsigned char *h_out = (unsigned char*)image_output->imageData;
unsigned char *h_in = (unsigned char*)image_input->imageData;
//sobel_parallel(h_in, h_out, width, height, widthStep, channels);
h_blur ( h_in , h_out) ;
endtime = getTickCount();
printf("Waktu Eksekusi = %f\n", (endtime-starttime)/getTickFrequency());
cvShowImage("CPU", image_output);
cvSaveImage("output.jpg",image_output);
cvReleaseImage(&image_output);
waitKey(0);
}
but when I run the program, the image is divided into three. I still haven't found what is wrong with my code. T_T
Here is the result:
Please help me solve this problem.
#include <opencv2/opencv.hpp>
int mask [3][3] = {1 ,2 ,1 ,
2 ,3 ,2 ,
1 ,2 ,1 };
int width;
int height;
int widthStep;
int channels;
int getPixel ( unsigned char * arr , int col , int row , int k ) {
int sum = 0;
int denom = 0;
for ( int j = -1; j <=1; j ++) {
for ( int i = -1; i <=1; i ++) {
if ((row + j) >= 0 && (row + j) < height && (col + i) >= 0 && (col + i) < width) {
int color = arr [( row + j ) * 3 * width + ( col + i ) * 3 + k];
sum += color * mask [ i +1][ j +1];
denom += mask [ i +1][ j +1];
}
}
}
return sum / denom;
}
void h_blur ( unsigned char * arr , unsigned char * result) {
for ( int row =0; row < height; row ++) {
for ( int col =0; col < width; col ++) {
for (int k = 0; k < 3; k++) {
result [ 3 * row * width + 3 * col + k] = getPixel ( arr , col , row , k ) ;
}
}
}
}
int main(int argc, char** argv)
{
//starttime = getTickCount();
IplImage *image_input = cvLoadImage("test.jpg", CV_LOAD_IMAGE_UNCHANGED);
width = image_input->width;
height = image_input->height;
widthStep = image_input->widthStep;
channels = image_input->nChannels;
IplImage* image_output = cvCreateImage(cvGetSize(image_input),IPL_DEPTH_8U,channels);
unsigned char *h_out = (unsigned char*)image_output->imageData;
unsigned char *h_in = (unsigned char*)image_input->imageData;
//sobel_parallel(h_in, h_out, width, height, widthStep, channels);
h_blur ( h_in , h_out) ;
//endtime = getTickCount();
//printf("Waktu Eksekusi = %f\n", (endtime-starttime)/getTickFrequency());
cvShowImage("input", image_input);
cvShowImage("CPU", image_output);
cvSaveImage("output.jpg",image_output);
cvReleaseImage(&image_output);
cv::waitKey(0);
}
I had a few minor issues when compiling your code, so there are a few extra changes (it seems like the top section of your code may have been cut off, so a few variable declarations are missing).
In any case, the big changes are to getPixel and h_blur.
The main problem in your code was that you did not handle the fact that the data contains three bytes (blue, green, red) for each pixel, not one byte. Because of this, your code only actually looked at the first third of the image, and it swapped around the colors a bit.
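To make the addressing explicit, here is a small sketch (assuming an 8-bit interleaved image as loaded above, where widthStep is the number of bytes per row including any padding):
// byte offset of channel k (0 = B, 1 = G, 2 = R) of the pixel at (col, row)
int offset = row * widthStep + col * channels + k;
unsigned char value = h_in[offset];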
So I have an image that I want to overlay with a checkerboard pattern.
This is what I have come up with so far:
for ( uint_8 nRow = 0; nRow < image.width(); ++nRow)
for (uint_8 nCol = 0; nCol < image.height(); ++nCol)
if(((nRow/20 + nCol/20) % 2) == 0)
memset(&image.data[nCol + nRow], 0, 1);
Unfortunately, this produces a white image. I also don't think it is very performant, because memset is called for every single pixel in the image instead of for multiple pixels at once.
Why does this code not produce a checkerboard pattern? How would you improve it?
For better performance, don't treat the image as a 2-dimensional entity. Instead, look at it as a 1D array of contiguous data, where all lines of the image are arranged one after the other.
With this approach, you can write the pattern in one go with a single loop, where in every iteration you memset() multiple adjacent pixels and increase the index by twice the amount of pixels you set:
int data_size = image.width() * image.height();
// note: the numbers below are hard-coded for 20-pixel squares on a 400-pixel-wide image
for (auto it = image.data; it < image.data + data_size; it += 20) {
    memset(it, 0, 20);
    if (((it - image.data) + 40) % (20 * 400) == 0) {
        it += 40;
    } else if (((it - image.data) + 20) % (20 * 400) != 0) {
        it += 20;
    }
}
(Replace auto with the type of image.data if you're not using C++11; I suspect it's unsigned char*.)
This is quite friendly to the CPU's cache prefetcher. It's also friendly to the compiler, which can potentially vectorize and/or unroll the loop.
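For reference, a generalized sketch of the same one-memset-per-square idea that doesn't hard-code the 400-pixel width (it assumes image.data is a tightly packed 8-bit grayscale buffer, one byte per pixel, with the image.width()/image.height() accessors from the question, and it needs <cstring> for memset):
const int sq = 20;                       // checker square size in pixels
const int w = (int)image.width();
const int h = (int)image.height();
unsigned char* row = image.data;         // rows are contiguous, one byte per pixel
for (int y = 0; y < h; ++y, row += w) {
    for (int x = 0; x < w; x += sq) {
        if (((x / sq + y / sq) % 2) == 0) {
            int run = (sq < w - x) ? sq : (w - x); // clip a partial last square
            memset(row + x, 0, run);               // one memset per square per line
        }
    }
}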
If your image's dimensions are a multiple of the checker square size:
(I coded it in C, but it is fairly easy to port to C++.)
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#define uint unsigned int
#define WIDTH 40
#define HEIGHT 40
#define BLOCK_SIZE 5
void create_checker_row(uint* row, uint size_block, uint nb_col, uint offset )
{
uint ic;
for (ic = size_block*offset ; ic < nb_col; ic+= 2*size_block )
{
memset( (row + ic) , 0, size_block*sizeof(uint) );
}
}
int main()
{
uint ir,ic;
// image creation
uint* pixels = (uint*) malloc(WIDTH*HEIGHT*sizeof(uint));
for (ir = 0; ir < HEIGHT; ir++)
{
for ( ic = 0; ic < WIDTH; ic++)
{
// arbitrary numbers
pixels[ir*WIDTH + ic] = (ir*WIDTH + ic) % 57 ;
printf("%d,", pixels[ir*WIDTH + ic] );
}
printf("\n");
}
for (ir = 0; ir < HEIGHT; ir++)
{
create_checker_row( pixels + ir*WIDTH , // pointer at the beginning of the n-th row
BLOCK_SIZE , // horizontal length for square
WIDTH , // image width
(ir/BLOCK_SIZE) % 2 // offset to create the checker pattern
);
}
// validation
printf("\n");
printf("Validation \n");
printf("\n");
for (ir = 0; ir < HEIGHT; ir++)
{
for ( ic = 0; ic < WIDTH; ic++)
{
printf("%d,", pixels[ir*WIDTH + ic] );
}
printf("\n");
}
return 0;
}
Seems pretty checkered to me: http://ideone.com/gp9so6
I use this together with stb_image_write.h:
#include <stdlib.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION /* needed in exactly one translation unit */
#include <stb_image_write.h>
int main(int argc, char *argv[])
{
const int w = 256, h = 256, ch = 4, segments = 8, box_sz = w / segments;
unsigned char rgba_fg[4] = {255, 255, 0, 255}; //yellow
unsigned char rgba_bg[4] = {255, 0, 0, 255}; //red
unsigned char* data = calloc(w * h * ch, sizeof(unsigned char));
int swap = 0;
int fill = 0; /* set to 1 to fill fg first*/
unsigned char* col = NULL;
for(int i = 0; i < w * h; i++)
{
if(i % (w * box_sz) == 0 && i != 0)
swap = !swap;
if(i % box_sz == 0 && i != 0)
fill = !fill;
if(fill)
{
if(swap)
col = rgba_bg;
else
col = rgba_fg;
}else
{
if(swap)
col = rgba_fg;
else
col = rgba_bg;
}
for(int j = 0; j < ch; j++)
{
data[i*ch + j] = col[j];
}
}
stbi_write_png("checker.png", w, h, ch, data, 0);
free(data);
return 0;
}
It's a bit slow with large images, but it gets the job done if you cache them.
So I'm trying to erode a binary matrix.
I create the matrix using this code:
cv::Mat tmp = cv::Mat::zeros( IMG->width, IMG->height, CV_8U );
for( auto i = 0 ; i < IMG->width ; i++)
{
for ( auto j = 0 ; j < IMG->height ; j++)
{
if( cv::pointPolygonTest(cv::Mat(contour),cv::Point(i,j),true) < 0 )
{
tmp.at<double>(i,j) = 255;
}
}
}
Here is the source picture I'm using:
And this is what I get with my loop (it's the tmp matrix):
Then I try to erode the picture using this code:
int erosion_elem = 1;
int erosion_size = 8;
int erosion_type;
if( erosion_elem == 0 ){ erosion_type = MORPH_RECT; }
else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; }
else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; }
Mat element = getStructuringElement( erosion_type,
Size( 2*erosion_size + 1, 2*erosion_size+1 ),
Point( erosion_size, erosion_size ) );
/// Apply the erosion operation
erode( binary, erosion_dst, element );
So it compiles well but I get an exception on this line:
erode( binary, erosion_dst, element );
It says it's an unsupported data type.
Does anyone have an idea why I get this exception?
I tried to change the data type of the matrix tmp but I have the same error.
Thanks for your help!
Your binary image's pixels are stored as unsigned char (CV_8U -> 8 bits -> 1 byte),
so you should store your pixels' values as unsigned char too:
cv::Mat tmp = cv::Mat::zeros( IMG->width, IMG->height, CV_8U );
for( auto i = 0 ; i < IMG->width ; i++)
{
for ( auto j = 0 ; j < IMG->height ; j++)
{
if( cv::pointPolygonTest(cv::Mat(contour),cv::Point(i,j),true) < 0 )
{
tmp.at<unsigned char>(i,j) = 255;
}
}
}
(made answer from comment)
Given: (In C++)
int main () {
int* ptr;
int ary [10][2];
ptr = ary;
return 0;
}
How would I access ary[0][1] with ptr?
You can't, because the type of ptr is wrong. The variable should be declared as int(*)[2] (pointer to an array of size 2 of integers). Then you could just use ptr[0][1].
#include <cstdio>
int main () {
int (* ptr) [2]; // <--
int ary [10][2];
ptr = ary;
ary[0][1] = 5;
printf("%d\n", ptr[0][1]);
return 0;
}
If you must use an int*, you need to introduce a reinterpret_cast. The array indices are laid out like:
  0      1      2      3     ...   2*n    2*n+1
[0][0] [0][1] [1][0] [1][1]   ...  [n][0] [n][1]
so you could use ptr[1] to get ary[0][1].
#include <cstdio>
int main () {
int* ptr;
int ary [10][2];
ptr = reinterpret_cast<int*>(ary); // <--
ary[0][1] = 5;
printf("%d\n", ptr[1]);
return 0;
}
typedef int twoints[2]; // twoints is an array of 2 ints
int main () {
twoints ary[10]; // array of 10 twoints, same as your int ary[10][2];
twoints* ptr = ary;
// ptr[0] or *ptr is the first row
// ptr[1] or *(ptr+1) is the second row
int* ptr2 = ptr[0];
// ptr2[1] or *(ptr2+1) is ary[0][1]
// if you don't want to use an intermediate variable,
// just substitute "*ptr" for "ptr2" in "*(ptr2+1)"
int val = *((*ptr)+1);
return 0;
}
What you want only works when the data is in one contiguous block, which it might not be in all cases. In the context of image processing, you mostly do something like this:
int width = 1024;
int height = 768;
char* img = new char[width*height];
char** img2d = new char*[height];
for (int y = 0; y < height; ++y){
img2d[y] = img + y*width;
}
//set pixel at x=50, y=100
img2d[100][50] = 1;
//does the same
img[100*width+50] = 1;
delete[] img2d;
delete[] img;
It is possible; just look at this example (these are dynamic arrays, but it works with static arrays too):
#include <cstdint>
#include <cstdlib>

void bla ( void )
{
const int32_t sx = 50, sy = 30, sz = 50;
uint64_t *** n = NULL;
n = ( uint64_t*** )malloc( sizeof( uint64_t** ) * sx );
for ( int32_t x = 0; x < sx; x++ )
{
*( n + x ) = ( uint64_t** )malloc( sizeof( uint64_t* ) * sy );
for ( int32_t y = 0; y < sy; y++ )
*( *( n + x ) + y ) = ( uint64_t* )malloc( sizeof( uint64_t ) * sz );
}
for ( int32_t x = 0; x < sx; x++ )
for( int32_t y = 0; y < sy; y++ )
for( int32_t z = 0; z < sz; z++ )
*( *( *( n + x ) + y ) + z ) = 1024 * 1024; // same as n[x][y][z] = 1024 * 1024;
// free everything in reverse order of allocation to avoid leaking it
for ( int32_t x = 0; x < sx; x++ )
{
for ( int32_t y = 0; y < sy; y++ )
free( *( *( n + x ) + y ) );
free( *( n + x ) );
}
free( n );
}
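Following the contiguous-block idea from the previous answer, the same thing can also be done with a single allocation plus index arithmetic; a minimal sketch (the idx3 helper is just for illustration, not a standard function):
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: one contiguous buffer addressed as element (x, y, z).
inline std::size_t idx3( std::size_t x, std::size_t y, std::size_t z, std::size_t sy, std::size_t sz )
{
return ( x * sy + y ) * sz + z;
}

void bla_contiguous ( void )
{
const std::size_t sx = 50, sy = 30, sz = 50;
std::vector<uint64_t> n( sx * sy * sz );
for ( std::size_t x = 0; x < sx; x++ )
for ( std::size_t y = 0; y < sy; y++ )
for ( std::size_t z = 0; z < sz; z++ )
n[ idx3( x, y, z, sy, sz ) ] = 1024 * 1024;
// no manual free is needed; the vector releases its memory automatically
}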