How to implement the imrotate function using OpenCV - C++

I want to get the same result as imrotate in MATLAB, but I can't figure out how.
More specifically, I have a matrix a
a =
1 2 3
4 5 6
imrotate(a, 45)
ans =
0 0 0 0 0
0 0 3 6 0
0 1 2 6 0
0 1 4 0 0
0 0 0 0 0
How can I do that using OpenCV?

You may need to use getRotationMatrix2D and warpAffine, like this:
Mat A(5, 5, CV_8U, Scalar(0));
Point center(3, 3);
double angle = 45;
int value = 1;
// fill a block of A with the values 1..6 (adapt the bounds to match your 2x3 matrix)
for (int i = 2; i < 4; i++)
{
    for (int j = 1; j < 4; j++)
    {
        A.at<uchar>(j, i) = value;   // at() needs the element type
        value++;
    }
}
Mat matrix = getRotationMatrix2D(center, angle, 1.0);
Mat imgRotation;
warpAffine(A, imgRotation, matrix, Size(5, 5));
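That keeps the output size fixed at 5x5. If you also want imrotate's behaviour of enlarging the output so nothing is cropped (its default 'loose' bounding box), one common recipe is to grow the canvas and shift the transform accordingly. A minimal sketch, assuming the usual OpenCV headers and using namespace cv; it is not a drop-in imrotate replacement (interpolation and the exact centre convention may still differ slightly from MATLAB):

// Rotate with a growing ('loose') canvas, roughly what imrotate(a, angle) does.
Mat imrotateLoose(const Mat& src, double angle)
{
    Point2f center(src.cols / 2.0f, src.rows / 2.0f);
    Mat rot = getRotationMatrix2D(center, angle, 1.0);

    // bounding box of the source rectangle after rotation
    Rect bbox = RotatedRect(Point2f(), src.size(), (float)angle).boundingRect();

    // shift the transform so the rotated image is centred in the new canvas
    rot.at<double>(0, 2) += bbox.width  / 2.0 - center.x;
    rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;

    Mat dst;
    warpAffine(src, dst, rot, bbox.size(), INTER_NEAREST);
    return dst;
}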

Related

Finding the boundaries of a figure in a 2D array

There is a figure represented by 1 values that are "connected" vertically, horizontally or diagonally in a two-dimensional array.
I need to save the indexes of the boundary of the figure (the row and column of the 0's that are connected to the figure) in any type of C++ container.
For instance, in the following 2D array, I should get the following indexes:
(0,2), (0,3), (0,4), (1,2), (1,4), (1,5), (2,2), (2,3), (2,5), (2,6)... etc.
0 0 0 0 0 0 0 0
0 0 0 1 0 0 0 0
0 0 0 0 1 0 0 0
0 0 0 1 1 1 0 0
0 0 0 0 1 1 0 0
0 0 0 1 1 1 0 0
0 0 0 1 1 0 0 0
0 0 0 1 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
What is the most efficient way to do so, on both space and time complexity?
void dfs(vector<vector<int>>& matrix, vector<vector<int>>& boundary, int rows, int cols, int i, int j){
    if(!isValidCoordinate(i, j))
        return;
    if(matrix[i][j] != 0)   // already visited (2 or 3) or part of the figure (1)
        return;
    if(isAnyNeighborOne(i, j)){
        boundary.push_back({i, j});
        matrix[i][j] = 2;
    }
    else
        matrix[i][j] = 3;
    // Explore eight directions
    /* I didn't bother to skip x = 0 and y = 0.
     * You can, if you want.
     * Doesn't make a difference though.
     */
    for(int x = -1; x < 2; x++){
        for(int y = -1; y < 2; y++){
            dfs(matrix, boundary, rows, cols, i + x, j + y);
        }
    }
}
vector<vector<int>> getBoundary(vector<vector<int>>& matrix){
    vector<vector<int>> boundary;
    int rows = matrix.size();
    if(!rows)
        return boundary;
    int cols = matrix[0].size();
    for(int i = 0; i < rows; i++){
        for(int j = 0; j < cols; j++){
            if(matrix[i][j] == 0){
                dfs(matrix, boundary, rows, cols, i, j);
            }
        }
    }
    return boundary;
}
If you print the matrix at the end, you'll see the boundary marked with 2.
Whatever you see as 3 can be set back to 0, if you want.
isValidCoordinate() and isAnyNeighborOne() are left to you as an exercise (one possible sketch is given at the end of this answer).
I use vector<vector<int>> for boundary. You can try using vector<pair<int,int>> as well.
With the above solution you'll get inner boundary as well as outer boundary. As an exercise, you can try only inner boundary or only outer boundary.
You can solve the same problem with BFS as well. If the matrix is large, the stack might overflow due to the recursive calls, so prefer BFS in such cases.
Time and space complexity of the above solution is O(rows * cols).
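For reference, one possible sketch of those two helpers. Note that, unlike the calls in dfs above, these versions take the matrix and its dimensions explicitly, so you would either adjust the call sites or turn them into lambdas capturing that state:

bool isValidCoordinate(int rows, int cols, int i, int j){
    return i >= 0 && i < rows && j >= 0 && j < cols;
}

bool isAnyNeighborOne(vector<vector<int>>& matrix, int rows, int cols, int i, int j){
    // check the eight surrounding cells for a 1
    for(int x = -1; x < 2; x++){
        for(int y = -1; y < 2; y++){
            int ni = i + x, nj = j + y;
            if((x != 0 || y != 0) && ni >= 0 && ni < rows && nj >= 0 && nj < cols && matrix[ni][nj] == 1)
                return true;
        }
    }
    return false;
}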

Image rotation by very small step size in OpenCV C++

I'm trying to rotate an image by a small angle, like 1 degree or even less than 1 degree.
Consider this as my source image, which I'm trying to rotate by 1 degree in MATLAB and in OpenCV (C++):
0 0 0 0
0 255 255 0
0 255 255 0
0 0 0 0
When I rotate it in MATLAB by 1 degree this is the result:
0 0 2.223835069639144 0
2.223835069639553 252.7942370468069 2.527942370468065 0
0 252.7942370468065 252.794237046807 2.223835069639648
0 2.223835069639184 0 0
This is the code in MATLAB:
sourceImage = [0 0 0 0; 0 255.0 255.0 0; 0 255.0 255.0 0; 0 0 0 0];
rotationDegree = 1.0;
shearInX_Y1 = (cosd(rotationDegree)-1)/sind(rotationDegree);
shearInX_Y2 = sind(rotationDegree);
transformationMatrix = [(1 + shearInX_Y1*shearInX_Y2), (2*shearInX_Y1 + ...
((shearInX_Y1)^2)*shearInX_Y2), 0; (shearInX_Y2), (1 + shearInX_Y1*shearInX_Y2), 0; 0, 0, 1];
tform = affine2d(transformationMatrix);
imref = imref2d(size(sourceImage));
imref.XWorldLimits = imref.XWorldLimits-mean(imref.XWorldLimits);
imref.YWorldLimits = imref.YWorldLimits-mean(imref.YWorldLimits);
transformedImg2 = imwarp(sourceImage , imref, tform, 'OutputView', imref, 'Interp', 'bilinear');
transformedImg2
The transformationMatrix in Matlab (which is our rotation matrix) is:
transformationMatrix =
9.998476951563913e-01 -1.745240643728003e-02 0
1.745240643728351e-02 9.998476951563913e-01 0
0 0 1.000000000000000e+00
And the content of "tform.T" is:
9.998476951563913e-01 -1.745240643728003e-02 0
1.745240643728351e-02 9.998476951563913e-01 0
0 0 1
And the content of "rotationMatrix" in OpenCV is:
9.998476951563913e-01 1.745240643728351e-02 -0.008650050796837391
-1.745240643728351e-02 9.998476951563913e-01 0.008802355640446121
But when I do the rotation by 1 degree in OpenCV (C++), this is the result (which is the same as the source image!), which makes it look as if OpenCV has a problem with small rotations:
0 0 0 0
0 255 255 0
0 255 255 0
0 0 0 0
This is the code I use for the rotation in OpenCV (C++) (the rotation is done with respect to the image center):
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
# define INTERPOLATION_METHOD INTER_CUBIC
// INTER_AREA
// INTER_LINEAR
// INTER_NEAREST
Mat rotateImage(Mat sourceImage, double rotationDegree);
int main(){
    Mat sourceImage = Mat::zeros(4, 4, CV_64F);
    sourceImage.at<double>(1, 1) = 255.0;
    sourceImage.at<double>(1, 2) = 255.0;
    sourceImage.at<double>(2, 1) = 255.0;
    sourceImage.at<double>(2, 2) = 255.0;
    double rotationDegree = 1.0;
    Mat rotatedImage = rotateImage(sourceImage, rotationDegree);
    cout << "sourceImage: \n" << sourceImage << endl << endl;
    cout << "rotatedImage : \n" << rotatedImage << endl << endl;
    return 0;
}
Mat rotateImage(Mat sourceImage, double rotationDegree){
    double rowOfImgCenter;
    double colOfImgCenter;
    rowOfImgCenter = sourceImage.rows / 2.0 - 0.5;
    colOfImgCenter = sourceImage.cols / 2.0 - 0.5;
    Point2d imageCenter(colOfImgCenter, rowOfImgCenter);
    Mat rotationMatrix;
    rotationMatrix = getRotationMatrix2D(imageCenter, rotationDegree, 1.0);
    Mat rotatedImage;
    warpAffine(sourceImage, rotatedImage, rotationMatrix, sourceImage.size(), INTERPOLATION_METHOD);
    return rotatedImage;
}
Any idea would be appreciated.

Better ways to create a rectangular mask with OpenCV

Creating a mask in OpenCV
/** result I want
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 1 1 1 1 0 0
0 0 1 1 1 1 0 0
0 0 1 1 1 1 0 0
0 0 1 1 1 1 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
*/
cv::Mat mask = cv::Mat::zeros(8, 8, CV_8U);
std::cout << "before : \n" << mask << std::endl;
for(int i = 2; i != 6; ++i)
{
    auto ptr = mask.ptr<uchar>(i) + 2;
    for(int j = 0; j != 4; ++j)
    {
        *ptr++ = 1;
    }
}
std::cout << "after : \n" << mask << std::endl;
Does OpenCV provide any built-in function to create a mask like this?
It is trivial to write a function for this task, but OpenCV's built-in functions
are usually faster than naive handcrafted code.
Sure, there's an easier way: use the ROI operator:
cv::Mat mask = cv::Mat::zeros(8, 8, CV_8U); // all 0
mask(Rect(2,2,4,4)) = 1;
done!
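A couple of equivalent ways to write the same thing, if you prefer setTo or Range-based indexing (both should produce exactly the same mask):

cv::Mat mask = cv::Mat::zeros(8, 8, CV_8U);
mask(cv::Rect(2, 2, 4, 4)).setTo(1);          // same rectangle, via setTo
// or pick the rows and columns directly:
mask(cv::Range(2, 6), cv::Range(2, 6)) = 1;   // rows 2..5, cols 2..5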
If someone is looking to create a non-rectangular mask and then apply it to an image, have a look here:
Mat& obtainIregularROI(Mat& origImag, Point2f topLeft, Point2f topRight, Point2f botLeft, Point2f botRight){
    static Mat black(origImag.rows, origImag.cols, origImag.type(), cv::Scalar::all(0));
    Mat mask(origImag.rows, origImag.cols, CV_8UC1, cv::Scalar(0));

    vector< vector<Point> > co_ordinates;
    co_ordinates.push_back(vector<Point>());
    co_ordinates[0].push_back(topLeft);
    co_ordinates[0].push_back(botLeft);
    co_ordinates[0].push_back(botRight);
    co_ordinates[0].push_back(topRight);
    drawContours(mask, co_ordinates, 0, Scalar(255), CV_FILLED, 8);

    origImag.copyTo(black, mask);
    return black;
}
"black" is the image where we will finally obtain the result by cropping out the irregular ROI from the original image.
static Mat black(origImag.rows, origImag.cols, origImag.type(), cv::Scalar::all(0));
The "mask" is a Mat, initialized as the same size of original image and filled with 0.
Mat mask(origImag.rows, origImag.cols, CV_8UC1, cv::Scalar(0));
Putting the coordinates in ANTICLOCKWISE direction
vector< vector<Point> > co_ordinates;
co_ordinates.push_back(vector<Point>());
co_ordinates[0].push_back(topLeft);
co_ordinates[0].push_back(botLeft);
co_ordinates[0].push_back(botRight);
co_ordinates[0].push_back(topRight);
Now actually generate the mask:
drawContours( mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 );
At the end, copy the masked portion/ROI of the original image (origImag), using the mask, into the image named "black":
origImag.copyTo(black,mask);
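For illustration, a hypothetical call site might look like this (the file name and corner coordinates are made up, and the usual OpenCV includes are assumed):

cv::Mat img = cv::imread("input.png");
cv::Mat result = obtainIregularROI(img,
                                   cv::Point2f(100, 50),    // topLeft
                                   cv::Point2f(300, 60),    // topRight
                                   cv::Point2f(90, 250),    // botLeft
                                   cv::Point2f(310, 240));  // botRight
// 'result' is black everywhere except inside the quadrilateral.
// Note: because 'black' is static, repeated calls draw into the same buffer.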

Converting 1-bit bmp file to array in C/C++ [closed]

I'm looking to turn a 1-bit bmp file of variable height/width into a simple two-dimensional array with values of either 0 or 1. I don't have any experience with image editing in code and most libraries that I've found involve higher bit-depth than what I need. Any help regarding this would be great.
Here's the code to read a monochrome .bmp file
(See dmb's answer below for a small fix for odd-sized .bmps)
#include <stdio.h>
#include <string.h>
#include <stdlib.h>   /* malloc, free */

unsigned char *read_bmp(char *fname, int *_w, int *_h)
{
    unsigned char head[54];
    FILE *f = fopen(fname, "rb");

    // BMP header is 54 bytes
    fread(head, 1, 54, f);

    int w = head[18] + (((int)head[19]) << 8) + (((int)head[20]) << 16) + (((int)head[21]) << 24);
    int h = head[22] + (((int)head[23]) << 8) + (((int)head[24]) << 16) + (((int)head[25]) << 24);

    // lines are aligned on 4-byte boundary
    int lineSize = (w / 8 + (w / 8) % 4);
    int fileSize = lineSize * h;

    unsigned char *img = malloc(w * h), *data = malloc(fileSize);

    // skip the header
    fseek(f, 54, SEEK_SET);

    // skip palette - two rgb quads, 8 bytes
    fseek(f, 8, SEEK_CUR);

    // read data
    fread(data, 1, fileSize, f);

    // decode bits
    int i, j, k, rev_j;
    for (j = 0, rev_j = h - 1; j < h; j++, rev_j--) {
        for (i = 0; i < w / 8; i++) {
            int fpos = j * lineSize + i, pos = rev_j * w + i * 8;
            for (k = 0; k < 8; k++)
                img[pos + (7 - k)] = (data[fpos] >> k) & 1;
        }
    }

    free(data);
    *_w = w;
    *_h = h;
    return img;
}

int main()
{
    int w, h, i, j;
    unsigned char *img = read_bmp("test1.bmp", &w, &h);

    for (j = 0; j < h; j++)
    {
        for (i = 0; i < w; i++)
            printf("%c ", img[j * w + i] ? '0' : '1');
        printf("\n");
    }
    return 0;
}
It is plain C, so there is no pointer casting - beware when using it in C++.
The biggest problem is that the lines in .bmp files are 4-byte aligned, which matters a lot with single-bit images. So we calculate the line size as "width / 8 + (width / 8) % 4"; for example, a 16-pixel-wide image gives 16 / 8 + (16 / 8) % 4 = 2 + 2 = 4 bytes per line. Each byte contains 8 pixels, not one, so we use the k-based loop.
I hope the rest of the code is obvious - much has been written about the .bmp header and the palette data (the 8 bytes which we skip).
Expected output:
0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
0 0 0 0 0 0 1 1 1 1 0 0 1 1 0 0
0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0
0 0 0 1 0 0 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 0 0 1 0 0 0
0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 1 1 1 1 1 0 0 0 0 1 0
0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 1 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
I tried Viktor Lapyov's solution on a 20x20 test image, but with his code I get an output (slightly reformatted) in which the last 4 pixels of each row are not read. The problem is here - the last partial byte in a row is ignored:
// decode bits
int i, j, k, rev_j;
for(j = 0, rev_j = h - 1; j < h ; j++, rev_j--) {
    for(i = 0 ; i < w / 8; i++) {
        int fpos = j * lineSize + i, pos = rev_j * w + i * 8;
        for(k = 0 ; k < 8 ; k++)
            img[pos + (7 - k)] = (data[fpos] >> k ) & 1;
    }
}
I rewrote the inner loop like this:
// decode bits
int i, byte_ctr, j, rev_j;
for(j = 0, rev_j = h - 1; j < h ; j++, rev_j--) {
    for(i = 0; i < w; i++) {
        byte_ctr = i / 8;
        unsigned char data_byte = data[j * lineSize + byte_ctr];
        int pos = rev_j * w + i;
        unsigned char mask = 0x80 >> i % 8;
        img[pos] = (data_byte & mask) ? 1 : 0;
    }
}
and all is well:
The following C code works with monochrome bitmaps of any size. I'll assume you've got your bitmap in a buffer, with height and width initialized from the file. So:
// allocate mem for global buffer
if (!(img = malloc(h * w)))
    return(0);

int i = 0, k, j, scanline;

// calc the scanline: bytes needed for w pixels, i.e. ceil(w / 8).
// Monochrome images are padded with 0 at every line end so that
// each line is divisible by 4.
scanline = (w + 7) >> 3;

// account for the padding
if (scanline % 4)
    scanline += (4 - scanline % 4);

// loop and set the img values (bitmap rows are stored bottom-up,
// so k walks the file rows backwards while i walks the output rows)
for (i = 0, k = h - 1; i < h; i++, k--)
    for (j = 0; j < w; j++) {
        img[j + i*w] = (buffer[(j >> 3) + k*scanline]
                        & (0x80 >> (j % 8))) ? 1 : 0;
    }
Hope this helps. Converting it to 2D is now a trivial matter: if r and c are the row and column and w is the width, the element at (r, c) lives at 1D index c + r * w.
If you have further remarks, let me know.
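A minimal C++ sketch of that 1D-to-2D wrapping, assuming the flat array img and its dimensions w and h from the code above:

#include <vector>

// pixels[r][c] ends up holding img[c + r * w]
std::vector<std::vector<int>> to2D(const unsigned char *img, int w, int h)
{
    std::vector<std::vector<int>> pixels(h, std::vector<int>(w));
    for (int r = 0; r < h; r++)
        for (int c = 0; c < w; c++)
            pixels[r][c] = img[c + r * w];
    return pixels;
}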
Let's think of a 1x7 monochrome bitmap, i.e. a bitmap of a straight line that is 7 pixels wide. To store this image on a Windows OS, the 7 pixels fit into a single byte, and since 1 byte is not evenly divisible by 4, an extra 3 bytes of padding are added to the row.
So the biSizeImage member of the BITMAPINFOHEADER structure will show a total of 4 bytes. Nonetheless the biHeight and biWidth members will correctly state the true bitmap dimensions.
The code above that loops with i < w / 8 will fail here, because 7 / 8 = 0 (integer division truncates in C). Hence the "i" loop will not execute, and neither will the "k" loop.
That means the vector "img" then contains garbage values that do not correspond to the pixels contained in "data", i.e. the result is incorrect.
And by inductive reasoning, if it does not satisfy the base case, chances are it won't do much good for the general case either.

How does clEnqueueCopyBufferRect work?

I need to understand how clEnqueueCopyBufferRect works.
For example, I need to copy a 3x3 region out of a 4x4 region (let it be an int array) with origin (1,1).
I have two hypotheses:
It just copies a linear region whose size matches the size of the rectangular 3x3 region, so the result is:
1 1 1 1 0 0 0 0
1 1 1 1 -> 0 1 1 1
1 1 1 1 1 1 1 1
1 1 1 1 1 1 0 0
Or it copies the rectangular region, and the result is:
1 1 1 1 0 0 0 0
1 1 1 1 -> 0 1 1 1
1 1 1 1 0 1 1 1
1 1 1 1 0 1 1 1
To check this I used the following example code:
// requires the OpenCL 1.x C++ wrapper (cl.hpp)
#include <CL/cl.hpp>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using namespace cl;

int main(int argc, char * argv[])
{
    std::vector<Platform> platforms;
    Platform::get(&platforms);
    cl::Platform platform = platforms[0];

    cl_context_properties cps[3] = {CL_CONTEXT_PLATFORM, (cl_context_properties)platform(), 0};
    cl::Context context(CL_DEVICE_TYPE_GPU, cps);

    std::string kr_str = "__kernel void StepKernel(__global int* in) { in[get_global_id(1)*4 + get_global_id(0)] = 1;}";
    Program program = cl::Program(
        context,
        cl::Program::Sources(
            1,
            std::make_pair(kr_str.c_str(), kr_str.length() + 1)
        )
    );

    std::vector<cl::Device> devices = context.getInfo<CL_CONTEXT_DEVICES>();
    std::vector<cl::Device> device(1, devices[0]);
    program.build(device);

    cl::Kernel kernel = cl::Kernel(program, "StepKernel");
    cl::CommandQueue queue(
        context,
        device[0],
        CL_NONE
    );

    cl::Buffer in_buffer_on_device(
        context,
        CL_MEM_READ_WRITE,
        16 * sizeof(int)
    );
    cl::Buffer out_buffer_on_host(
        context,
        CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR,
        16 * sizeof(int)
    );

    void *ptr_on_host =
        queue.enqueueMapBuffer(
            out_buffer_on_host,
            CL_BLOCKING,
            CL_MAP_WRITE | CL_MAP_READ,
            0, 16 * sizeof(int),
            NULL, NULL
        );

    for (int k = 0; k < 4; k++)
        for (int i = 0; i < 4; i++)
            static_cast<int*>(ptr_on_host)[k*4 + i] = 0;

    cl::size_t<3> origin, region;
    origin[0] = 0; origin[1] = 0; origin[2] = 0;
    region[0] = 4 * sizeof(int); region[1] = 4; region[2] = 1;

    Event evt;
    queue.enqueueCopyBufferRect(
        out_buffer_on_host,
        in_buffer_on_device,
        origin, origin, region,
        sizeof(int)*4, sizeof(int)*4*4, sizeof(int)*4, sizeof(int)*4*4,
        NULL,
        &evt
    );
    evt.wait();

    kernel.setArg(0, in_buffer_on_device);
    queue.enqueueNDRangeKernel(
        kernel,
        cl::NDRange(0, 0),
        cl::NDRange(4, 4),
        cl::NullRange,
        NULL,
        &evt
    );
    evt.wait();

    origin[0] = 1 * sizeof(int); origin[1] = 1; origin[2] = 0;
    region[0] = 3 * sizeof(int); region[1] = 3; region[2] = 1;
    queue.enqueueCopyBufferRect(
        in_buffer_on_device,
        out_buffer_on_host,
        origin, origin, region,
        sizeof(int)*4, sizeof(int)*4*4, sizeof(int)*4, sizeof(int)*4*4,
        NULL,
        &evt
    );
    evt.wait();

    for (int k = 0; k < 4; k++)
    {
        for (int i = 0; i < 4; i++)
        {
            std::cout << static_cast<int*>(ptr_on_host)[k*4 + i] << "\t";
        }
        std::cout << std::endl;
    }
    return 0;
}
And the output was:
0 0 0 0
0 1 1 1
1 1 1 1
1 1 0 0
So, if I have not made any mistake, CL copies the linear region. Such behavior is useless for me.
It copies the rectangular region. Otherwise it would be quite useless as an API.
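In more detail, my reading of the spec is that origin[0] and region[0] are in bytes while origin[1]/region[1] count rows (and [2] slices), and for each row the implementation copies region[0] contiguous bytes starting at origin[0] + row * row_pitch (+ slice * slice_pitch). A small host-side sketch of that addressing, just to illustrate the rectangular semantics (not the actual driver code):

#include <cstddef>
#include <cstring>

// Emulates the byte addressing clEnqueueCopyBufferRect is specified to use.
void copyBufferRectHost(const unsigned char *src, unsigned char *dst,
                        const size_t srcOrigin[3], const size_t dstOrigin[3],
                        const size_t region[3],
                        size_t srcRowPitch, size_t srcSlicePitch,
                        size_t dstRowPitch, size_t dstSlicePitch)
{
    for (size_t z = 0; z < region[2]; ++z)
        for (size_t y = 0; y < region[1]; ++y) {
            size_t srcOff = srcOrigin[0] + (srcOrigin[1] + y) * srcRowPitch
                                         + (srcOrigin[2] + z) * srcSlicePitch;
            size_t dstOff = dstOrigin[0] + (dstOrigin[1] + y) * dstRowPitch
                                         + (dstOrigin[2] + z) * dstSlicePitch;
            std::memcpy(dst + dstOff, src + srcOff, region[0]);  // one row of the rectangle
        }
}

With origin = (1*sizeof(int), 1, 0), region = (3*sizeof(int), 3, 1) and a row pitch of 4*sizeof(int), this touches exactly the 3x3 block shown in the second hypothesis above.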