Anti-aliasing routine for downsizing - C++

I'm trying to implement a custom anti-aliasing routine for downsizing an image by summing nearby pixels.
I tried summing and averaging a neighborhood up to 3 pixels wide, but the result is not good: it's as aliased as before.
Here is the code I used:
h is height
w is width
buf is a buffer containing the values.
out is the output buffer
for (int x = 0; x < w; x++)
    for (int y = 0; y < h; y++)
    {
        int n = 3;
        int x1 = x - n;
        int x2 = x + n;
        if (x1 < 0) x1 = 0;
        if (x2 > w - 1) x2 = w - 1;
        int y1 = y - n;
        int y2 = y + n;
        if (y1 < 0) y1 = 0;
        if (y2 > h - 1) y2 = h - 1;
        double sum = 0;
        for (int i = x1; i <= x2; i++)
            for (int j = y1; j <= y2; j++)
                sum += buf[i][j];
        sum = sum / double((x2 - x1 + 1) * (y2 - y1 + 1));
        out[x][y] = sum;
    }
What quick routine could I use to do simple, basic anti-aliasing?
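For downsizing, the usual approach is to tie the averaging window to the scale factor and step by it, instead of averaging a fixed neighborhood around every source pixel at full resolution. A minimal sketch in the same style as the code above; the integer factor f and the assumption that out is the smaller (w/f) x (h/f) buffer are mine:

// Sketch: downscale by an integer factor f (assumed to divide w and h).
// Each output pixel is the mean of the f x f block of source pixels it covers,
// so the amount of smoothing automatically matches the downscale factor.
// buf/out are indexed [x][y] as above; out is (w/f) x (h/f).
int f = 4;                              // example downscale factor (assumption)
for (int x = 0; x < w / f; x++)
    for (int y = 0; y < h / f; y++)
    {
        double sum = 0;
        for (int i = 0; i < f; i++)
            for (int j = 0; j < f; j++)
                sum += buf[x * f + i][y * f + j];
        out[x][y] = sum / double(f * f);
    }

If the scale factor is not an integer, the same idea still applies, but the averaging needs to be weighted (area averaging) or replaced by a proper low-pass filter before resampling.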

Unsharp Masking OpenCV C++

I am new to OpenCV and I am performing unsharp masking using the formula Image + K*(Image - low-pass(Image)). However, the resulting values can be < 0 or > 255, so I need to write a loop that scales them back into range.
I tried to write one, but it seems to be incorrect.
Here are the problems:
a) (k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x))) can be negative, so it is incorrect to cast it directly with (uchar).
b) src.at<uchar>(y, x) + (k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x))) can be greater than 255 or smaller than 0.
Can someone help me fix this? Thanks in advance.
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <stdlib.h>
#include <math.h>

using namespace std;
using namespace cv;

// the pixel lying outside the image i.e. (x - j, y - k) are reflected back into the image
int reflect(int M, int x)
{
    if (x < 0)
    {
        return -x - 1;
    }
    if (x >= M)
    {
        return 2 * M - x - 1;
    }
    return x;
}

int circular(int M, int x)
{
    if (x < 0)
        return x + M;
    if (x >= M)
        return x - M;
    return x;
}

void noBorderProcessing(Mat src, Mat res, float Kernel[][3])
{
    float sum;
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            sum = 0.0;
            for (int k = -1; k <= 1; k++) {
                for (int j = -1; j <= 1; j++) {
                    sum = sum + Kernel[j + 1][k + 1] * src.at<uchar>(y - j, x - k);
                }
            }
            res.at<uchar>(y, x) = sum;
        }
    }
}

void refletedIndexing(Mat src, Mat res, float Kernel[][3])
{
    float sum, x1, y1;
    for (int y = 0; y < src.rows; y++) {
        for (int x = 0; x < src.cols; x++) {
            sum = 0.0;
            for (int k = -1; k <= 1; k++) {
                for (int j = -1; j <= 1; j++) {
                    x1 = reflect(src.cols, x - j);
                    y1 = reflect(src.rows, y - k);
                    sum = sum + Kernel[j + 1][k + 1] * src.at<uchar>(y1, x1);
                }
            }
            res.at<uchar>(y, x) = sum;
        }
    }
}

// coordinates that exceed the bounds of the image wrap around to the opposite side
void circularIndexing(Mat src, Mat res, float Kernel[][3])
{
    float sum, x1, y1;
    for (int y = 0; y < src.rows; y++) {
        for (int x = 0; x < src.cols; x++) {
            sum = 0.0;
            for (int k = -1; k <= 1; k++) {
                for (int j = -1; j <= 1; j++) {
                    x1 = circular(src.cols, x - j);
                    y1 = circular(src.rows, y - k);
                    sum = sum + Kernel[j + 1][k + 1] * src.at<uchar>(y1, x1);
                }
            }
            res.at<uchar>(y, x) = sum;
        }
    }
}

int main()
{
    Mat src, res, dst;

    /// Load an image
    src = cv::imread("Images4DZ/Gray_Win.bmp", cv::IMREAD_ANYDEPTH);

    // low pass filtering
    float Kernel[3][3] = {
        {1 / 9.0, 1 / 9.0, 1 / 9.0},
        {1 / 9.0, 1 / 9.0, 1 / 9.0},
        {1 / 9.0, 1 / 9.0, 1 / 9.0}
    };
    res = src.clone();
    for (int y = 0; y < src.rows; y++)
        for (int x = 0; x < src.cols; x++)
            res.at<uchar>(y, x) = 0.0;
    circularIndexing(src, res, Kernel);

    // Unsharpen Masking
    dst = cv::Mat::zeros(res.rows, res.cols, CV_8UC1);
    float k = 0.5;
    for (int y = 0; y < res.rows; y++) {
        for (int x = 0; x < res.cols; x++) {
            dst.at<uchar>(y, x) = src.at<uchar>(y, x) + (uchar)(k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x)));
        }
    }

    imshow("Source Image", src);
    imshow("Low Pass Filter", res);
    imshow("Unsharpen Masking", dst);
    waitKey();
    return 0;
}
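For the range problem, one option (a sketch, not the only fix) is to compute the sharpened value in float and clamp it when storing; cv::saturate_cast<uchar> clamps to 0..255 and rounds. Reusing the variables from the code above:

// Sketch: compute the sharpened value in float, clamp to [0, 255] when storing.
for (int y = 0; y < res.rows; y++) {
    for (int x = 0; x < res.cols; x++) {
        float sharpened = src.at<uchar>(y, x)
                        + k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x));
        dst.at<uchar>(y, x) = cv::saturate_cast<uchar>(sharpened); // handles both < 0 and > 255
    }
}

If you prefer an explicit loop, clamping manually (if (v < 0) v = 0; if (v > 255) v = 255;) before casting to uchar does the same thing.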

How to get precise output without rounding off?

I am trying to solve this problem, but in the output the last digits get rounded off. Is there a way to convert the output to a string so that it isn't rounded?
We are given a convex polygon with N sides. You have to answer Q queries. The ith query is described by two integers vi, ti. In this query, all sides of the polygon start moving parallel to their respective perpendicular vectors, away from the centroid, with a constant velocity of vi units/sec. Output the final area covered by the N sides of the polygon after ti seconds.
#include <bits/stdc++.h>
#define ll long long int
#define lld long double
using namespace std;

lld dist(int x1, int y1, int x2, int y2)
{
    return sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2) * 1.0);
}

lld dist2(int x1, int y1, int x2, int y2)
{
    return (pow(x2 - x1, 2) + pow(y2 - y1, 2) * 1.0);
}

lld cot_sum(int x1, int y1, int x2, int y2)
{
    lld dot = x1 * x2 + y1 * y2;
    lld cosi = dot / (sqrt((lld)(x1 * x1 + y1 * y1) * (x2 * x2 + y2 * y2)));
    lld theta = acos(cosi);
    theta = (M_PI - theta) / 2;
    return cos(theta) / sin(theta);
}

lld polygonArea(vector<int> X, vector<int> Y)
{
    int n = X.size();
    lld area = 0.0;
    int j = n - 1;
    for (int i = 0; i < n; i++)
    {
        area += (X[j] + X[i]) * (Y[j] - Y[i]);
        j = i; // j previous i
    }
    return abs(area / 2.0);
}

int main()
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    long long test;
    cin >> test;
    while (test--)
    {
        ll n, q;
        cin >> n >> q;
        vector<int> x(n);
        vector<int> y(n);
        vector<int> vx(n);
        vector<int> vy(n);
        vector<lld> angle(n);
        vector<int> v(q);
        vector<int> t(q);
        vector<lld> ans(q);
        for (int i = 0; i < n; i++)
        {
            cin >> x[i] >> y[i];
        }
        for (int i = 0; i < q; i++)
        {
            cin >> v[i] >> t[i];
        }
        lld area = polygonArea(x, y);
        lld perimeter = 0;
        lld cotsum = 0;
        // cout<<"Area: "<<area<<endl;
        x.push_back(x[0]);
        // x.push_back(x[1]);
        y.push_back(y[0]);
        // y.push_back(y[1]);
        for (int i = 0; i < n; i++)
        {
            vx[i] = x[i + 1] - x[i];
            vy[i] = y[i + 1] - y[i];
        }
        vx.push_back(vx[0]);
        vy.push_back(vy[0]);
        for (int i = 0; i < n; i++)
        {
            perimeter += dist(x[i], y[i], x[i + 1], y[i + 1]);
            cotsum += cot_sum(vx[i], vy[i], vx[i + 1], vy[i + 1]);
            // cotsum = cotsum + 1 / cot_sum(x[i], y[i], x[i + 1], y[i + 1], x[i + 2], y[i + 2]);
        }
        for (int i = 0; i < q; i++)
        {
            ans[i] = area + perimeter * v[i] * t[i] + cotsum * v[i] * t[i] * v[i] * t[i];
            // for (int j = 0; j < n; j++)
            // {
            //     // ans[i] += v[i]*t[i]*dist(x[j],y[j],x[j+1],y[j+1]);
            //     ans[i] += (lld)(v[i] * t[i] * v[i] * t[i]) / cot_sum(x[j], y[j], x[j + 1], y[j + 1], x[j + 2], y[j + 2]);
            // }
            // cout<<setprecision(7)<<ans[i]<<endl;
            std::stringstream stream;
            cout << stream << std::fixed << std::setprecision(7) << ans[i] << endl;
            // std::string s = stream.str();
        }
    }
    return 0;
}
SAMPLE INPUT
2
4 1
1 1
2 1
2 2
1 2
1 1
3 2
1 1
2 1
1 2
1 1
2 3
SAMPLE OUTPUT
9.0000000
9.7426406
230.8086578
MY OUTPUT
9
9.742641
230.8087
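The last digits are not lost in the computation (long double is plenty here); they are lost in the formatting. By default cout prints 6 significant digits, and setprecision on its own also counts significant digits, while the expected output keeps 7 digits after the decimal point, which requires std::fixed. A sketch of the print statement inside the query loop (streaming the std::stringstream object itself, as in the code above, is not what you want):

// Sketch: fixed-point output with 7 digits after the decimal point.
cout << fixed << setprecision(7) << ans[i] << "\n";   // prints 9.0000000 rather than 9

// Or, if you really want it as a string first:
std::stringstream stream;
stream << std::fixed << std::setprecision(7) << ans[i];
std::string s = stream.str();
cout << s << "\n";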

Differences in filter2D implementation

I was trying to implement convolute2D (filter2D in OpenCV) and came up with the following code.
Mat convolute2D(Mat image, double** kernel, int W){
    Mat filtered_image = image.clone();
    // find center position of kernel (half of kernel size)
    int kCenterX = W / 2;
    int kCenterY = W / 2;
    int xx = 0;
    int yy = 0;
    cout << endl << "Performing convolution .." << endl;
    cout << "Image Size : " << image.rows << ", " << image.cols << endl;
    for (int i = 0; i < image.rows; ++i){
        for (int j = 0; j < image.cols; ++j){
            for (int x = 0; x < W; ++x){
                xx = W - 1 - x;
                for (int y = 0; y < W; ++y){
                    yy = W - 1 - y;
                    int ii = i + (x - kCenterX);
                    int jj = j + (y - kCenterY);
                    if (ii >= 0 && ii < image.rows && jj >= 0 && jj < image.cols) {
                        filtered_image.at<uchar>(Point(j, i)) += image.at<uchar>(Point(jj, ii)) * kernel[xx][yy];
                    }
                }
            }
        }
    }
    return filtered_image;
}
Assume we always have a square kernel. My results are quite different from filter2D's, though. Is it because of possible overflow, or is there a problem with my implementation?
Thanks
There are two issues with your code:
You don't set the output image to zero before adding values to it. Consequently, you are computing "input + filtered input", rather than just "filtered input".
Presuming that kernel has quite small values, "input pixel * kernel value" will likely yield a small number, which is rounded down when written to a uchar. Adding up each of these values for the kernel, you'll end up with a result that is too low.
I recommend that you do this:
double res = 0;
for (int x = 0; x < W; ++x){
    int xx = W - 1 - x;
    for (int y = 0; y < W; ++y){
        int yy = W - 1 - y;
        int ii = i + (x - kCenterX);
        int jj = j + (y - kCenterY);
        if (ii >= 0 && ii < image.rows && jj >= 0 && jj < image.cols) {
            res += image.at<uchar>(Point(jj, ii)) * kernel[xx][yy];
        }
    }
}
filtered_image.at<uchar>(Point(j, i)) = res;
This solves both issues at once. Also, this should be a bit faster because accessing the output image has a bit of overhead.
For much faster speeds, consider that the check for out-of-bounds reads (the if in the inner loop) slows down your code significantly, and is totally unnecessary for most pixels (as few pixels are near the image edge). Instead, you can split up your loops into [0,kCenterX], [kCenterX,image.rows-kCenterX], and [image.rows-kCenterX,image.rows]. The middle loop, which is typically by far the largest, will not need to check for out-of-bounds reads.
And use cv::saturate_cast for correct assignment to uchar, for example:
filtered_image.at<uchar>(Point(j, i)) = cv::saturate_cast<uchar>(res);
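Putting those pieces together, the whole function might look roughly like the following sketch. It keeps the original names and the skip-out-of-bounds border handling, and assumes an 8-bit single-channel image as in the question:

Mat convolute2D(Mat image, double** kernel, int W){
    // Start from a zeroed output instead of a copy of the input (issue 1).
    Mat filtered_image = Mat::zeros(image.size(), image.type());
    int kCenterX = W / 2;
    int kCenterY = W / 2;
    for (int i = 0; i < image.rows; ++i){
        for (int j = 0; j < image.cols; ++j){
            double res = 0;                       // accumulate in double (issue 2)
            for (int x = 0; x < W; ++x){
                int xx = W - 1 - x;               // flipped kernel index (true convolution)
                for (int y = 0; y < W; ++y){
                    int yy = W - 1 - y;
                    int ii = i + (x - kCenterX);
                    int jj = j + (y - kCenterY);
                    if (ii >= 0 && ii < image.rows && jj >= 0 && jj < image.cols){
                        res += image.at<uchar>(Point(jj, ii)) * kernel[xx][yy];
                    }
                }
            }
            // Clamp and round once per pixel when writing back.
            filtered_image.at<uchar>(Point(j, i)) = cv::saturate_cast<uchar>(res);
        }
    }
    return filtered_image;
}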

meshgrid MATLAB to C++ gives different results

Here is the MATLAB code I'm trying to convert to C++, where size(Iorig) == 1334 x 2026:
%% label checkers
Label = zeros(size(Iorig));
Margins = 11;
[X,Y] = meshgrid(1:size(Iorig,2),1:size(Iorig,1));
k = 1;
for i = 1:4
    for j = 1:6
        rr = rect{i,j};
        x1 = rr(1);
        x2 = rr(1) + rr(3);
        y1 = rr(2);
        y2 = rr(2) + rr(4);
        Label(X>=x1+Margins & X<x2-Margins & Y>=y1+Margins & Y<y2-Margins) = k;
        k = k+1;
    end
end
I understand that we want to label the rectangles found in the previous step; there are 24 of them.
But I don't understand how to convert the Label(...) line into simple C++ code without allocating a huge buffer of X and Y that basically just holds... indices.
Thanks for your help. Here is what I started doing:
// label checkers
List<List<int>^>^ label = gcnew List<List<int>^>();
int margins = 11;
int k = 1;
for (size_t i = 0; i < 4; i++)
{
    for (size_t j = 0; j < 6; j++)
    {
        MacbethCheckerBatchesColor^ rect = autoDetectMacbethResult[i * 6 + j];
        Point^ r = rect->Points[0];
        int x1 = r->X;
        int y1 = r->Y;
        r = rect->Points[2];
        int x2 = r->X;
        int y2 = r->Y;
        for (int h = 0; h < inputImage->HeightLines; h++)
        {
            List<int>^ tempRow = gcnew List<int>();
            for (int w = 0; w < inputImage->WidthColumns; w++)
            {
                if ((w >= x1 + margins) & (w < x2 - margins) & (h >= y1 + margins) & (h < y2 - margins))
                {
                    tempRow->Add(k);
                }
                else
                {
                    tempRow->Add(0);
                }
            }
            label->Add(tempRow);
        }
        k = k + 100; // I tried many other numbers here... same result
    }
}
Here is my result. Can you please help me find my mistake? The rectangles are the same, so I guess I have some other logical mistake.
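For comparison, here is a sketch in plain C++ (not C++/CLI) of what the MATLAB loop does without any meshgrid buffers: allocate one full-size label image once, and for each rectangle write k only inside that rectangle shrunk by the margin, rather than building a fresh row list per rectangle. The row-major vector layout and the {x1, y1, x2, y2} rectangle format are assumptions on my part:

#include <array>
#include <vector>

// Sketch: one label image (h rows x w cols, row-major), filled rectangle by rectangle.
std::vector<int> labelCheckers(int w, int h,
                               const std::vector<std::array<int, 4>>& rects, // {x1, y1, x2, y2}
                               int margins)
{
    std::vector<int> label(static_cast<size_t>(w) * h, 0);   // Label = zeros(size(Iorig))
    int k = 1;
    for (const auto& r : rects)
    {
        int x1 = r[0] + margins, x2 = r[2] - margins;
        int y1 = r[1] + margins, y2 = r[3] - margins;
        for (int y = y1; y < y2; ++y)          // Y >= y1+Margins & Y < y2-Margins
            for (int x = x1; x < x2; ++x)      // X >= x1+Margins & X < x2-Margins
                label[static_cast<size_t>(y) * w + x] = k;
        ++k;                                    // k = k + 1, one label per checker
    }
    return label;
}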

Exponential Smoothing of Newton Fractal

I'm writing myself a Newton fractal generator. The images all looked like this:
But I would actually like it to look a bit smoother. I've done some research and came across http://www.hiddendimension.com/FractalMath/Convergent_Fractals_Main.html, and this looks rather correct, except that there are some issues at the edges of the basins.
This is my generation loop:
while (i < 6000 && fabs(z.r) < 10000 && !found){
    f = computeFunction(z, params, paramc[0]);
    d = computeFunction(z, paramsD, paramc[1]);
    iterexp = iterexp + exp(-fabs(z.r) - 0.5 / (fabs(subComplex(zo, z).r)));
    zo = z;
    z = subComplex(z, divComplex(f, d));
    i++;
    for (int j = 0; j < paramc[0] - 1; j++){
        if (compComplex(z, zeros[j], RESOLUTION)){
            resType[x + xRes * y] = j;
            result[x + xRes * y] = iterexp;
            found = true;
            break;
        }
    }
    if (compComplex(z, zo, RESOLUTION / 100)){
        resType[x + xRes * y] = 12;
        break;
    }
}
The coloration:
const int xRes = res[0];
const int yRes = res[1];
for (int y = 0; y < fraktal->getHeight(); y++){
    for (int x = 0; x < fraktal->getWidth(); x++){
        int type, it;
        double conDiv;
        if (genCL && genCL->err == CL_SUCCESS){
            conDiv = genCL->result[x + y * xRes];
            type = genCL->typeRes[x + y * xRes];
            it = genCL->iterations[x + y * xRes];
        } else {
            type = 3;
            conDiv = runNewton(std::complex<double>((double)((x - (double)(xRes / 2)) / zoom[0]), (double)((y - (double)(yRes / 2)) / zoom[1])), type);
        }
        if (type < 15){
            Color col;
            col.setColorHexRGB(colors[type]);
            col.setColorHSV(col.getHue(), col.getSaturation(), 1 - conDiv);
            fraktal->setPixel(x, y, col);
        } else {
            fraktal->setPixel(x, y, conDiv, conDiv, conDiv, 1);
        }
    }
}
I appreciate any help to actually smooth this ;-)
Thanks,
- fodinabor
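Not a direct fix for the basin-edge artifacts, but one thing worth checking in the coloration above: iterexp is an open-ended sum, so 1 - conDiv is not guaranteed to stay inside the [0, 1] range that an HSV value expects. A minimal sketch of clamping it before the setColorHSV call (the optional rescale line uses maxConDiv, a hypothetical per-image maximum that is not in your code):

// Sketch: keep the smoothed shade inside the valid HSV value range [0, 1].
double shade = 1.0 - conDiv;
// double shade = 1.0 - log(1.0 + conDiv) / log(1.0 + maxConDiv); // optional rescale (maxConDiv assumed)
if (shade < 0.0) shade = 0.0;
if (shade > 1.0) shade = 1.0;
col.setColorHSV(col.getHue(), col.getSaturation(), shade);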