Slow motion in C++

I want to do slow motion. I've seen an implementation here: https://github.com/vaibhav06891/SlowMotion
I modified the code to generate only one frame.
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <string>

using namespace cv;
using namespace std;

#define CLAMP(x,min,max) ( ((x) < (min)) ? (min) : ( ((x) > (max)) ? (max) : (x) ) )

int main(int argc, char** argv)
{
    Mat frame, prevframe;
    prevframe = imread("img1.png");
    frame = imread("img2.png");

    Mat prevgray, gray;
    Mat fflow, bflow;

    Mat flowf(frame.rows, frame.cols, CV_8UC3); // the forward co-ordinates for interpolation
    flowf.setTo(Scalar(255, 255, 255));
    Mat flowb(frame.rows, frame.cols, CV_8UC3); // the backward co-ordinates for interpolation
    flowb.setTo(Scalar(255, 255, 255));
    Mat final(frame.rows, frame.cols, CV_8UC3);

    int fx, fy, bx, by;

    cvtColor(prevframe, prevgray, COLOR_BGR2GRAY); // convert to gray space for the optical flow calculation
    cvtColor(frame, gray, COLOR_BGR2GRAY);

    calcOpticalFlowFarneback(prevgray, gray, fflow, 0.5, 3, 15, 3, 3, 1.2, 0); // forward optical flow
    calcOpticalFlowFarneback(gray, prevgray, bflow, 0.5, 3, 15, 3, 3, 1.2, 0); // backward optical flow

    for (int y = 0; y < frame.rows; y++)
    {
        for (int x = 0; x < frame.cols; x++)
        {
            const Point2f fxy = fflow.at<Point2f>(y, x);
            fy = CLAMP(y + fxy.y*0.5, 0, frame.rows);
            fx = CLAMP(x + fxy.x*0.5, 0, frame.cols);
            flowf.at<Vec3b>(fy, fx) = prevframe.at<Vec3b>(y, x);

            const Point2f bxy = bflow.at<Point2f>(y, x);
            by = CLAMP(y + bxy.y*(1-0.5), 0, frame.rows);
            bx = CLAMP(x + bxy.x*(1-0.5), 0, frame.cols);
            flowb.at<Vec3b>(by, bx) = frame.at<Vec3b>(y, x);
        }
    }

    final = flowf*(1-0.5) + flowb*0.5; // combination of the forward and backward matrices
    cv::medianBlur(final, final, 3);
    imwrite("output.png", final);

    return 0;
}
But the result is not as expected.
For these images:
The result is:
Does anyone know what the problem is?

The optical flow algorithm won't work for your test images.
The first problem is that your test images have very little variation in neighbouring pixel values. Completely black lines and a single-colour square give the optical flow algorithm no clues about where the image areas moved, because the algorithm cannot process the whole image at once and computes the flow within a small 15x15 pixel window (as you set it in calcOpticalFlowFarneback).
The second problem is that your test images differ too much. The distance between the positions of the brown square is too big; again, Farneback cannot detect it.
Try the code with some real-life video frames, or edit your tests to be less monotonous (add some texture to the square, the background and the rectangle lines) and bring the squares closer together in the two images (try a 2-10 px distance). You can also play with the calcOpticalFlowFarneback arguments (read here) to suit your conditions.
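For example, here is a minimal sketch of a call tuned for larger motions; these parameter values are only illustrative guesses, not settings from the original answer:
// Hypothetical parameter choices for larger displacements: more pyramid levels,
// a bigger averaging window and more iterations. Tune them for your own frames.
calcOpticalFlowFarneback(prevgray, gray, fflow,
                         0.5,   // pyr_scale: image scale between pyramid levels
                         5,     // levels: more levels help with large displacements
                         31,    // winsize: larger window averages over textureless areas
                         5,     // iterations per pyramid level
                         5,     // poly_n: pixel neighbourhood for the polynomial expansion
                         1.1,   // poly_sigma: std dev used for the polynomial expansion
                         0);    // flags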
You can use this code to save the optical flow you get to an image for debugging:
Mat debugImage = Mat::zeros(fflow.size(), CV_8UC3);
float hsvHue, magnitude;

for (int x = 0; x < fflow.cols; x++)
{
    for (int y = 0; y < fflow.rows; y++)
    {
        auto& item = fflow.at<Vec2f>(y, x);
        magnitude = sqrtf(item[0] * item[0] + item[1] * item[1]);
        hsvHue = atan2f(item[1], item[0]) / static_cast<float>(CV_PI) * 180.f;
        // div 2 to fit 0..255 range
        hsvHue = (hsvHue >= 0.f ? hsvHue : (360.f + hsvHue)) / 2.f;

        debugImage.at<Vec3b>(y, x)[0] = static_cast<uchar>(hsvHue);
        debugImage.at<Vec3b>(y, x)[1] = 255;
        debugImage.at<Vec3b>(y, x)[2] = static_cast<uchar>(255.f * magnitude);
    }
}

cvtColor(debugImage, debugImage, CV_HSV2BGR);
imwrite("OpticalFlow.png", debugImage);
Here pixel flow direction will be represented with color (hue), and pixel move distance will be represented with brightness.
Try using these images I created:
Also note the following:
for (int y = 0; y < frame.rows; y++)
{
    for (int x = 0; x < frame.cols; x++)
    {
        const Point2f fxy = fflow.at<Point2f>(y, x);
        fy = CLAMP(y + fxy.y*0.5, 0, frame.rows);
        fx = CLAMP(x + fxy.x*0.5, 0, frame.cols);
        flowf.at<Vec3b>(fy, fx) = prevframe.at<Vec3b>(y, x);
        ...
This code won't color some flowf pixels that have no corresponding target position to move to, and the optical flow algorithm can produce such situations. I would change it to:
for (int y = 0; y < frame.rows; y++)
{
    for (int x = 0; x < frame.cols; x++)
    {
        const Point2f fxy = fflow.at<Point2f>(y, x);
        fy = CLAMP(y - fxy.y*0.5, 0, frame.rows);
        fx = CLAMP(x - fxy.x*0.5, 0, frame.cols);
        flowf.at<Vec3b>(y, x) = prevframe.at<Vec3b>(fy, fx);

        const Point2f bxy = bflow.at<Point2f>(y, x);
        by = CLAMP(y - bxy.y*(1 - 0.5), 0, frame.rows);
        bx = CLAMP(x - bxy.x*(1 - 0.5), 0, frame.cols);
        flowb.at<Vec3b>(y, x) = frame.at<Vec3b>(by, bx);
    }
}
With this changed code and my tests I get this output:
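If the goal is actual slow motion rather than a single middle frame, the same backward-mapping idea can be repeated for several interpolation factors. Below is a minimal sketch of that loop (my own extension of the code above, not part of the original answer; the function and file names are illustrative):
// Generate nFrames intermediate frames between prevframe and frame,
// reusing the forward/backward flow computed above.
void interpolateFrames(const Mat& prevframe, const Mat& frame,
                       const Mat& fflow, const Mat& bflow, int nFrames)
{
    for (int i = 1; i <= nFrames; i++)
    {
        float t = static_cast<float>(i) / (nFrames + 1); // interpolation factor in (0,1)
        Mat flowf(frame.size(), CV_8UC3, Scalar(255, 255, 255));
        Mat flowb(frame.size(), CV_8UC3, Scalar(255, 255, 255));

        for (int y = 0; y < frame.rows; y++)
        {
            for (int x = 0; x < frame.cols; x++)
            {
                const Point2f fxy = fflow.at<Point2f>(y, x);
                int fy = CLAMP(y - fxy.y * t, 0, frame.rows - 1);
                int fx = CLAMP(x - fxy.x * t, 0, frame.cols - 1);
                flowf.at<Vec3b>(y, x) = prevframe.at<Vec3b>(fy, fx);

                const Point2f bxy = bflow.at<Point2f>(y, x);
                int by = CLAMP(y - bxy.y * (1 - t), 0, frame.rows - 1);
                int bx = CLAMP(x - bxy.x * (1 - t), 0, frame.cols - 1);
                flowb.at<Vec3b>(y, x) = frame.at<Vec3b>(by, bx);
            }
        }

        Mat result = flowf * (1 - t) + flowb * t;
        cv::medianBlur(result, result, 3);
        imwrite("output_" + std::to_string(i) + ".png", result);
    }
}
Writing the frames out in order and playing them back at the original frame rate stretches the motion between the two input images.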

Related

How to do Parabolic Curve Deformation inside ROI (rectangle) of an Image

I am trying to "deform" pixels using parabola functions. In the middle of the ROI (the middle line of the rectangle) I want the parabola to be curved most significantly. As you move away from the centre/middle line (both right and left), the parabolas should "fade" and get closer to a straight line. I am using interpolation (right now nearest-neighbour interpolation) for this.
Note: right now I am only trying to perform it on grayscale images.
It should look natural, like in the following images:
I am using the following formula:
What I have managed to do so far:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <cmath>

using namespace std;

int main() {
    cv::Mat originalImage = cv::imread("../dog.jpeg", cv::IMREAD_GRAYSCALE);
    cv::Mat outPutImage = originalImage.clone();

    //***** SELECT ROI *****
    cv::Rect2d r = cv::selectROI("Select ROI then press Enter/Space", originalImage);
    cv::Mat imCrop = originalImage(r);
    cv::destroyWindow("Select ROI then press Enter/Space");

    double a = 0.001;
    double rate_change = 0.00005;
    double newX;

    for (int y = r.y; y < r.y + r.height; y++) {
        if (y <= r.y + r.height/2)
            a = a + rate_change;
        else {
            a = a - rate_change;
            //cout << "here" << endl;
        }

        for (int x = r.x; x < r.x + r.width; x++) {
            newX = a*pow(double(y - (double(r.y + double(r.height/2)))), 2) + double(x);

            // ** NEAREST neighbour interpolation
            int nearestNeighborX = round(newX);
            if (r.x < nearestNeighborX && nearestNeighborX < r.x + r.width)
                outPutImage.at<uchar>(cv::Point2i(x, y)) = originalImage.at<uchar>(cv::Point2i(nearestNeighborX, y));
        }
    }
    return 0;
}
I suspect that I need to derive a and rate_change from the rectangle size (the width and height of the rectangle), but I'm not sure how to do that.
My output looks unnatural.
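One possible way to tie a to the rectangle size (a sketch of the idea only, not a verified solution from this thread): pick the maximum horizontal shift you want for the most-bent parabola as a fraction of the ROI width, derive the peak coefficient from the ROI height, and fade it to zero towards the left/right edges of the ROI:
// Illustrative sketch (not from the thread): derive the parabola coefficient from
// the ROI size instead of hand-tuning `a` and `rate_change`.
// max_shift - largest horizontal displacement (pixels) of the most-bent parabola,
//             chosen here as a fraction of the ROI width (an assumption).
// a_max     - coefficient that produces exactly max_shift at the top/bottom rows.
// a(x)      - fades linearly from a_max at the middle column to 0 at the ROI's
//             left/right edges, so the outermost lines stay straight.
double cx = r.x + r.width  / 2.0;
double cy = r.y + r.height / 2.0;
double max_shift = 0.1 * r.width;                        // illustrative choice
double a_max = max_shift / std::pow(r.height / 2.0, 2);  // so a_max*(h/2)^2 == max_shift

for (int y = r.y; y < r.y + r.height; y++) {
    for (int x = r.x; x < r.x + r.width; x++) {
        double fade = 1.0 - std::abs(x - cx) / (r.width / 2.0); // 1 at the centre, 0 at the edges
        double a = a_max * fade;
        double newX = a * std::pow(y - cy, 2) + x;

        int nearestNeighborX = static_cast<int>(std::round(newX));
        if (nearestNeighborX >= r.x && nearestNeighborX < r.x + r.width)
            outPutImage.at<uchar>(cv::Point2i(x, y)) =
                originalImage.at<uchar>(cv::Point2i(nearestNeighborX, y));
    }
}
Here max_shift, the linear fade and the 0.1 factor are all assumptions to illustrate the derivation; originalImage, outPutImage and r are the variables from the code above.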

Hough Circular Transform

I'm trying to implement the Hough Transform using the gradient direction. I know that there is an implementation in OpenCV, but I want to do it myself.
I'm using Sobel to get the X and Y gradients. Then, for every pixel:
magnitude --> sqrt(sobelX^2 + sobelY^2)
direction --> atan2(sobelY, sobelX) * 180/PI
If the magnitude is higher than 220 (close to the maximum of 255), the pixel is treated as an edge.
The direction is then used in the circle equation.
But the results are not acceptable. Any help?
I know there are cv::polar and cv::cartToPolar, but I want to optimize the code so that all equations are calculated on the fly, with no extra loops.
cv::Mat sobelX, sobelY;
Sobel(mat, sobelX, CV_32F, 1, 0, kernelSize, 1, 0, cv::BORDER_REPLICATE);
Sobel(mat, sobelY, CV_32F, 0, 1, kernelSize, 1, 0, cv::BORDER_REPLICATE);
//cv::Canny(mat,mat,100,200,kernelSize,false);

debug::showImage("sobelX", sobelX);
debug::showImage("SobelY", sobelY);
debug::showImage("MAT", mat);

cv::Mat magnitudeMap, angleMap;
magnitudeMap = cv::Mat::zeros(mat.rows, mat.cols, mat.type());
angleMap = cv::Mat::zeros(mat.rows, mat.cols, mat.type());

std::vector<cv::Mat> hough_spaces(max);
for (int i = 0; i < max; ++i)
{
    hough_spaces[i] = cv::Mat::zeros(mat.rows, mat.cols, mat.type());
}

for (int x = 0; x < mat.rows; ++x)
{
    for (int y = 0; y < mat.cols; ++y)
    {
        const float magnitude = sqrt(sobelX.at<uchar>(x,y)*sobelX.at<uchar>(x,y) + sobelY.at<uchar>(x,y)*sobelY.at<uchar>(x,y));
        const float theta = atan2(sobelY.at<uchar>(x,y), sobelX.at<uchar>(x,y)) * 180/CV_PI;
        magnitudeMap.at<uchar>(x,y) = magnitude;

        if (magnitude > 225) //mat.at<const uchar>(x,y) == 255)
        {
            for (int radius = min; radius < max; ++radius)
            {
                const int a = x - radius * cos(theta); //lookup::cosArray[static_cast<int>(theta)]; //+ 0.5f;
                const int b = y - radius * sin(theta); //lookup::sinArray[static_cast<int>(theta)]; //+ 0.5f;
                if (a >= 0 && a < hough_spaces[radius].rows && b >= 0 && b < hough_spaces[radius].cols) {
                    hough_spaces[radius].at<uchar>(a,b) += 10;
                }
            }
        }
    }
}

debug::showImage("magnitude", magnitudeMap);

for (int radius = min; radius < max; ++radius)
{
    double min_f, max_f;
    cv::Point min_loc, max_loc;
    cv::minMaxLoc(hough_spaces[radius], &min_f, &max_f, &min_loc, &max_loc);
    if (max_f >= treshold)
    {
        circles.emplace_back(cv::Point3f(max_loc.x, max_loc.y, radius));
        // debug::showImage(std::to_string(radius).c_str(), hough_spaces[radius]);
    }
}
circles.shrink_to_fit();
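One thing I would point out (my own observation, not an answer from this thread): Sobel is asked for CV_32F output, so reading it with at<uchar> returns garbage, and cos()/sin() expect radians while theta here is computed in degrees. A sketch of the per-pixel computation under those assumptions, meant to slot into the loops above:
// Sketch only: assumes sobelX/sobelY are CV_32F, as produced by the Sobel calls above.
const float gx = sobelX.at<float>(x, y);
const float gy = sobelY.at<float>(x, y);
const float magnitude = std::sqrt(gx * gx + gy * gy);
const float theta = std::atan2(gy, gx); // keep it in radians for cos()/sin()

// inside the radius loop: same circle equation as before, with rounding
const int a = x - static_cast<int>(std::round(radius * std::cos(theta)));
const int b = y - static_cast<int>(std::round(radius * std::sin(theta)));
The accumulator and peak-finding logic can stay as they are.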

Generate image from an unorganized Point Cloud in PCL

I have an unorganized point cloud of the scene. Below is a screenshot of the point cloud-
I want to compose an image from this point cloud. Below is the code snippet-
#include <iostream>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <opencv2/opencv.hpp>

int main(int argc, char** argv)
{
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::io::loadPCDFile("file.pcd", *cloud);

    cv::Mat image = cv::Mat(cloud->height, cloud->width, CV_8UC3);
    for (int i = 0; i < image.rows; i++)
    {
        for (int j = 0; j < image.cols; j++)
        {
            pcl::PointXYZRGBA point = cloud->at(j, i);
            image.at<cv::Vec3b>(i, j)[0] = point.b;
            image.at<cv::Vec3b>(i, j)[1] = point.g;
            image.at<cv::Vec3b>(i, j)[2] = point.r;
        }
    }
    cv::imwrite("image.png", image);

    return (0);
}
The PCD file can be found here. The above code throws the following error at runtime:
terminate called after throwing an instance of 'pcl::IsNotDenseException'
what(): : Can't use 2D indexing with a unorganized point cloud
Since the cloud is unorganized, the HEIGHT field is 1. This confuses me when defining the dimensions of the image.
Questions
How to compose an image from an unorganized point cloud?
How to convert pixels located in composed image back to point cloud (3D space)?
PS: I am using PCL 1.7 in Ubuntu 14.04 LTS OS.
What an unorganized point cloud means is that the points are NOT assigned to a fixed (organized) grid, therefore ->at(j, i) can't be used (height is always 1, and the width is just the size of the cloud).
If you want to generate an image from your cloud, I suggest the following process:
Project the point cloud to a plane.
Generate a grid (organized point cloud) on that plane.
Interpolate the colors from the unorganized cloud to the grid (organized cloud).
Generate image from your organized grid (your initial attempt).
To be able to convert back to 3D:
When projecting to the plane save the "projection vectors" (vector from original point to the projected point).
Interpolate that as well to the grid.
methods for creating the grid:
Project the point cloud to a plane (unorganized cloud), and optionally save the reconstruction information in the normals:
pcl::PointCloud<pcl::PointXYZINormal>::Ptr ProjectToPlane(pcl::PointCloud<pcl::PointXYZINormal>::Ptr cloud, Eigen::Vector3f origin, Eigen::Vector3f axis_x, Eigen::Vector3f axis_y)
{
    PointCloud<PointXYZINormal>::Ptr aux_cloud(new PointCloud<PointXYZINormal>);
    copyPointCloud(*cloud, *aux_cloud);

    auto normal = axis_x.cross(axis_y);
    Eigen::Hyperplane<float, 3> plane(normal, origin);

    for (auto itPoint = aux_cloud->begin(); itPoint != aux_cloud->end(); itPoint++)
    {
        // project point to plane
        auto proj = plane.projection(itPoint->getVector3fMap());
        // optional: save the reconstruction information (residual from the projected point
        // back to the original point) as normals in the projected cloud.
        // The residual must be computed before the point is overwritten with its projection.
        itPoint->getNormalVector3fMap() = itPoint->getVector3fMap() - proj;
        itPoint->getVector3fMap() = proj;
    }
    return aux_cloud;
}
Generate a grid based on an origin point and two axis vectors (length and image_size can either be predetermined or calculated from your cloud):
pcl::PointCloud<pcl::PointXYZINormal>::Ptr GenerateGrid(Eigen::Vector3f origin, Eigen::Vector3f axis_x, Eigen::Vector3f axis_y, float length, int image_size)
{
    auto step = length / image_size;

    pcl::PointCloud<pcl::PointXYZINormal>::Ptr image_cloud(new pcl::PointCloud<pcl::PointXYZINormal>(image_size, image_size));
    for (auto i = 0; i < image_size; i++)
        for (auto j = 0; j < image_size; j++)
        {
            int x = i - int(image_size / 2);
            int y = j - int(image_size / 2);
            image_cloud->at(i, j).getVector3fMap() = origin + (x * step * axis_x) + (y * step * axis_y);
        }

    return image_cloud;
}
Interpolate to an organized grid (the normals store the reconstruction information, and the curvature is used as a flag to indicate an empty pixel, i.e. no corresponding point):
void InterpolateToGrid(pcl::PointCloud<pcl::PointXYZINormal>::Ptr cloud, pcl::PointCloud<pcl::PointXYZINormal>::Ptr grid, float max_resolution, int max_nn_to_consider)
{
    pcl::search::KdTree<pcl::PointXYZINormal>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZINormal>);
    tree->setInputCloud(cloud);

    for (auto idx = 0; idx < grid->points.size(); idx++)
    {
        std::vector<int> indices;
        std::vector<float> distances;
        if (tree->radiusSearch(grid->points[idx], max_resolution, indices, distances, max_nn_to_consider) > 0)
        {
            // Linear interpolation of:
            //   Intensity
            //   Normals - residual vector to inflate (reconstruct) the surface
            float intensity(0);
            Eigen::Vector3f n(0, 0, 0);
            float weight_factor = 1.0F / accumulate(distances.begin(), distances.end(), 0.0F);
            for (auto i = 0; i < indices.size(); i++)
            {
                float w = weight_factor * distances[i];
                intensity += w * cloud->points[indices[i]].intensity;
                auto res = cloud->points[indices[i]].getVector3fMap() - grid->points[idx].getVector3fMap();
                n += w * res;
            }
            grid->points[idx].intensity = intensity;
            grid->points[idx].getNormalVector3fMap() = n;
            grid->points[idx].curvature = 1;
        }
        else
        {
            grid->points[idx].intensity = 0;
            grid->points[idx].curvature = 0;
            grid->points[idx].getNormalVector3fMap() = Eigen::Vector3f(0, 0, 0);
        }
    }
}
Now you have a grid (an organized cloud), which you can easily map to an image. Any changes you make to the images, you can map back to the grid, and use the normals to project back to your original point cloud.
usage example for creating the grid:
pcl::PointCloud<pcl::PointXYZINormal>::Ptr original_cloud = ...;

// reference frame for the projection
// e.g. take the XZ plane around (0,0,0), of length 100, and map it to a 128x128 image
Eigen::Vector3f origin = Eigen::Vector3f(0, 0, 0);
Eigen::Vector3f axis_x = Eigen::Vector3f(1, 0, 0);
Eigen::Vector3f axis_y = Eigen::Vector3f(0, 0, 1);
float length = 100;
int image_size = 128;

auto aux_cloud = ProjectToPlane(original_cloud, origin, axis_x, axis_y);
// aux_cloud now contains the points of original_cloud, with:
//   xyz coordinates projected to the XZ plane
//   color (intensity) of original_cloud (unchanged)
//   normals - the original normal information is lost, as this field now stores the
//             projection information; if you wish to keep the normal data, define a
//             custom PointType
// note: for the sake of the projection, origin is only used to define the plane,
//       so any arbitrary point on the plane can be used

auto grid = GenerateGrid(origin, axis_x, axis_y, length, image_size);
// organized cloud that can be trivially mapped to an image

float max_resolution = 2 * length / image_size;
int max_nn_to_consider = 16;
InterpolateToGrid(aux_cloud, grid, max_resolution, max_nn_to_consider);
additional helper methods for how I use the grid:
// Convert an organized cloud to cv::Mat (an image and a mask)
// point Intensity is used for the image
//   if as_float is true  => take the raw intensity (image is CV_32F)
//   if as_float is false => assume intensity is in range [0, 255] and round it (image is CV_8U)
// point Curvature is used for the mask (assumed 1 or 0)
std::pair<cv::Mat, cv::Mat> ConvertGridToImage(pcl::PointCloud<pcl::PointXYZINormal>::Ptr grid, bool as_float)
{
    int rows = grid->height;
    int cols = grid->width;
    if ((rows <= 0) || (cols <= 0))
        return pair<Mat, Mat>(Mat(), Mat());

    // Initialize
    Mat image = Mat(rows, cols, as_float ? CV_32F : CV_8U);
    Mat mask = Mat(rows, cols, CV_8U);

    if (as_float)
    {
        for (int y = 0; y < image.rows; y++)
        {
            for (int x = 0; x < image.cols; x++)
            {
                image.at<float>(y, x) = grid->at(x, image.rows - y - 1).intensity;
                mask.at<uchar>(y, x) = 255 * grid->at(x, image.rows - y - 1).curvature;
            }
        }
    }
    else
    {
        for (int y = 0; y < image.rows; y++)
        {
            for (int x = 0; x < image.cols; x++)
            {
                image.at<uchar>(y, x) = (int)round(grid->at(x, image.rows - y - 1).intensity);
                mask.at<uchar>(y, x) = 255 * grid->at(x, image.rows - y - 1).curvature;
            }
        }
    }
    return pair<Mat, Mat>(image, mask);
}
// project an image back to a cloud (using the grid data)
// organized - whether the resulting cloud should be an organized cloud
pcl::PointCloud<pcl::PointXYZI>::Ptr BackProjectImage(cv::Mat image, pcl::PointCloud<pcl::PointXYZINormal>::Ptr grid, bool organized)
{
    if ((image.size().height != grid->height) || (image.size().width != grid->width))
    {
        assert(false);
        throw;
    }

    PointCloud<PointXYZI>::Ptr cloud(new PointCloud<PointXYZI>);
    cloud->reserve(grid->height * grid->width);

    // order of iteration is critical for an organized target cloud
    for (auto r = image.size().height - 1; r >= 0; r--)
    {
        for (auto c = 0; c < image.size().width; c++)
        {
            PointXYZI point;
            // the grid's curvature field is the valid-pixel flag set by InterpolateToGrid
            if (grid->at(c, r).curvature > 0) // valid pixel
            {
                // take the (possibly edited) intensity back from the image
                point.intensity = image.at<uchar>(image.rows - r - 1, c);
                point.getVector3fMap() = grid->at(c, r).getVector3fMap() + grid->at(c, r).getNormalVector3fMap();
            }
            else // invalid pixel
            {
                if (organized)
                {
                    point.intensity = 0;
                    point.x = numeric_limits<float>::quiet_NaN();
                    point.y = numeric_limits<float>::quiet_NaN();
                    point.z = numeric_limits<float>::quiet_NaN();
                }
                else
                {
                    continue;
                }
            }
            cloud->push_back(point);
        }
    }

    if (organized)
    {
        cloud->width = grid->width;
        cloud->height = grid->height;
    }
    return cloud;
}
usage example for working with the grid:
// image_mask is a std::pair<cv::Mat, cv::Mat>
auto image_mask = ConvertGridToImage(grid, false);

// ... do some work with the image/mask ...

auto new_cloud = BackProjectImage(image_mask.first, grid, false);
For an unorganized point cloud, height and width have different meanings as you may have noticed. http://pointclouds.org/documentation/tutorials/basic_structures.php
It is not as simple to convert an unorganized point cloud to an image, as the points are represented as floats and there is no defined perspective. However, you can work around that by determining a perspective and creating discrete bins for the points. A similar question and answer can be found here: Converting a pointcloud to a depth/multi channel image
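A rough sketch of that binning idea (my own illustration under assumed conventions, not code from the linked answer): choose a viewing direction, drop one coordinate, and rasterize the other two into fixed-size bins:
// Orthographic "top-down" binning of an unorganized cloud into a colour image.
// Assumes pcl::PointXYZRGBA points as in the question; bins X and Y and ignores Z
// (keep the nearest Z per bin instead if you want a depth map).
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <limits>

cv::Mat BinCloudToImage(const pcl::PointCloud<pcl::PointXYZRGBA>& cloud, int width, int height)
{
    // find the bounding box of the cloud in X/Y
    float minX = std::numeric_limits<float>::max(), maxX = std::numeric_limits<float>::lowest();
    float minY = minX, maxY = maxX;
    for (const auto& p : cloud)
    {
        minX = std::min(minX, p.x); maxX = std::max(maxX, p.x);
        minY = std::min(minY, p.y); maxY = std::max(maxY, p.y);
    }

    cv::Mat image(height, width, CV_8UC3, cv::Scalar(0, 0, 0));
    for (const auto& p : cloud)
    {
        // map each point into a discrete pixel bin
        int col = static_cast<int>((p.x - minX) / (maxX - minX) * (width - 1));
        int row = static_cast<int>((p.y - minY) / (maxY - minY) * (height - 1));
        image.at<cv::Vec3b>(row, col) = cv::Vec3b(p.b, p.g, p.r);
    }
    return image;
}
The orthographic top-down view and the output size are arbitrary choices here; a perspective (pinhole) projection would need camera intrinsics instead.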

OpenCV : algorithm for simple image rotation and reduction

I have tried image rotation and reduction (JPEG) with getRotationMatrix2D(center, angle, scale);
and warpAffine(image1, image3, rotation, image3.size());
and I got the result I wanted (image as below):
for (int r = 0; r < image1.rows; r++) {
    for (int c = r + 1; c < image1.cols; c++) {
        Point center = Point(image1.cols / 2, image1.rows / 2);
        Point center1 = Point(image1.cols / 2, image1.rows / 2);
        double angle = 90.0;
        double scale = 1;
        double angle1 = 90.0;
        double scale1 = 0.5;
        rotation = getRotationMatrix2D(center, angle, scale);
        rotation1 = getRotationMatrix2D(center1, angle1, scale1);
but I want to learn a simple rotation and reduction algorithm (simple for a beginner like me), without using any library, to get the same result.
After searching for various solutions, I ended up with this,
from https://gamedev.stackexchange.com/questions/67613/how-can-i-rotate-a-bitmap-without-d3d-or-opengl
Can anyone break down the simple linear algebra bit by bit and explain it to me in regards to my pseudo code?
EDIT:
Reduction code
void reduction(Mat imgC)
{
    for (int r = 0; r < imgC.rows; r++)
    {
        for (int c = 0; c < imgC.cols; c++)
        {
            int new_x = c * 125 / 256; // note: c * (125 / 256) would always be 0 due to integer division
            int new_y = r * 125 / 256;
            imgC.at<uchar>(r, c) = imgC.at<uchar>(new_y, new_x);
        }
    }
}
In this example I have loaded two images: one in grayscale and the other in color. Both are the same image, so you can easily see how to handle rotation with the mathematical equation. In a similar manner you can add scaling and reduction (see the sketch after the code below). Here each point is converted according to the equation and the color value is set at the new location. Here is the code:
#include <iostream>
#include <string>
#include "opencv/highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"

using namespace std;
using namespace cv;

#define PIPI 3.14159265

int main()
{
    Mat img  = imread("C:/Users/dell2/Desktop/DSC00587.JPG", 0); // loading grayscale image
    Mat imgC = imread("C:/Users/dell2/Desktop/DSC00587.JPG", 1); // loading color image

    Mat rotC(imgC.cols, imgC.rows, imgC.type());
    rotC = Scalar(0, 0, 0);
    Mat rotG(img.cols, img.rows, img.type());
    rotG = Scalar(0, 0, 0);

    float angle = 90.0 * PIPI / 180.0;
    for (int r = 0; r < imgC.rows; r++)
    {
        for (int c = 0; c < imgC.cols; c++)
        {
            float new_px = c * cos(angle) - r * sin(angle);
            float new_py = c * sin(angle) + r * cos(angle);
            Point pt((int)-new_px, (int)new_py);

            // color image
            rotC.at<Vec3b>(pt) = imgC.at<Vec3b>(r, c); // assign color value at new location from original image
            // grayscale image
            rotG.at<uchar>(pt) = img.at<uchar>(r, c);  // assign gray value at new location from original image
        }
    }

    imshow("color", rotC);
    imshow("gray", rotG);
    waitKey(0);
    return 0;
}
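Since the question also asks about reduction, here is a minimal sketch of nearest-neighbour scaling written in the same style, without library calls (my own addition; the function name and the grayscale assumption are illustrative):
// Nearest-neighbour reduction: for every destination pixel, sample the
// corresponding source pixel. scale = 0.5 halves the image size.
Mat reduce(const Mat& src, double scale) // assumes a grayscale (CV_8UC1) image
{
    Mat dst(cvRound(src.rows * scale), cvRound(src.cols * scale), src.type());
    for (int r = 0; r < dst.rows; r++)
    {
        for (int c = 0; c < dst.cols; c++)
        {
            int src_r = std::min((int)(r / scale), src.rows - 1); // map back to the source
            int src_c = std::min((int)(c / scale), src.cols - 1);
            dst.at<uchar>(r, c) = src.at<uchar>(src_r, src_c);
        }
    }
    return dst;
}
For example, Mat small = reduce(img, 0.5); halves both dimensions.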

Creating vignette filter in opencv?

How can we make a vignette filter in OpenCV? Do we need to implement an algorithm for it, or only play with the BGR values? How can we make this type of filter? I saw its implementation here but I didn't understand it clearly. Anyone with complete algorithm and implementation guidance is highly appreciated.
After Abid Rahman K's answer I tried this in C++:
int main()
{
    Mat v;
    Mat img = imread("D:\\2.jpg");
    img.convertTo(v, CV_32F);

    Mat a, b, c, d, e;
    c.create(img.rows, img.cols, CV_32F);
    d.create(img.rows, img.cols, CV_32F);
    e.create(img.rows, img.cols, CV_32F);

    a = getGaussianKernel(img.cols, 300, CV_32F);
    b = getGaussianKernel(img.rows, 300, CV_32F);
    c = b*a.t();

    double minVal;
    double maxVal;
    cv::minMaxLoc(c, &minVal, &maxVal);
    d = c/maxVal;
    e = v*d; // This line causing error

    imshow("venyiet", e);
    cvWaitKey();
}
d is displayed correctly, but the e = v*d line is causing a runtime error of:
OpenCV Error: Assertion failed (type == B.type() && (type == CV_32FC1 || type ==
CV_64FC1 || type == CV_32FC2 || type == CV_64FC2)) in unknown function, file ..
\..\..\src\opencv\modules\core\src\matmul.cpp, line 711
First of all, Abid Rahman K describes the easiest way to go about this filter. You should seriously study his answer with time and attention. Wikipedia's take on vignetting is also quite clarifying for those who have never heard about this filter.
Browny's implementation of this filter is considerably more complex. However, I ported his code to the C++ API and simplified it so you can follow the instructions yourself.
#include <math.h>
#include <vector>
#include <cv.hpp>
#include <highgui/highgui.hpp>

// Helper function to calculate the distance between 2 points.
double dist(CvPoint a, CvPoint b)
{
    return sqrt(pow((double) (a.x - b.x), 2) + pow((double) (a.y - b.y), 2));
}
// Helper function that computes the longest distance from the edge to the center point.
double getMaxDisFromCorners(const cv::Size& imgSize, const cv::Point& center)
{
    // given a rect and a line
    // get which corner of rect is farthest from the line
    std::vector<cv::Point> corners(4);
    corners[0] = cv::Point(0, 0);
    corners[1] = cv::Point(imgSize.width, 0);
    corners[2] = cv::Point(0, imgSize.height);
    corners[3] = cv::Point(imgSize.width, imgSize.height);

    double maxDis = 0;
    for (int i = 0; i < 4; ++i)
    {
        double dis = dist(corners[i], center);
        if (maxDis < dis)
            maxDis = dis;
    }
    return maxDis;
}
// Helper function that creates a gradient image.
// firstPt, radius and power are variables that control the artistic effect of the filter.
void generateGradient(cv::Mat& mask)
{
    cv::Point firstPt = cv::Point(mask.size().width/2, mask.size().height/2);
    double radius = 1.0;
    double power = 0.8;

    double maxImageRad = radius * getMaxDisFromCorners(mask.size(), firstPt);

    mask.setTo(cv::Scalar(1));
    for (int i = 0; i < mask.rows; i++)
    {
        for (int j = 0; j < mask.cols; j++)
        {
            double temp = dist(firstPt, cv::Point(j, i)) / maxImageRad;
            temp = temp * power;
            double temp_s = pow(cos(temp), 4);
            mask.at<double>(i, j) = temp_s;
        }
    }
}
// This is where the fun starts!
int main()
{
    cv::Mat img = cv::imread("stack-exchange-chefs.jpg");
    if (img.empty())
    {
        std::cout << "!!! Failed imread\n";
        return -1;
    }

    /*
    cv::namedWindow("Original", cv::WINDOW_NORMAL);
    cv::resizeWindow("Original", img.size().width/2, img.size().height/2);
    cv::imshow("Original", img);
    */
What img looks like:
    cv::Mat maskImg(img.size(), CV_64F);
    generateGradient(maskImg);

    /*
    cv::Mat gradient;
    cv::normalize(maskImg, gradient, 0, 255, CV_MINMAX);
    cv::imwrite("gradient.png", gradient);
    */
What maskImg looks like:
    cv::Mat labImg(img.size(), CV_8UC3);
    cv::cvtColor(img, labImg, CV_BGR2Lab);

    for (int row = 0; row < labImg.size().height; row++)
    {
        for (int col = 0; col < labImg.size().width; col++)
        {
            cv::Vec3b value = labImg.at<cv::Vec3b>(row, col);
            value.val[0] *= maskImg.at<double>(row, col);
            labImg.at<cv::Vec3b>(row, col) = value;
        }
    }

    cv::Mat output;
    cv::cvtColor(labImg, output, CV_Lab2BGR);
    //cv::imwrite("vignette.png", output);

    cv::namedWindow("Vignette", cv::WINDOW_NORMAL);
    cv::resizeWindow("Vignette", output.size().width/2, output.size().height/2);
    cv::imshow("Vignette", output);
    cv::waitKey();

    return 0;
}
What output looks like:
As stated in the code above, by changing the values of firstPt, radius and power you can achieve stronger/weaker artistic effects.
Good luck!
You can do a simple implementation using Gaussian Kernels available in OpenCV.
Load the image and get its number of rows and columns.
Create two Gaussian kernels of size rows and cols, say A and B. Their variance depends upon your needs.
C = transpose(A)*B, i.e. multiply a column vector by a row vector so that the resulting array is the same size as the image.
D = C/C.max()
E = img*D
See the implementation below (for a grayscale image):
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('temp.jpg',0)
rows,cols = img.shape
a = cv2.getGaussianKernel(cols,300)
b = cv2.getGaussianKernel(rows,300)
c = b*a.T
d = c/c.max()
e = img*d
cv2.imwrite('vig2.png',e)
Below is my result:
Similarly for Color image:
NOTE: Of course, it is centered. You will need to make additional modifications to move the focus to other places.
A similar approach, close to Abid's answer, but the code is for a color image:
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('turtle.jpg',1)
rows,cols = img.shape[:2]
zeros = np.copy(img)
zeros[:,:,:] = 0
a = cv2.getGaussianKernel(cols,900)
b = cv2.getGaussianKernel(rows,900)
c = b*a.T
d = c/c.max()
zeros[:,:,0] = img[:,:,0]*d
zeros[:,:,1] = img[:,:,1]*d
zeros[:,:,2] = img[:,:,2]*d
cv2.imwrite('vig2.png',zeros)
Original Image (Taken from Pexels under CC0 Licence)
After applying the vignette with a sigma of 900 (i.e. cv2.getGaussianKernel(cols,900)):
After applying the vignette with a sigma of 300 (i.e. cv2.getGaussianKernel(cols,300)):
Additionally, you can focus the vignette effect on the coordinates of your choice by simply shifting the mean of the Gaussian to your focus point, as follows.
import cv2
import numpy as np

img = cv2.imread('turtle.jpg',1)

fx,fy = 1465,180 # Add your focus coordinates here
# fx,fy = 145,1000 # (alternative focus point)
sigma = 300 # Standard deviation of the Gaussian

rows,cols = img.shape[:2]
fxn = fx - cols//2 # Normalised temporary vars
fyn = fy - rows//2

zeros = np.copy(img)
zeros[:,:,:] = 0
a = cv2.getGaussianKernel(2*cols ,sigma)[cols-fx:2*cols-fx]
b = cv2.getGaussianKernel(2*rows ,sigma)[rows-fy:2*rows-fy]
c = b*a.T
d = c/c.max()
zeros[:,:,0] = img[:,:,0]*d
zeros[:,:,1] = img[:,:,1]*d
zeros[:,:,2] = img[:,:,2]*d
# zeros = add_alpha(zeros)  # add_alpha is a helper from the author's own code, not defined here
cv2.imwrite('vig4.png',zeros)
The size of the turtle image is 1980x1200 (WxH). The following is an example focusing at the coordinate 1465,180 (i.e. fx,fy = 1465,180). (Note that I have reduced the variance to exemplify the change in focus.)
The following is an example focusing at the coordinate 145,1000 (i.e. fx,fy = 145,1000).
Here is my C++ implementation of the vignette filter for a color image using OpenCV. It is faster than the accepted answer.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
double fastCos(double x){
x += 1.57079632;
if (x > 3.14159265)
x -= 6.28318531;
if (x < 0)
return 1.27323954 * x + 0.405284735 * x * x;
else
return 1.27323954 * x - 0.405284735 * x * x;
}
double dist(double ax, double ay,double bx, double by){
return sqrt((ax - bx)*(ax - bx) + (ay - by)*(ay - by));
}
int main(int argv, char** argc){
Mat src = cv::imread("filename_of_your_image.jpg");
Mat dst = Mat::zeros(src.size(), src.type());
double radius; //value greater than 0,
//greater the value lesser the visible vignette
//for a medium vignette use a value in range(0.5-1.5)
cin << radius;
double cx = (double)src.cols/2, cy = (double)src.rows/2;
double maxDis = radius * dist(0,0,cx,cy);
double temp;
for (int y = 0; y < src.rows; y++) {
for (int x = 0; x < src.cols; x++) {
temp = fastCos(dist(cx, cy, x, y) / maxDis);
temp *= temp;
dst.at<Vec3b>(y, x)[0] =
saturate_cast<uchar>((src.at<Vec3b>(y, x)[0]) * temp);
dst.at<Vec3b>(y, x)[1] =
saturate_cast<uchar>((src.at<Vec3b>(y, x)[1]) * temp );
dst.at<Vec3b>(y, x)[2] =
saturate_cast<uchar>((src.at<Vec3b>(y, x)[2]) * temp);
}
}
imshow ("Vignetted Image", dst);
waitKey(0);
}
Here is a C++ implementation of vignetting for a grayscale image:
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main(int argv, char** argc)
{
Mat test = imread("test.jpg", IMREAD_GRAYSCALE);
Mat kernel_X = getGaussianKernel(test.cols, 100);
Mat kernel_Y = getGaussianKernel(test.rows, 100);
Mat kernel_X_transpose;
transpose(kernel_X, kernel_X_transpose);
Mat kernel = kernel_Y * kernel_X_transpose;
Mat mask_v, proc_img;
normalize(kernel, mask_v, 0, 1, NORM_MINMAX);
test.convertTo(proc_img, CV_64F);
multiply(mask_v, proc_img, proc_img);
convertScaleAbs(proc_img, proc_img);
imshow ("Vignette", proc_img);
waitKey(0);
return 0;
}