I would like to make a camera rotate around an object, but without shifting the pivot to its center. Here is a good example I made with Blender:
Link to gif (in this example the camera rotates around the cursor, but it works as an illustration)
So what I want is: when I click a certain object, I want to rotate around it, but without centering the camera pivot on the object's position, basically retaining the object's position on screen. I found many examples of rotating around an object's center, but I can't seem to find anything for my problem.
I currently have working camera rotation and movement, but I don't know how to approach this. I am working in OpenGL with the Cinder framework.
I would be grateful for a simple explanation of how I could do it :)
My current code:
void HandleUICameraRotate() {
    // selectedObj <- object... has position etc.
    float deltaX = (mMousePos.x - mInitialMousePos.x) / -100.0f;
    float deltaY = (mMousePos.y - mInitialMousePos.y) / 100.0f;

    // Camera direction vector
    glm::vec3 mW = glm::normalize(mInitialCam.getViewDirection());
    bool invertMotion = (mInitialCam.getOrientation() * mInitialCam.getWorldUp()).y < 0.0f;

    // Right axis vector
    vec3 mU = normalize(cross(mInitialCam.getWorldUp(), mW));

    if (invertMotion) {
        deltaX = -deltaX;
        deltaY = -deltaY;
    }

    glm::vec3 rotatedVec = glm::angleAxis(deltaY, mU) * (-mInitialCam.getViewDirection() * mInitialPivotDistance);
    rotatedVec = glm::angleAxis(deltaX, mInitialCam.getWorldUp()) * rotatedVec;

    mCamera.setEyePoint(mInitialCam.getEyePoint() + mInitialCam.getViewDirection() * mInitialPivotDistance + rotatedVec);
    mCamera.setOrientation(glm::angleAxis(deltaX, mInitialCam.getWorldUp()) * glm::angleAxis(deltaY, mU) * mInitialCam.getOrientation());
}
This is how you can do this rotation (look at the function orbit(...) in the code below).
The basic idea is to rotate the position and the lookAt direction of the camera about the target position. When you run the code demo, use the mouse right button to select the target, and move the mouse to rotate the camera around the target.
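In sketch form: subtract the target from both the camera position and its look-at point, rotate, and add the target back. A minimal Python/numpy illustration of that idea (the names here are mine; the demo below does the equivalent with THREE.Vector3.applyAxisAngle):

import numpy as np

def orbit(eye, look_at, target, axis, angle):
    # Rotate the camera eye point and its look-at point about `target`
    # by `angle` radians around the unit vector `axis`.
    def rotate(v):
        # Rodrigues' rotation formula
        c, s = np.cos(angle), np.sin(angle)
        return v * c + np.cross(axis, v) * s + axis * np.dot(axis, v) * (1 - c)
    new_eye = rotate(eye - target) + target
    new_look_at = rotate(look_at - target) + target
    return new_eye, new_look_at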
Hit me up if you need any clarifications.
let renderer;
let canvas;
let camera;
let scene;
const objects = [];
const highlightGroup = new THREE.Group();
const xaxis = new THREE.Vector3(1, 0, 0);
const yaxis = new THREE.Vector3(0, 1, 0);
const zaxis = new THREE.Vector3(0, 0, 1);
const radius = 10;
const fov = 40;
const tanfov = Math.tan(fov * Math.PI / 360.0);
function initCamera() {
    const aspect = 2;  // the canvas default
    const near = 0.1;
    const far = 2000;
    camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
    camera.position.set(0, 0, 500);
}
function initLights() {
    const color = 0xFFFFFF;
    const intensity = 1;
    const light = new THREE.PointLight(color, intensity);
    light.position.set(0, 0, 200);
    scene.add(light);
    const light1 = new THREE.PointLight(color, intensity);
    light1.position.set(100, 200, -200);
    scene.add(light1);
}
function initObjects() {
    const geometry = new THREE.SphereBufferGeometry(radius, 13, 13);
    const yellowMat = new THREE.MeshPhongMaterial({color: 0xffff00});
    const redMat = new THREE.MeshPhongMaterial({color: 0xff0000});
    const greenMat = new THREE.MeshPhongMaterial({color: 0x00ff00});
    const blueMat = new THREE.MeshPhongMaterial({color: 0x0000ff});
    const magentaMat = new THREE.MeshPhongMaterial({color: 0xff00ff});
    const cyanMat = new THREE.MeshPhongMaterial({color: 0x00ffff});
    const lblueMat = new THREE.MeshPhongMaterial({color: 0x6060ff});

    let sphere;
    sphere = new THREE.Mesh(geometry, yellowMat);
    sphere.position.set(0, 0, 0);
    objects.push(sphere);
    scene.add(sphere);

    sphere = new THREE.Mesh(geometry, redMat);
    sphere.position.set(50, 0, 0);
    objects.push(sphere);
    scene.add(sphere);

    sphere = new THREE.Mesh(geometry, blueMat);
    sphere.position.set(0, 0, 50);
    objects.push(sphere);
    scene.add(sphere);

    sphere = new THREE.Mesh(geometry, greenMat);
    sphere.position.set(0, 50, 0);
    objects.push(sphere);
    scene.add(sphere);

    sphere = new THREE.Mesh(geometry, magentaMat);
    sphere.position.set(0, -50, 0);
    objects.push(sphere);
    scene.add(sphere);

    sphere = new THREE.Mesh(geometry, cyanMat);
    sphere.position.set(-50, 0, 0);
    objects.push(sphere);
    scene.add(sphere);

    sphere = new THREE.Mesh(geometry, lblueMat);
    sphere.position.set(0, 0, -50);
    objects.push(sphere);
    scene.add(sphere);

    scene.add(highlightGroup);
}
function createRenderLoop() {
    function render(time) {
        time *= 0.001;
        renderer.render(scene, camera);
        requestAnimationFrame(render);
    }
    requestAnimationFrame(render);
}
function initEventHandlers() {
    function onWindowResize() {
        camera.aspect = window.innerWidth / window.innerHeight;
        camera.updateProjectionMatrix();
        renderer.setSize(window.innerWidth, window.innerHeight);
    }
    window.addEventListener('resize', onWindowResize, false);
    onWindowResize();

    canvas.addEventListener('contextmenu', event => event.preventDefault());
}
function initOrbitCam() {
    const diffToAngle = 0.01;
    const hscale = 1.05;
    const highlightMat = new THREE.MeshBasicMaterial({
        color: 0xffffff,
        transparent: true,
        opacity: 0.2,
    });
    let isMouseButtonDown = -1;
    let mouseDownPos;
    let rightDownDragging = false;
    let savedCamPos;
    let savedCamLookAt = new THREE.Vector3();
    let orbitTarget;

    function absScrDist(pos1, pos2) {
        return Math.abs(pos1[0] - pos2[0]) + Math.abs(pos1[1] - pos2[1]);
    }

    function addHighlight(obj) {
        const objCopy = obj.clone();
        objCopy.material = highlightMat;
        objCopy.scale.set(hscale, hscale, hscale);
        highlightGroup.add(objCopy);
    }

    function emptyHighlightGroup() {
        highlightGroup.children.slice(0).forEach(child => {
            highlightGroup.remove(child);
        });
    }

    // Pick the clicked object via raycasting; its position becomes the orbit target.
    function getTarget(camera, event) {
        const [x, y] = [event.offsetX, event.offsetY];
        const [cw, ch] = [canvas.width, canvas.height];
        const mouse3D = new THREE.Vector3((x / cw) * 2 - 1,
                                          -(y / ch) * 2 + 1,
                                          0.5);
        const raycaster = new THREE.Raycaster();
        raycaster.setFromCamera(mouse3D, camera);
        const intersects = raycaster.intersectObjects(objects);
        console.log(intersects);
        if (intersects.length > 0) {
            addHighlight(intersects[0].object);
            return intersects[0].object.position.clone();
        }
        // Nothing hit: orbit about a point along the view direction instead.
        const nv = new THREE.Vector3();
        camera.getWorldDirection(nv);
        return camera.position.clone().add(nv.clone().multiplyScalar(500));
    }

    function onCanvasMouseDown(event) {
        isMouseButtonDown = event.button;
        mouseDownPos = [event.offsetX, event.offsetY];
        orbitTarget = getTarget(camera, event);
        event.preventDefault();
        event.stopPropagation();
    }
    canvas.addEventListener("mousedown", onCanvasMouseDown, false);

    function onCanvasMouseUp(event) {
        isMouseButtonDown = -1;
        rightDownDragging = false;
        emptyHighlightGroup();
        event.preventDefault();
        event.stopPropagation();
    }
    canvas.addEventListener("mouseup", onCanvasMouseUp, false);

    function onCanvasMouseMove(event) {
        if (rightDownDragging === false) {
            if (isMouseButtonDown === 2) {
                const currPos = [event.clientX, event.clientY];
                const dragDist = absScrDist(mouseDownPos, currPos);
                if (dragDist >= 5) {
                    rightDownDragging = true;
                    savedCamPos = camera.position.clone();
                    camera.getWorldDirection(savedCamLookAt);
                }
            }
        } else {
            const xdiff = event.clientX - mouseDownPos[0];
            const ydiff = event.clientY - mouseDownPos[1];
            const yAngle = xdiff * diffToAngle;
            const xAngle = ydiff * diffToAngle;
            orbit(-xAngle, -yAngle, savedCamPos.clone(), savedCamLookAt.clone(), orbitTarget);
        }
    }
    canvas.addEventListener("mousemove", onCanvasMouseMove, false);

    function orbit(xRot, yRot, camPos, camLookAt, target) {
        // Horizontal axis perpendicular to the view direction (y component dropped).
        const newXAxis = camLookAt.clone();
        const lx = camLookAt.x;
        const lz = camLookAt.z;
        newXAxis.x = -lz;
        newXAxis.z = lx;
        newXAxis.y = 0;

        // Rotate the camera position about the target...
        const newCamPos = camPos
            .sub(target)
            .applyAxisAngle(newXAxis, xRot)
            .applyAxisAngle(yaxis, yRot)
            .add(target);
        camera.position.set(...newCamPos.toArray());

        // ...and rotate the look-at direction the same way.
        const relLookAt = camLookAt
            .applyAxisAngle(newXAxis, xRot)
            .applyAxisAngle(yaxis, yRot)
            .add(newCamPos);
        camera.lookAt(...relLookAt.toArray());

        camera.updateProjectionMatrix();
    }
}
function setup() {
    canvas = document.querySelector('#c');
    renderer = new THREE.WebGLRenderer({canvas});
    scene = new THREE.Scene();
    initCamera();
    initLights();
    initObjects();
    initEventHandlers();
    initOrbitCam();
    createRenderLoop();
}

setup();
#c {
    width: 100vw;
    height: 100vh;
    display: block;
}
<canvas id="c"></canvas>
<script src="https://unpkg.com/three#0.85.0/examples/js/libs/stats.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/110/three.min.js"></script>
<script src="https://unpkg.com/three#0.85.0/examples/js/controls/OrbitControls.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.2.5/gsap.min.js"></script>
I don't exactly understand what you want to do... but maybe this helps...
Transformations in 3D space happen through matrices, and there are different kinds of transformation matrices (i.e. translation, scale, rotation, ...). If you want to rotate an object around an axis which is not its own, you have to move the object to that axis, rotate it in that position, and then move it back. In other words, you multiply the coordinates of whatever you want to rotate by a translation matrix, then by a rotation matrix, and then again by a translation matrix. Luckily, by the rules of linear algebra, we can simply multiply all of these matrices together in order, and then multiply the result with the coordinates...
instead of this:
translationMatrix * somePosition;
rotationMatrix * somePosition;
anotherTranslationMatrix * somePosition;
this:
translationMatrix * rotationMatrix * anotherTranslationMatrix * somePosition;
It is a bit vague to explain it like that, but the idea is there. This might seem like a lot of work, but GPUs are highly optimised to perform matrix multiplications, so if you succeed in letting the GPU perform these, it will not be an issue performance-wise...
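As a concrete illustration, here is a minimal numpy sketch (all names made up) that rotates a 2D point about a pivot by composing the three matrices in homogeneous coordinates:

import numpy as np

angle = np.radians(45)
pivot = np.array([3.0, 2.0])

# T1 moves the pivot to the origin, R rotates, T2 moves it back.
T1 = np.array([[1, 0, -pivot[0]], [0, 1, -pivot[1]], [0, 0, 1]])
R = np.array([[np.cos(angle), -np.sin(angle), 0],
              [np.sin(angle),  np.cos(angle), 0],
              [0, 0, 1]])
T2 = np.array([[1, 0, pivot[0]], [0, 1, pivot[1]], [0, 0, 1]])

M = T2 @ R @ T1                # compose once...
p = np.array([4.0, 2.0, 1.0])  # homogeneous point
print(M @ p)                   # ...then apply to as many points as you like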
If you already knew this: welp...
If you did not know this, research some linear algebra, specifically: coordinate spaces, matrix multiplication and transformation matrices.
cheers!
I am trying to implement the snake algorithm for active contour using C++ and OpenCV 3. I am working with the version that uses the gradient descent. As base test I am trying to draw a contour of a lip. This is the base image.
This is the evolution of the contour without external forces (alpha = 0.001, beta = 3, step-size=0.3).
When I add the external force, this is the result.
As the external force I have used just edge detection with the Sobel derivative.
This is the code I use for the points update.
array<Mat, 2> edges = edgeMatrices(croppedImage);
const float ALPHA = 0.001, BETA = 3, GAMMA = 0.3, // Gamma is step size.
            a = GAMMA * ALPHA, b = GAMMA * BETA;
const uint16_t CYCLES = 1000;
const float p = b, q = -a - 4 * b, r = 1 + 2 * a + 6 * b;
Mat pMatrix = pentadiagonalMatrix(POINTS_NUM, p, q, r).inv();
for (uint16_t i = 0; i < CYCLES; ++i) {
    // Extract the x and y derivatives for current points.
    auto externalForces = external(edges, x, y);
    x = pMatrix * (x + GAMMA * externalForces[0]);
    y = pMatrix * (y + GAMMA * externalForces[1]);
    // Draw the points.
    if (i % 200 == 0 && i > 0)
        drawPoints(croppedImage, x, y, { 0.2f * i, 0.2f * i, 0 });
}
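pentadiagonalMatrix and POINTS_NUM are helpers of mine that are not shown here; the matrix is the usual circulant internal-energy matrix of a closed snake, which in numpy terms would look roughly like this:

import numpy as np

def pentadiagonal_matrix(n, p, q, r):
    # Circulant matrix for a closed contour: r on the diagonal, q on the
    # +/-1 off-diagonals, p on the +/-2 off-diagonals, with wrap-around.
    A = np.zeros((n, n))
    for i in range(n):
        A[i, i] = r
        A[i, (i - 1) % n] = q
        A[i, (i + 1) % n] = q
        A[i, (i - 2) % n] = p
        A[i, (i + 2) % n] = p
    return A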
This is the code for computing the derivatives.
array<Mat, 2> edgeMatrices(Mat &img) {
    // Convert the image to grayscale.
    Mat gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    // Apply the Sobel filter.
    Mat grad_x, grad_y, blurred_x, blurred_y;
    int scale = 1;
    int delta = 0;
    int ddepth = CV_16S;
    int kernSize = 3;
    Sobel(gray, grad_x, ddepth, 1, 0, kernSize, scale, delta, BORDER_DEFAULT);
    Sobel(gray, grad_y, ddepth, 0, 1, kernSize, scale, delta, BORDER_DEFAULT);
    GaussianBlur(grad_x, blurred_x, Size(5, 5), 30);
    GaussianBlur(grad_y, blurred_y, Size(5, 5), 30);
    return { blurred_x, blurred_y };
}
array<Mat, 2> external(array<Mat, 2> &edgeMat, Mat &x, Mat &y) {
    array<Mat, 2> ext;
    ext[0] = { Size{ 1, POINTS_NUM }, CV_32FC1 };
    ext[1] = { Size{ 1, POINTS_NUM }, CV_32FC1 };
    for (size_t i = 0; i < POINTS_NUM; ++i) {
        ext[0].at<float>(0, i) = -edgeMat[0].at<short>(y.at<float>(0, i), x.at<float>(0, i));
        ext[1].at<float>(0, i) = -edgeMat[1].at<short>(y.at<float>(0, i), x.at<float>(0, i));
    }
    return ext;
}
As you can see, the contour points converge in a very strange way and not towards the edge of the lip (the result I would expect).
I am not able to tell whether it is an implementation error, a parameter-tuning problem, or just normal behaviour because I misunderstood something about the algorithm.
I have some doubts about the derivative matrices; I think they should be regularized in some way, but I am not sure which is the right one. Can someone help me?
The only implementations I have found use the greedy method.
I'm trying a perspective transformation of an image using a homography matrix.
Given a translation and a rotation, I built a homography matrix and applied it to the perspective transformation as:
Mat srcImg = imread("tests/image3.jp2", IMREAD_COLOR);
Mat dstImg, H;
Get_Homography(H, srcImg.size());
warpPerspective(srcImg, dstImg, H, dstImg.size());
imshow("output", dstImg);
and
#define FL_x 1000.0
#define FL_y 1000.0
void Get_Homography(Mat &H_out, cv::Size size)
{
    static float H_uc[9], C[9], C_inv[9], H[9], C_inv_H_uc[9];
    static float H33[3][3];
    static float R[9];
    static float T[3];
    static float n[3];
    static float d;
    static float nTd[9];
    static float phi, the, psi;

    n[0] = n[1] = 0.0;
    n[2] = -1.0;
    T[0] = -500;
    T[1] = -500;
    T[2] = 0.0;
    d = 100.0;
    phi = 0.0*D2R;
    the = 0.0*D2R;
    psi = 0.0*D2R;

    matMult(T, n, 3, 1, 3, nTd);
    matMult(nTd, &d, 9, 1, 1, nTd);
    getDCM_I2B(phi, the, psi, R);
    matAdd(R, nTd, 3, 3, H_uc);

    C[0] = FL_x; C[1] = 0.0;  C[2] = size.width / 2.0;
    C[3] = 0.0;  C[4] = FL_y; C[5] = size.height / 2.0;
    C[6] = 0.0;  C[7] = 0.0;  C[8] = 1.0;

    matInv33(C, C_inv);
    matMult(C_inv, H_uc, 3, 3, 3, C_inv_H_uc);
    matMult(C_inv_H_uc, C, 3, 3, 3, H);

    H33[0][0] = H[0];
    H33[0][1] = H[1];
    H33[0][2] = H[2];
    H33[1][0] = H[3];
    H33[1][1] = H[4];
    H33[1][2] = H[5];
    H33[2][0] = H[6];
    H33[2][1] = H[7];
    H33[2][2] = H[8];
    H_out = Mat(3, 3, CV_32F, H33);

    return;
}
Rotations about the z-axis (any value of "psi") work fine.
But when I put any value (even 1.0 deg) into "the" or "phi", the resulting image is awkward; I cannot recognize what it is.
And when I put [-500, -500, 0] into T (translation), it produces a shifted image as if it were taken from a different position (shifted to the right), but I think -500, -500 is too big; for d = 1.0, the resulting image shows only a few pixels of shift (barely noticeable).
I thought my implementation of constructing the homography matrix was right, but the results are awkward.
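For reference, the textbook plane-induced homography (sign conventions vary between sources) is H = C (R − T·nᵀ/d) C⁻¹, i.e. with the intrinsic matrix C on the left and its inverse on the right. A minimal numpy sketch of that formula (all names here are mine, not from the code above):

import numpy as np

def plane_homography(C, R, T, n, d):
    # H = C (R - T n^T / d) C^{-1}; n is the plane normal and d the
    # distance to the plane. Sign conventions differ between references.
    return C @ (R - np.outer(T, n) / d) @ np.linalg.inv(C)

C = np.array([[1000.0, 0.0, 320.0],
              [0.0, 1000.0, 240.0],
              [0.0,    0.0,   1.0]])
R = np.eye(3)                        # no rotation
T = np.array([-500.0, -500.0, 0.0])  # translation
n = np.array([0.0, 0.0, -1.0])       # plane normal
print(plane_homography(C, R, T, n, d=100.0))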
Could you give me some advice on this?
Thank you.
I'd like to rotate an image, but I can't obtain the rotated image without it being cropped.
My original image:
Now I use this code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// Compile with g++ code.cpp -lopencv_core -lopencv_highgui -lopencv_imgproc
int main()
{
    cv::Mat src = cv::imread("im.png", CV_LOAD_IMAGE_UNCHANGED);
    cv::Mat dst;
    cv::Point2f pc(src.cols/2., src.rows/2.);
    cv::Mat r = cv::getRotationMatrix2D(pc, -45, 1.0);
    cv::warpAffine(src, dst, r, src.size()); // what size should I use?
    cv::imwrite("rotated_im.png", dst);
    return 0;
}
And obtain the following image:
But I'd like to obtain this:
My answer is inspired by the following posts / blog entries:
Rotate cv::Mat using cv::warpAffine offsets destination image
http://john.freml.in/opencv-rotation
Main ideas:
Adjusting the rotation matrix by adding a translation to the new image center
Using cv::RotatedRect to rely on existing opencv functionality as much as possible
Code tested with opencv 3.4.1:
#include "opencv2/opencv.hpp"
int main()
{
    cv::Mat src = cv::imread("im.png", CV_LOAD_IMAGE_UNCHANGED);
    double angle = -45;

    // get rotation matrix for rotating the image around its center in pixel coordinates
    cv::Point2f center((src.cols-1)/2.0, (src.rows-1)/2.0);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    // determine bounding rectangle, center not relevant
    cv::Rect2f bbox = cv::RotatedRect(cv::Point2f(), src.size(), angle).boundingRect2f();
    // adjust transformation matrix
    rot.at<double>(0,2) += bbox.width/2.0 - src.cols/2.0;
    rot.at<double>(1,2) += bbox.height/2.0 - src.rows/2.0;

    cv::Mat dst;
    cv::warpAffine(src, dst, rot, bbox.size());
    cv::imwrite("rotated_im.png", dst);
    return 0;
}
Just try the code below; the idea is simple:
You need to create a blank image with the maximum size you're expecting while rotating at any angle. Here you should use Pythagoras, as mentioned in the comments above.
Now copy the source image to the newly created image and pass it to warpAffine. Here you should use the centre of the newly created image for rotation.
After warpAffine, if you need to crop the exact image, translate the four corners of the source image in the enlarged image using the rotation matrix, as described here.
Find the minimum x and minimum y for the top corner, and the maximum x and maximum y for the bottom corner, from the above result to crop the image.
This is the code:
int theta = 0;
Mat src, frame, frameRotated;
src = imread("rotate.png", 1);
cout << endl << endl << "Press '+' to rotate anti-clockwise and '-' for clockwise 's' to save" << endl << endl;

int diagonal = (int)sqrt(src.cols*src.cols + src.rows*src.rows);
int newWidth = diagonal;
int newHeight = diagonal;
int offsetX = (newWidth - src.cols) / 2;
int offsetY = (newHeight - src.rows) / 2;
Mat targetMat(newWidth, newHeight, src.type());
Point2f src_center(targetMat.cols/2.0F, targetMat.rows/2.0F);

while(1){
    src.copyTo(frame);
    double radians = theta * M_PI / 180.0;
    double sin = abs(std::sin(radians));
    double cos = abs(std::cos(radians));

    frame.copyTo(targetMat.rowRange(offsetY, offsetY + frame.rows).colRange(offsetX, offsetX + frame.cols));
    Mat rot_mat = getRotationMatrix2D(src_center, theta, 1.0);
    warpAffine(targetMat, frameRotated, rot_mat, targetMat.size());

    // Calculate the bounding rect for the exact image
    // Reference: https://stackoverflow.com/questions/19830477/find-the-bounding-rectangle-of-rotated-rectangle/19830964?noredirect=1#19830964
    Rect bound_Rect(frame.cols, frame.rows, 0, 0);

    int x1 = offsetX;
    int x2 = offsetX + frame.cols;
    int x3 = offsetX;
    int x4 = offsetX + frame.cols;

    int y1 = offsetY;
    int y2 = offsetY;
    int y3 = offsetY + frame.rows;
    int y4 = offsetY + frame.rows;

    Mat co_Ordinate = (Mat_<double>(3,4) << x1, x2, x3, x4,
                                            y1, y2, y3, y4,
                                             1,  1,  1,  1);
    Mat RotCo_Ordinate = rot_mat * co_Ordinate;

    for(int i = 0; i < 4; i++){
        if(RotCo_Ordinate.at<double>(0,i) < bound_Rect.x)
            bound_Rect.x = (int)RotCo_Ordinate.at<double>(0,i); // access smallest x
        if(RotCo_Ordinate.at<double>(1,i) < bound_Rect.y)
            bound_Rect.y = RotCo_Ordinate.at<double>(1,i); // access smallest y
    }

    for(int i = 0; i < 4; i++){
        if(RotCo_Ordinate.at<double>(0,i) > bound_Rect.width)
            bound_Rect.width = (int)RotCo_Ordinate.at<double>(0,i); // access largest x
        if(RotCo_Ordinate.at<double>(1,i) > bound_Rect.height)
            bound_Rect.height = RotCo_Ordinate.at<double>(1,i); // access largest y
    }

    bound_Rect.width = bound_Rect.width - bound_Rect.x;
    bound_Rect.height = bound_Rect.height - bound_Rect.y;

    Mat cropedResult;
    Mat ROI = frameRotated(bound_Rect);
    ROI.copyTo(cropedResult);

    imshow("Result", cropedResult);
    imshow("frame", frame);
    imshow("rotated frame", frameRotated);
    char k = waitKey();
    if(k == '+') theta += 10;
    if(k == '-') theta -= 10;
    if(k == 's') imwrite("rotated.jpg", cropedResult);
    if(k == 27) break;
}
Cropped Image
Thanks Robula!
Actually, you do not need to compute sine and cosine twice.
import cv2
def rotate_image(mat, angle):
    # angle in degrees

    height, width = mat.shape[:2]
    image_center = (width/2, height/2)

    rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)

    abs_cos = abs(rotation_mat[0,0])
    abs_sin = abs(rotation_mat[0,1])

    bound_w = int(height * abs_sin + width * abs_cos)
    bound_h = int(height * abs_cos + width * abs_sin)

    rotation_mat[0, 2] += bound_w/2 - image_center[0]
    rotation_mat[1, 2] += bound_h/2 - image_center[1]

    rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
    return rotated_mat
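A quick usage sketch (file names here are placeholders):

img = cv2.imread("input.png")  # hypothetical input file
cv2.imwrite("rotated.png", rotate_image(img, 30))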
Thanks @Haris! Here's the Python version:
import math
import cv2
import numpy as np

def rotate_image(image, angle):
    '''Rotate image "angle" degrees.

    How it works:
      - Creates a blank image that fits any rotation of the image. To achieve
        this, set the height and width to be the image's diagonal.
      - Copy the original image to the center of this blank image.
      - Rotate using warpAffine, using the newly created image's center
        (the enlarged blank image center).
      - Translate the four corners of the source image in the enlarged image
        using homogeneous multiplication of the rotation matrix.
      - Crop the image according to these transformed corners.
    '''

    diagonal = int(math.sqrt(pow(image.shape[0], 2) + pow(image.shape[1], 2)))
    offset_x = (diagonal - image.shape[0]) // 2
    offset_y = (diagonal - image.shape[1]) // 2
    dst_image = np.zeros((diagonal, diagonal, 3), dtype='uint8')
    image_center = (diagonal/2, diagonal/2)

    R = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    dst_image[offset_x:(offset_x + image.shape[0]),
              offset_y:(offset_y + image.shape[1]),
              :] = image
    dst_image = cv2.warpAffine(dst_image, R, (diagonal, diagonal), flags=cv2.INTER_LINEAR)

    # Calculate the rotated bounding rect
    x0 = offset_x
    x1 = offset_x + image.shape[0]
    x2 = offset_x
    x3 = offset_x + image.shape[0]

    y0 = offset_y
    y1 = offset_y
    y2 = offset_y + image.shape[1]
    y3 = offset_y + image.shape[1]

    corners = np.zeros((3, 4))
    corners[0, 0] = x0
    corners[0, 1] = x1
    corners[0, 2] = x2
    corners[0, 3] = x3
    corners[1, 0] = y0
    corners[1, 1] = y1
    corners[1, 2] = y2
    corners[1, 3] = y3
    corners[2:] = 1

    c = np.dot(R, corners)

    x = int(c[0, 0])
    y = int(c[1, 0])
    left = x
    right = x
    up = y
    down = y

    for i in range(4):
        x = int(c[0, i])
        y = int(c[1, i])
        if (x < left): left = x
        if (x > right): right = x
        if (y < up): up = y
        if (y > down): down = y
    h = down - up
    w = right - left

    cropped = np.zeros((w, h, 3), dtype='uint8')
    cropped[:, :, :] = dst_image[left:(left+w), up:(up+h), :]
    return cropped
Increase the image canvas (equally from the center without changing the image size) so that it can fit the image after rotation, then apply warpAffine:
Mat img = imread ("/path/to/image", 1);
double offsetX, offsetY;
double angle = -45;
double width = img.size().width;
double height = img.size().height;
Point2d center = Point2d (width / 2, height / 2);
Rect bounds = RotatedRect (center, img.size(), angle).boundingRect();
Mat resized = Mat::zeros (bounds.size(), img.type());
offsetX = (bounds.width - width) / 2;
offsetY = (bounds.height - height) / 2;
Rect roi = Rect (offsetX, offsetY, width, height);
img.copyTo (resized (roi));
center += Point2d (offsetX, offsetY);
Mat M = getRotationMatrix2D (center, angle, 1.0);
warpAffine (resized, resized, M, resized.size());
After searching around for a clean and easy to understand solution and reading through the answers above trying to understand them, I eventually came up with a solution using trigonometry.
I hope this helps somebody :)
import cv2
import math
def rotate_image(mat, angle):
    height, width = mat.shape[:2]
    image_center = (width / 2, height / 2)

    rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1)

    radians = math.radians(angle)
    sin = math.sin(radians)
    cos = math.cos(radians)
    bound_w = int((height * abs(sin)) + (width * abs(cos)))
    bound_h = int((height * abs(cos)) + (width * abs(sin)))

    rotation_mat[0, 2] += ((bound_w / 2) - image_center[0])
    rotation_mat[1, 2] += ((bound_h / 2) - image_center[1])

    rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
    return rotated_mat
EDIT: Please refer to @Remi Cuingnet's answer below.
For a Python version of rotating an image where you take control of the padded black-coloured region, you can use scipy.ndimage.rotate. Here is an example:

import cv2
import matplotlib.pyplot as plt
from skimage import io
from scipy import ndimage

image = io.imread('https://www.pyimagesearch.com/wp-content/uploads/2019/12/tensorflow2_install_ubuntu_header.jpg')
io.imshow(image)
plt.show()

rotated = ndimage.rotate(image, angle=234, mode='nearest')
rotated = cv2.resize(rotated, (image.shape[:2]))
# rotated = cv2.cvtColor(rotated, cv2.COLOR_BGR2RGB)
# cv2.imwrite('rotated.jpg', rotated)
io.imshow(rotated)
plt.show()
If you have a rotation and a scaling of the image:
#include "opencv2/opencv.hpp"
#include <functional>
#include <vector>
bool compareCoords(cv::Point2f p1, cv::Point2f p2, char coord)
{
    assert(coord == 'x' || coord == 'y');
    if (coord == 'x')
        return p1.x < p2.x;
    return p1.y < p2.y;
}

int main(int argc, char** argv)
{
    cv::Mat image = cv::imread("lenna.png");
    float angle = 45.0;  // degrees
    float scale = 0.5;

    cv::Mat_<float> rot_mat = cv::getRotationMatrix2D( cv::Point2f( 0.0f, 0.0f ), angle, scale );

    // Image corners
    cv::Point2f pA = cv::Point2f(0.0f, 0.0f);
    cv::Point2f pB = cv::Point2f(image.cols, 0.0f);
    cv::Point2f pC = cv::Point2f(image.cols, image.rows);
    cv::Point2f pD = cv::Point2f(0.0f, image.rows);
    std::vector<cv::Point2f> pts = { pA, pB, pC, pD };

    std::vector<cv::Point2f> ptsTransf;
    cv::transform(pts, ptsTransf, rot_mat );

    using namespace std::placeholders;
    float minX = std::min_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'x'))->x;
    float maxX = std::max_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'x'))->x;
    float minY = std::min_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'y'))->y;
    float maxY = std::max_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'y'))->y;

    float newW = maxX - minX;
    float newH = maxY - minY;

    cv::Mat_<float> trans_mat = (cv::Mat_<float>(2,3) << 0, 0, -minX, 0, 0, -minY);
    cv::Mat_<float> M = rot_mat + trans_mat;

    cv::Mat warpedImage;
    cv::warpAffine( image, warpedImage, M, cv::Size(newW, newH) );

    cv::imshow("lenna", image);
    cv::imshow("Warped lenna", warpedImage);
    cv::waitKey();
    cv::destroyAllWindows();

    return 0;
}
Thanks to everyone for this post, it has been super useful. However, I found some black lines left over at the top and left edges (using Rose's Python version) when rotating 90º. The problem seemed to be some int() roundings. In addition to that, I changed the sign of the angle to make it grow clockwise.
import math
import cv2
import numpy as np

def rotate_image(image, angle):
    '''Rotate image "angle" degrees.

    How it works:
      - Creates a blank image that fits any rotation of the image. To achieve
        this, set the height and width to be the image's diagonal.
      - Copy the original image to the center of this blank image.
      - Rotate using warpAffine, using the newly created image's center
        (the enlarged blank image center).
      - Translate the four corners of the source image in the enlarged image
        using homogeneous multiplication of the rotation matrix.
      - Crop the image according to these transformed corners.
    '''

    diagonal = int(math.ceil(math.sqrt(pow(image.shape[0], 2) + pow(image.shape[1], 2))))
    offset_x = (diagonal - image.shape[0]) // 2
    offset_y = (diagonal - image.shape[1]) // 2
    dst_image = np.zeros((diagonal, diagonal, 3), dtype='uint8')
    image_center = (float(diagonal-1)/2, float(diagonal-1)/2)

    R = cv2.getRotationMatrix2D(image_center, -angle, 1.0)
    dst_image[offset_x:(offset_x + image.shape[0]), offset_y:(offset_y + image.shape[1]), :] = image
    dst_image = cv2.warpAffine(dst_image, R, (diagonal, diagonal), flags=cv2.INTER_LINEAR)

    # Calculate the rotated bounding rect
    x0 = offset_x
    x1 = offset_x + image.shape[0]
    x2 = offset_x + image.shape[0]
    x3 = offset_x

    y0 = offset_y
    y1 = offset_y
    y2 = offset_y + image.shape[1]
    y3 = offset_y + image.shape[1]

    corners = np.zeros((3, 4))
    corners[0, 0] = x0
    corners[0, 1] = x1
    corners[0, 2] = x2
    corners[0, 3] = x3
    corners[1, 0] = y0
    corners[1, 1] = y1
    corners[1, 2] = y2
    corners[1, 3] = y3
    corners[2:] = 1

    c = np.dot(R, corners)

    x = int(round(c[0, 0]))
    y = int(round(c[1, 0]))
    left = x
    right = x
    up = y
    down = y

    for i in range(4):
        x = c[0, i]
        y = c[1, i]
        if (x < left): left = x
        if (x > right): right = x
        if (y < up): up = y
        if (y > down): down = y
    h = int(round(down - up))
    w = int(round(right - left))
    left = int(round(left))
    up = int(round(up))

    cropped = np.zeros((w, h, 3), dtype='uint8')
    cropped[:, :, :] = dst_image[left:(left+w), up:(up+h), :]
    return cropped
Go version (using gocv) of @robula and @remi-cuingnet:
func rotateImage(mat *gocv.Mat, angle float64) *gocv.Mat {
    height := mat.Rows()
    width := mat.Cols()

    imgCenter := image.Point{X: width / 2, Y: height / 2}
    rotationMat := gocv.GetRotationMatrix2D(imgCenter, -angle, 1.0)

    absCos := math.Abs(rotationMat.GetDoubleAt(0, 0))
    absSin := math.Abs(rotationMat.GetDoubleAt(0, 1))

    boundW := float64(height)*absSin + float64(width)*absCos
    boundH := float64(height)*absCos + float64(width)*absSin

    rotationMat.SetDoubleAt(0, 2, rotationMat.GetDoubleAt(0, 2)+(boundW/2)-float64(imgCenter.X))
    rotationMat.SetDoubleAt(1, 2, rotationMat.GetDoubleAt(1, 2)+(boundH/2)-float64(imgCenter.Y))

    gocv.WarpAffine(*mat, mat, rotationMat, image.Point{X: int(boundW), Y: int(boundH)})
    return mat
}
I rotate the same matrix in memory; make a new matrix if you don't want to alter it.
For anyone using the Emgu.CV or OpenCvSharp wrapper in .NET, here is a C# implementation of Lars Schillingmann's answer:
Emgu.CV:
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
public static class MatExtension
{
    /// <summary>
    /// <see>https://stackoverflow.com/questions/22041699/rotate-an-image-without-cropping-in-opencv-in-c/75451191#75451191</see>
    /// </summary>
    public static Mat Rotate(this Mat src, float degrees)
    {
        degrees = -degrees; // counter-clockwise to clockwise
        var center = new PointF((src.Width - 1) / 2f, (src.Height - 1) / 2f);
        var rotationMat = new Mat();
        CvInvoke.GetRotationMatrix2D(center, degrees, 1, rotationMat);
        var boundingRect = new RotatedRect(new(), src.Size, degrees).MinAreaRect();
        rotationMat.Set(0, 2, rotationMat.Get<double>(0, 2) + (boundingRect.Width / 2f) - (src.Width / 2f));
        rotationMat.Set(1, 2, rotationMat.Get<double>(1, 2) + (boundingRect.Height / 2f) - (src.Height / 2f));
        var rotatedSrc = new Mat();
        CvInvoke.WarpAffine(src, rotatedSrc, rotationMat, boundingRect.Size);
        return rotatedSrc;
    }

    /// <summary>
    /// <see>https://stackoverflow.com/questions/32255440/how-can-i-get-and-set-pixel-values-of-an-emgucv-mat-image/69537504#69537504</see>
    /// </summary>
    public static unsafe void Set<T>(this Mat mat, int row, int col, T value) where T : struct =>
        _ = new Span<T>(mat.DataPointer.ToPointer(), mat.Rows * mat.Cols * mat.ElementSize)
        {
            [(row * mat.Cols) + col] = value
        };

    public static unsafe T Get<T>(this Mat mat, int row, int col) where T : struct =>
        new ReadOnlySpan<T>(mat.DataPointer.ToPointer(), mat.Rows * mat.Cols * mat.ElementSize)
            [(row * mat.Cols) + col];
}
OpenCvSharp:
OpenCvSharp already has a Mat.Set<> method that functions the same as mat.at<> in the original OpenCV, so we don't have to copy these methods from How can I get and set pixel values of an EmguCV Mat image?
using OpenCvSharp;
public static class MatExtension
{
    /// <summary>
    /// <see>https://stackoverflow.com/questions/22041699/rotate-an-image-without-cropping-in-opencv-in-c/75451191#75451191</see>
    /// </summary>
    public static Mat Rotate(this Mat src, float degrees)
    {
        degrees = -degrees; // counter-clockwise to clockwise
        var center = new Point2f((src.Width - 1) / 2f, (src.Height - 1) / 2f);
        var rotationMat = Cv2.GetRotationMatrix2D(center, degrees, 1);
        var boundingRect = new RotatedRect(new(), new Size2f(src.Width, src.Height), degrees).BoundingRect();
        rotationMat.Set(0, 2, rotationMat.Get<double>(0, 2) + (boundingRect.Width / 2f) - (src.Width / 2f));
        rotationMat.Set(1, 2, rotationMat.Get<double>(1, 2) + (boundingRect.Height / 2f) - (src.Height / 2f));
        var rotatedSrc = new Mat();
        Cv2.WarpAffine(src, rotatedSrc, rotationMat, boundingRect.Size);
        return rotatedSrc;
    }
}
Also, you may want to mutate the src param instead of returning a new clone of it during rotation; for that, you can just set the dst param of WarpAffine() to the same Mat as src: c++, opencv: Is it safe to use the same Mat for both source and destination images in filtering operation?
CvInvoke.WarpAffine(src, src, rotationMat, boundingRect.Size);
This is called in-place mode: https://answers.opencv.org/question/24/do-all-opencv-functions-support-in-place-mode-for-their-arguments/
Can the OpenCV function cvtColor be used to convert a matrix in place?
If it is just to rotate 90 degrees, maybe this code could be useful.
Mat img = imread("images.jpg");
Mat rt(img.rows, img.rows, CV_8U);
Point2f pc(img.cols / 2.0, img.rows / 2.0);
Mat r = getRotationMatrix2D(pc, 90, 1);
warpAffine(img, rt, r, rt.size());
imshow("rotated", rt);
Hope it's useful.
By the way, for 90º rotations only, here is a more efficient + accurate function:
import numpy as np

def rotate_image_90(image, angle):
    angle = -angle
    rotated_image = image
    if angle == 0:
        pass
    elif angle == 90:
        rotated_image = np.rot90(rotated_image)
    elif angle == 180 or angle == -180:
        rotated_image = np.rot90(rotated_image)
        rotated_image = np.rot90(rotated_image)
    elif angle == -90:
        rotated_image = np.rot90(rotated_image)
        rotated_image = np.rot90(rotated_image)
        rotated_image = np.rot90(rotated_image)
    return rotated_image
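For what it's worth, np.rot90 also accepts a repetition count k, so the same mapping can be written more compactly (a sketch that keeps the clockwise-positive convention used above):

import numpy as np

def rotate_image_90(image, angle):
    # np.rot90 rotates counter-clockwise, so negate the angle to keep the
    # clockwise-positive convention; only multiples of 90 are supported.
    if angle % 90 != 0:
        return image
    return np.rot90(image, k=(-angle // 90) % 4)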