I have a YOLOv5 ONNX file that I trained on apples and bananas. I was using Python until today, but I decided to switch to C++ to gain some speed. With the code below I get correct results when I use YOLOv5's own ONNX files and image, but when I plug in my own ONNX file and my test image, it gives me wrong results. You can also find the test image attached. What is the problem here?
// Include Libraries.
#include <opencv2/opencv.hpp>
#include <fstream>
// Namespaces.
using namespace cv;
using namespace std;
using namespace cv::dnn;
// Constants.
const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
const float SCORE_THRESHOLD = 0.3;
const float NMS_THRESHOLD = 0.4;
const float CONFIDENCE_THRESHOLD = 0.65;
// Text parameters.
const float FONT_SCALE = 0.7;
const int FONT_FACE = FONT_HERSHEY_SIMPLEX;
const int THICKNESS = 1;
// Colors.
Scalar BLACK = Scalar(0,0,0);
Scalar BLUE = Scalar(255, 178, 50);
Scalar YELLOW = Scalar(0, 255, 255);
Scalar RED = Scalar(0,0,255);
// Draw the predicted bounding box.
void draw_label(Mat& input_image, string label, int left, int top)
{
// Display the label at the top of the bounding box.
int baseLine;
Size label_size = getTextSize(label, FONT_FACE, FONT_SCALE, THICKNESS, &baseLine);
top = max(top, label_size.height);
// Top left corner.
Point tlc = Point(left, top);
// Bottom right corner.
Point brc = Point(left + label_size.width, top + label_size.height + baseLine);
// Draw black rectangle.
rectangle(input_image, tlc, brc, BLACK, FILLED);
// Put the label on the black rectangle.
putText(input_image, label, Point(left, top + label_size.height), FONT_FACE, FONT_SCALE, YELLOW, THICKNESS);
}
vector<Mat> pre_process(Mat &input_image, Net &net)
{
// Convert to blob.
Mat blob;
blobFromImage(input_image, blob, 1./255., Size(INPUT_WIDTH, INPUT_HEIGHT), Scalar(), true, false);
net.setInput(blob);
// Forward propagate.
vector<Mat> outputs;
net.forward(outputs, net.getUnconnectedOutLayersNames());
return outputs;
}
Mat post_process(Mat &input_image, vector<Mat> &outputs, const vector<string> &class_name)
{
// Initialize vectors to hold respective outputs while unwrapping detections.
vector<int> class_ids;
vector<float> confidences;
vector<Rect> boxes;
// Resizing factor.
float x_factor = input_image.cols / INPUT_WIDTH;
float y_factor = input_image.rows / INPUT_HEIGHT;
float *data = (float *)outputs[0].data;
const int dimensions = 85;
const int rows = 25200;
// Iterate through 25200 detections.
for (int i = 0; i < rows; ++i)
{
float confidence = data[4];
// Discard bad detections and continue.
if (confidence >= CONFIDENCE_THRESHOLD)
{
float * classes_scores = data + 5;
// Create a 1 x class_name.size() Mat and store the class scores.
Mat scores(1, class_name.size(), CV_32FC1, classes_scores);
// Perform minMaxLoc and acquire index of best class score.
Point class_id;
double max_class_score;
minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
// Continue if the class score is above the threshold.
if (max_class_score > SCORE_THRESHOLD)
{
// Store class ID and confidence in the pre-defined respective vectors.
confidences.push_back(confidence);
class_ids.push_back(class_id.x);
// Center.
float cx = data[0];
float cy = data[1];
// Box dimension.
float w = data[2];
float h = data[3];
// Bounding box coordinates.
int left = int((cx - 0.5 * w) * x_factor);
int top = int((cy - 0.5 * h) * y_factor);
int width = int(w * x_factor);
int height = int(h * y_factor);
// Store good detections in the boxes vector.
boxes.push_back(Rect(left, top, width, height));
}
}
// Jump to the next detection (next row of 85 values).
data += 85;
}
// Perform Non Maximum Suppression and draw predictions.
vector<int> indices;
NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, indices);
for (int i = 0; i < indices.size(); i++)
{
int idx = indices[i];
Rect box = boxes[idx];
int left = box.x;
int top = box.y;
int width = box.width;
int height = box.height;
// Draw bounding box.
rectangle(input_image, Point(left, top), Point(left + width, top + height), BLUE, 3*THICKNESS);
// Get the label for the class name and its confidence.
string label = format("%.2f", confidences[idx]);
label = class_name[class_ids[idx]] + ":" + label;
// Draw class labels.
draw_label(input_image, label, left, top);
//cout<<"The Value is "<<label;
//cout<<endl;
}
return input_image;
}
int main()
{
vector<string> class_list;
ifstream ifs("/Users/admin/Documents/C++/First/obj.names");
string line;
while (getline(ifs, line))
{
class_list.push_back(line);
}
// Load image.
Mat frame;
frame = imread("/Users/admin/Documents/C++/First/test.jpg");
// Load model.
Net net;
net = readNet("/Users/admin/Documents/C++/First/my.onnx");
vector<Mat> detections;
detections = pre_process(frame, net);
Mat img = post_process(frame, detections, class_list);
//Mat img = post_process(frame.clone(), detections, class_list);
// Put efficiency information.
// The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
vector<double> layersTimes;
double freq = getTickFrequency() / 1000;
double t = net.getPerfProfile(layersTimes) / freq;
string label = format("Inference time : %.2f ms", t);
putText(img, label, Point(20, 40), FONT_FACE, FONT_SCALE, RED);
imshow("Output", img);
waitKey(0);
return 0;
}
The photos I use are 640x480. I played around with the size of the photo, thinking it might be related, but the same problem persisted.
The YOLOv5 output format is xyxy, as can be seen here:
https://github.com/ultralytics/yolov5/blob/bfa1f23045c7c4136a9b8ced9d6be8249ed72692/detect.py#L161
not xywh, as you are assuming in your code.
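If that is the case for your export, the box-unpacking lines in post_process would change roughly as sketched below (a minimal sketch only; verify the actual output layout of your ONNX file first):
// Sketch: unpack data[0..3] as corner coordinates (x1, y1, x2, y2)
// instead of centre/width/height, keeping the same resize factors as above.
float x1 = data[0];
float y1 = data[1];
float x2 = data[2];
float y2 = data[3];
int left = int(x1 * x_factor);
int top = int(y1 * y_factor);
int width = int((x2 - x1) * x_factor);
int height = int((y2 - y1) * y_factor);
boxes.push_back(Rect(left, top, width, height));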
I would like to make a camera rotate around an object, but without shifting the pivot to its center. A good example I made with Blender:
Link to gif (in this example the camera rotates around the cursor, but it works as an example)
So what I want is: when I click a certain object, I want to rotate around it, but without centering the camera pivot on the object's position, basically retaining the object's position on screen. I found many examples of rotating around an object's center, but I can't seem to find anything for my problem.
Currently I have working camera rotation and movement, but I don't know how to approach this. I am working in OpenGL with the Cinder framework.
I would be grateful for a simple explanation of how I would be able to do it :)
My current code:
void HandleUICameraRotate() {
//selectedObj <- object...has position etc..
float deltaX = (mMousePos.x - mInitialMousePos.x) / -100.0f;
float deltaY = (mMousePos.y - mInitialMousePos.y) / 100.0f;
// Camera direction vector
glm::vec3 mW = glm::normalize(mInitialCam.getViewDirection());
bool invertMotion = (mInitialCam.getOrientation() * mInitialCam.getWorldUp()).y < 0.0f;
// Right axis vector
vec3 mU = normalize(cross(mInitialCam.getWorldUp(), mW));
if (invertMotion) {
deltaX = -deltaX;
deltaY = -deltaY;
}
glm::vec3 rotatedVec = glm::angleAxis(deltaY, mU) * (-mInitialCam.getViewDirection() * mInitialPivotDistance);
rotatedVec = glm::angleAxis(deltaX, mInitialCam.getWorldUp()) * rotatedVec;
mCamera.setEyePoint(mInitialCam.getEyePoint() + mInitialCam.getViewDirection() * mInitialPivotDistance + rotatedVec);
mCamera.setOrientation(glm::angleAxis(deltaX, mInitialCam.getWorldUp()) * glm::angleAxis(deltaY, mU) * mInitialCam.getOrientation());
}
This is how you can do this rotation (look at the function orbit(...) in the code below).
The basic idea is to rotate the position and the lookAt direction of the camera about the target position. When you run the code demo, use the right mouse button to select the target, and move the mouse to rotate the camera around the target.
Hit me up if you need any clarifications.
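For reference, in your C++/Cinder setup the core of that idea could be sketched with GLM roughly like this (illustrative names only, not a drop-in replacement for your HandleUICameraRotate):
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Sketch: orbit the camera about 'target' without re-centering the pivot.
// 'eye' and 'viewDir' are the camera position and viewing direction;
// 'yawAxis' is typically the world up vector, 'pitchAxis' the camera's right vector.
void orbitAboutTarget(glm::vec3 &eye, glm::vec3 &viewDir, const glm::vec3 &target,
                      float yaw, float pitch,
                      const glm::vec3 &yawAxis, const glm::vec3 &pitchAxis)
{
    glm::quat rot = glm::angleAxis(yaw, yawAxis) * glm::angleAxis(pitch, pitchAxis);
    eye = target + rot * (eye - target);  // rotate the eye point around the target
    viewDir = rot * viewDir;              // rotate the look direction by the same amount
}
The JavaScript demo below does the same thing with axis-angle rotations in three.js.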
let renderer;
let canvas;
let camera;
let scene;
const objects = [];
const highlightGroup = new THREE.Group();
const xaxis = new THREE.Vector3(1, 0, 0);
const yaxis = new THREE.Vector3(0, 1, 0);
const zaxis = new THREE.Vector3(0, 0, 1);
const radius = 10;
const fov = 40;
const tanfov = Math.tan(fov * Math.PI / 360.0);
function initCamera() {
const aspect = 2; // the canvas default
const near = 0.1;
const far = 2000;
camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.set(0, 0, 500);
}
function initLights() {
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.PointLight(color, intensity);
light.position.set(0,0,200)
scene.add(light);
const light1 = new THREE.PointLight(color, intensity);
light1.position.set(100,200,-200)
scene.add(light1);
}
function initObjects() {
const geometry = new THREE.SphereBufferGeometry( radius, 13, 13 );
const yellowMat = new THREE.MeshPhongMaterial( {color: 0xffff00} );
const redMat = new THREE.MeshPhongMaterial( {color: 0xff0000} );
const greenMat = new THREE.MeshPhongMaterial( {color: 0x00ff00} );
const blueMat = new THREE.MeshPhongMaterial( {color: 0x0000ff} );
const magentaMat = new THREE.MeshPhongMaterial( {color: 0xff00ff} );
const cyanMat = new THREE.MeshPhongMaterial( {color: 0x00ffff} );
const lblueMat = new THREE.MeshPhongMaterial( {color: 0x6060ff} );
let sphere
sphere = new THREE.Mesh( geometry, yellowMat );
sphere.position.set(0, 0, 0);
objects.push(sphere);
scene.add(sphere)
sphere = new THREE.Mesh( geometry, redMat );
sphere.position.set(50, 0, 0);
objects.push(sphere);
scene.add(sphere)
sphere = new THREE.Mesh( geometry, blueMat );
sphere.position.set(0, 0, 50);
objects.push(sphere);
scene.add(sphere)
sphere = new THREE.Mesh( geometry, greenMat );
sphere.position.set(0, 50, 0);
objects.push(sphere);
scene.add(sphere)
sphere = new THREE.Mesh( geometry, magentaMat );
sphere.position.set(0, -50, 0);
objects.push(sphere);
scene.add(sphere)
sphere = new THREE.Mesh( geometry, cyanMat );
sphere.position.set(-50, 0, 0);
objects.push(sphere);
scene.add(sphere);
sphere = new THREE.Mesh( geometry, lblueMat );
sphere.position.set(0, 0, -50);
objects.push(sphere);
scene.add(sphere);
scene.add( highlightGroup );
}
function createRenderLoop() {
function render(time) {
time *= 0.001;
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
function initEventHandlers() {
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
}
window.addEventListener( 'resize', onWindowResize, false );
onWindowResize()
canvas.addEventListener('contextmenu', event => event.preventDefault());
}
function initOrbitCam() {
const diffToAngle = 0.01;
const hscale = 1.05;
const highlightMat = new THREE.MeshBasicMaterial({
color: 0xffffff,
transparent: true,
opacity: 0.2,
});
let isMouseButtonDown = -1;
let mouseDownPos;
let rightDownDragging = false;
let savedCamPos;
let savedCamLookAt = new THREE.Vector3();
let orbitTarget;
function absScrDist(pos1, pos2) {
return Math.abs(pos1[0] - pos2[0]) + Math.abs(pos1[1] - pos2[1]);
}
function addHighlight(obj) {
const objCopy = obj.clone();
objCopy.material = highlightMat;
objCopy.scale.set(hscale, hscale, hscale);
highlightGroup.add(objCopy);
}
function emptyHighlightGroup() {
highlightGroup.children.slice(0).forEach(child => {
highlightGroup.remove(child);
})
}
function getTarget(camera, event) {
const [x, y] = [event.offsetX, event.offsetY];
const [cw, ch] = [canvas.width, canvas.height];
const mouse3D = new THREE.Vector3( ( x / cw ) * 2 - 1,
-( y / ch ) * 2 + 1,
0.5 );
const raycaster = new THREE.Raycaster();
raycaster.setFromCamera( mouse3D, camera );
const intersects = raycaster.intersectObjects( objects );
console.log(intersects)
if ( intersects.length > 0 ) {
addHighlight(intersects[0].object);
return intersects[0].object.position.clone();
}
const nv = new THREE.Vector3();
camera.getWorldDirection(nv);
return camera.position.clone().add(nv.clone().multiplyScalar(500));
}
function onCanvasMouseDown(event) {
isMouseButtonDown = event.button;
mouseDownPos = [event.offsetX, event.offsetY];
orbitTarget = getTarget(camera, event);
event.preventDefault();
event.stopPropagation();
}
canvas.addEventListener("mousedown", onCanvasMouseDown, false);
function onCanvasMouseUp(event) {
isMouseButtonDown = -1;
rightDownDragging = false;
emptyHighlightGroup();
event.preventDefault();
event.stopPropagation();
}
canvas.addEventListener("mouseup", onCanvasMouseUp, false);
function onCanvasMouseMove(event) {
if (rightDownDragging === false) {
if (isMouseButtonDown === 2) {
const currPos = [event.clientX, event.clientY];
const dragDist = absScrDist(mouseDownPos, currPos);
if (dragDist >= 5) {
rightDownDragging = true;
savedCamPos = camera.position.clone();
camera.getWorldDirection( savedCamLookAt );
}
}
} else {
const xdiff = event.clientX - mouseDownPos[0];
const ydiff = event.clientY - mouseDownPos[1];
const yAngle = xdiff * diffToAngle;
const xAngle = ydiff * diffToAngle;
orbit(-xAngle, -yAngle, savedCamPos.clone(), savedCamLookAt.clone(), orbitTarget)
}
}
canvas.addEventListener("mousemove", onCanvasMouseMove, false);
function orbit(xRot, yRot, camPos, camLookAt, target) {
const newXAxis = camLookAt.clone();
const lx = camLookAt.x;
const lz = camLookAt.z;
newXAxis.x = -lz;
newXAxis.z = lx;
newXAxis.y = 0;
const newCamPos = camPos
.sub(target)
.applyAxisAngle( newXAxis, xRot )
.applyAxisAngle( yaxis, yRot )
.add(target);
camera.position.set(...newCamPos.toArray());
const relLookAt = camLookAt
.applyAxisAngle( newXAxis, xRot )
.applyAxisAngle( yaxis, yRot )
.add(newCamPos);
camera.lookAt(...relLookAt.toArray());
camera.updateProjectionMatrix();
}
}
function setup() {
canvas = document.querySelector('#c');
renderer = new THREE.WebGLRenderer({canvas});
scene = new THREE.Scene();
initCamera();
initLights();
initObjects();
initEventHandlers();
initOrbitCam();
createRenderLoop();
}
setup();
#c {
width: 100vw;
height: 100vh;
display: block;
}
<canvas id="c"></canvas>
<script src="https://unpkg.com/three#0.85.0/examples/js/libs/stats.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/110/three.min.js"></script>
<script src="https://unpkg.com/three#0.85.0/examples/js/controls/OrbitControls.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.2.5/gsap.min.js"></script>
I don't exactly understand what you want to do... but maybe this helps.
Transformations in 3D space happen through matrices, and there are different kinds of transformation matrices (i.e. translation, scale, rotation, ...). If you want to rotate an object around an axis which is not its own, you have to move the object to this axis, rotate it in this position, and then move it back. What happens is that you multiply the coordinates of whatever object you want to rotate around something by a translation matrix, then multiply by a rotation matrix, and then again multiply by a translation matrix. Luckily, according to the rules of linear algebra, we can simply multiply all of these matrices in order and then multiply the result with the coordinates...
instead of this:
translationMatrix * somePosition;
rotationMatrix * somePosition;
anotherTranslationMatrix * somePosition;
this:
translationMatrix * rotationMatrix * anotherTranslationMatrix * somePosition;
It is a bit vague to explain it like that, but the idea is there. This might seem like a lot of work, but GPUs are highly optimised to perform matrix multiplications, so if you succeed in letting the GPU perform these, it will not be an issue performance-wise...
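As a concrete sketch with GLM (which Cinder already uses), rotating a point around an arbitrary pivot by composing those matrices could look roughly like this (illustrative names, not taken from your code):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Translate the pivot to the origin, rotate, then translate back.
// The composed matrix is applied right-to-left to the point.
glm::vec3 rotateAroundPivot(const glm::vec3 &point, const glm::vec3 &pivot,
                            float angleRadians, const glm::vec3 &axis)
{
    glm::mat4 toOrigin = glm::translate(glm::mat4(1.0f), -pivot);
    glm::mat4 rotation = glm::rotate(glm::mat4(1.0f), angleRadians, axis);
    glm::mat4 fromOrigin = glm::translate(glm::mat4(1.0f), pivot);
    glm::mat4 composed = fromOrigin * rotation * toOrigin;
    return glm::vec3(composed * glm::vec4(point, 1.0f));
}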
If you already knew this: welp...
If you did not know this, research some linear algebra, specifically: coordinate spaces, matrix multiplication and transformation matrices.
cheers!
Note: The question How to put text into a bounding box in OpenCV? is in some ways similar to this one, but it is not the same question. The OP of that question tried to spread a text over the whole size of his image, and the code in the accepted answer just resizes the text using a mask.
I'm using OpenCV combined with C++ to do some image detection & manipulation.
So I want to align a text of unknown length at a specific origin. The font scale should be calculated because I'd like to specify a width factor for the maximum text width, as you can see in the image below:
This is the code I got so far:
int fontFace = cv::FONT_HERSHEY_DUPLEX,
fontScale = myTextString.size() / 10;
cv::Size textSize = getTextSize(myTextString, fontFace, fontScale, 0, 0);
putText(image, myTextString, cv::Point( (origin.x + textSize.width)/2, (origin.y + textSize.height)/2 ), fontFace, fontScale, Scalar(255, 0, 0));
Something like this should do it. You can change how the margins are calculated to change the horizontal/vertical alignment of the font.
If the height doesn't matter, you can just set target.height to a large number.
void drawtorect(cv::Mat & mat, cv::Rect target, int face, int thickness, cv::Scalar color, const std::string & str)
{
cv::Size rect = cv::getTextSize(str, face, 1.0, thickness, 0);
double scalex = (double)target.width / (double)rect.width;
double scaley = (double)target.height / (double)rect.height;
double scale = std::min(scalex, scaley);
int marginx = scale == scalex ? 0 : (int)((double)target.width * (scalex - scale) / scalex * 0.5);
int marginy = scale == scaley ? 0 : (int)((double)target.height * (scaley - scale) / scaley * 0.5);
cv::putText(mat, str, cv::Point(target.x + marginx, target.y + target.height - marginy), face, scale, color, thickness, 8, false);
}
* edit *
// Sample code
int L = 80; // width and height per square
int M = 60;
cv::Mat m( 5*M, 7*L,CV_8UC3,cv::Scalar(0,0,0) );
// create checkerboard
for ( int y=0,ymax=m.rows-M;y<=ymax; y+=M)
{
int c = (y/M)%2 == 0 ? 0 : 1;
for ( int x=0,xmax=m.cols-L;x<=xmax;x+=L)
{
if ( (c++)%2!=0 )
continue; // skip odd squares
// convenient way to do this
m( cv::Rect(x,y,L,M) ).setTo( cv::Scalar(64,64,64) );
}
}
// fill checkerboard ROIs by some text
int64 id=1;
for ( int y=0,ymax=m.rows-M;y<=ymax; y+=M)
{
for ( int x=0,xmax=m.cols-L;x<=xmax;x+=L)
{
std::stringstream ss;
ss<<(id<<=1); // some increasing text input
drawtorect( m, cv::Rect(x,y,L,M), cv::FONT_HERSHEY_PLAIN,1,cv::Scalar(255,255,255),ss.str() );
}
}
void EyeDetection (Mat Orig_frame)
{
Mat L_crop,R_crop,Gray_frame,Res_frame; vector<Rect>eyes;
vector<Rect>eyes1;
cvtColor(Orig_frame, Gray_frame, CV_BGR2GRAY);
//Converts RGB to GrayScale
equalizeHist(Gray_frame, Gray_frame);
//Using histogram Equalization tech for improving contrast
eye_cascade.detectMultiScale(Gray_frame, eyes, 1.15, 4, 0 | CASCADE_SCALE_IMAGE,Size(10, 10)); //Detect Eyes
eye_cascade.detectMultiScale(Gray_frame, eyes1, 1.15, 4, 0 | CASCADE_SCALE_IMAGE,Size(10, 10)); //Detect Eyes
Rect L_roi,R_roi; //region of interest
int x1, y1; //(x1,y1) is indx of left detected eye
int w1, h1; //width and height of detected eye
int x2, y2; //(x2,y2) is indx of right detected eye
int w2, h2;
int e_x1, e_y1; //(e_x1,e_y1) is indx of left eye after pruning
int e_w1, e_h1; //width and height of eye after pruning
int e_x2, e_y2; //(e_x2,e_y2) is indx of right eye after pruning
int e_w2, e_h2;
if ( !eyes.empty() ) {
if ( eyes[0].width > 0 && eyes[0].height > 0) { //First Detected eyes
x1 = eyes[0].x; //Dimesnions of Left Detected eye in frame
y1 = eyes[0].y;
w1 = eyes[0].width;
h1 = eyes[0].height;
L_roi.x = e_x1 = x1 + .11*w1; //pruning Left eye to eliminate unwanted pixels (resizing)
L_roi.y = e_y1 = y1 + .15*h1;
L_roi.width = e_w1 = .8*w1;
L_roi.height = e_h1 = .65*h1;
Point L_pt1(e_x1,e_y1);
Point L_pt2(e_x1 + e_w1, e_y1 + e_h1);
L_crop = Gray_frame(L_roi);
Mat left;
rectangle(Orig_frame, L_pt1, L_pt2, Scalar(0, 255, 0), 2, 8, 0);
imshow("Left Eye",L_crop);
}
if ( eyes1[0].width > 0 && eyes1[0].height > 0) { //Second Detected eyes
x2 = eyes1[1].x; //Dimension of Right Detected eye in frame
y2 = eyes1[1].y;
w2 = eyes1[1].width;
h2 = eyes1[1].height;
R_roi.x = e_x2 = x2 + .11*w2; //pruning Right eye to eliminate unwanted pixels (resizing)
R_roi.y = e_y2 = y2 + .15*h2;
R_roi.width = e_w2 = .8*w2;
R_roi.height = e_h2 = .65*h2;
Point R_pt1(e_x2, e_y2);
Point R_pt2(e_x2 + e_w2, e_y2 + e_h2);
R_crop = Gray_frame(R_roi);
rectangle(Orig_frame, R_pt1, R_pt2, Scalar(0, 255, 0), 2, 8, 0);
imshow("Right Eye",R_crop);
}
}
}
I'm trying to do eye tracking with OpenCV for my thesis project, but I keep running into a vector out of range problem. I tried to solve it inside the code by creating a second vector, eyes1, but that did not work. Also, when I cover one eye with my hand, could that block the frame, or does it have any connection with the problem? This is my last chance before I show it to my teacher :-) I hope we can find the problem. Thanks.
Image of error message: http://i.stack.imgur.com/HCWZ9.jpg
I'd like to rotate an image, but I can't obtain the rotated image without cropping.
My original image:
Now I use this code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// Compile with g++ code.cpp -lopencv_core -lopencv_highgui -lopencv_imgproc
int main()
{
cv::Mat src = cv::imread("im.png", CV_LOAD_IMAGE_UNCHANGED);
cv::Mat dst;
cv::Point2f pc(src.cols/2., src.rows/2.);
cv::Mat r = cv::getRotationMatrix2D(pc, -45, 1.0);
cv::warpAffine(src, dst, r, src.size()); // what size I should use?
cv::imwrite("rotated_im.png", dst);
return 0;
}
And obtain the following image:
But I'd like to obtain this:
My answer is inspired by the following posts / blog entries:
Rotate cv::Mat using cv::warpAffine offsets destination image
http://john.freml.in/opencv-rotation
Main ideas:
Adjusting the rotation matrix by adding a translation to the new image center
Using cv::RotatedRect to rely on existing OpenCV functionality as much as possible
Code tested with OpenCV 3.4.1:
#include "opencv2/opencv.hpp"
int main()
{
cv::Mat src = cv::imread("im.png", CV_LOAD_IMAGE_UNCHANGED);
double angle = -45;
// get rotation matrix for rotating the image around its center in pixel coordinates
cv::Point2f center((src.cols-1)/2.0, (src.rows-1)/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle, center not relevant
cv::Rect2f bbox = cv::RotatedRect(cv::Point2f(), src.size(), angle).boundingRect2f();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - src.cols/2.0;
rot.at<double>(1,2) += bbox.height/2.0 - src.rows/2.0;
cv::Mat dst;
cv::warpAffine(src, dst, rot, bbox.size());
cv::imwrite("rotated_im.png", dst);
return 0;
}
Just try the code below; the idea is simple:
You need to create a blank image with the maximum size you're expecting while rotating at any angle. Here you should use Pythagoras, as mentioned in the comments above.
Now copy the source image to the newly created image and pass it to warpAffine. Here you should use the centre of the newly created image for rotation.
After warpAffine, if you need to crop the exact image, translate the four corners of the source image in the enlarged image using the rotation matrix, as described here.
Find the minimum x and minimum y for the top corner, and the maximum x and maximum y for the bottom corner, from the above result to crop the image.
This is the code:
int theta = 0;
Mat src,frame, frameRotated;
src = imread("rotate.png",1);
cout<<endl<<endl<<"Press '+' to rotate anti-clockwise and '-' for clockwise 's' to save" <<endl<<endl;
int diagonal = (int)sqrt(src.cols*src.cols+src.rows*src.rows);
int newWidth = diagonal;
int newHeight =diagonal;
int offsetX = (newWidth - src.cols) / 2;
int offsetY = (newHeight - src.rows) / 2;
Mat targetMat(newWidth, newHeight, src.type());
Point2f src_center(targetMat.cols/2.0F, targetMat.rows/2.0F);
while(1){
src.copyTo(frame);
double radians = theta * M_PI / 180.0;
double sin = abs(std::sin(radians));
double cos = abs(std::cos(radians));
frame.copyTo(targetMat.rowRange(offsetY, offsetY + frame.rows).colRange(offsetX, offsetX + frame.cols));
Mat rot_mat = getRotationMatrix2D(src_center, theta, 1.0);
warpAffine(targetMat, frameRotated, rot_mat, targetMat.size());
//Calculate bounding rect and for exact image
//Reference:- https://stackoverflow.com/questions/19830477/find-the-bounding-rectangle-of-rotated-rectangle/19830964?noredirect=1#19830964
Rect bound_Rect(frame.cols,frame.rows,0,0);
int x1 = offsetX;
int x2 = offsetX+frame.cols;
int x3 = offsetX;
int x4 = offsetX+frame.cols;
int y1 = offsetY;
int y2 = offsetY;
int y3 = offsetY+frame.rows;
int y4 = offsetY+frame.rows;
Mat co_Ordinate = (Mat_<double>(3,4) << x1, x2, x3, x4,
y1, y2, y3, y4,
1, 1, 1, 1 );
Mat RotCo_Ordinate = rot_mat * co_Ordinate;
for(int i=0;i<4;i++){
if(RotCo_Ordinate.at<double>(0,i)<bound_Rect.x)
bound_Rect.x=(int)RotCo_Ordinate.at<double>(0,i); //access smallest
if(RotCo_Ordinate.at<double>(1,i)<bound_Rect.y)
bound_Rect.y=RotCo_Ordinate.at<double>(1,i); //access smallest y
}
for(int i=0;i<4;i++){
if(RotCo_Ordinate.at<double>(0,i)>bound_Rect.width)
bound_Rect.width=(int)RotCo_Ordinate.at<double>(0,i); //access largest x
if(RotCo_Ordinate.at<double>(1,i)>bound_Rect.height)
bound_Rect.height=RotCo_Ordinate.at<double>(1,i); //access largest y
}
bound_Rect.width=bound_Rect.width-bound_Rect.x;
bound_Rect.height=bound_Rect.height-bound_Rect.y;
Mat cropedResult;
Mat ROI = frameRotated(bound_Rect);
ROI.copyTo(cropedResult);
imshow("Result", cropedResult);
imshow("frame", frame);
imshow("rotated frame", frameRotated);
char k=waitKey();
if(k=='+') theta+=10;
if(k=='-') theta-=10;
if(k=='s') imwrite("rotated.jpg",cropedResult);
if(k==27) break;
}
Cropped Image
Thanks Robula!
Actually, you do not need to compute sine and cosine twice.
import cv2
def rotate_image(mat, angle):
# angle in degrees
height, width = mat.shape[:2]
image_center = (width/2, height/2)
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)
abs_cos = abs(rotation_mat[0,0])
abs_sin = abs(rotation_mat[0,1])
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
rotation_mat[0, 2] += bound_w/2 - image_center[0]
rotation_mat[1, 2] += bound_h/2 - image_center[1]
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
Thanks @Haris! Here's the Python version:
def rotate_image(image, angle):
'''Rotate image "angle" degrees.
How it works:
- Creates a blank image that fits any rotation of the image. To achieve
this, set the height and width to be the image's diagonal.
- Copy the original image to the center of this blank image
- Rotate using warpAffine, using the newly created image's center
(the enlarged blank image center)
- Translate the four corners of the source image in the enlarged image
using homogenous multiplication of the rotation matrix.
- Crop the image according to these transformed corners
'''
diagonal = int(math.sqrt(pow(image.shape[0], 2) + pow(image.shape[1], 2)))
offset_x = (diagonal - image.shape[0])/2
offset_y = (diagonal - image.shape[1])/2
dst_image = np.zeros((diagonal, diagonal, 3), dtype='uint8')
image_center = (diagonal/2, diagonal/2)
R = cv2.getRotationMatrix2D(image_center, angle, 1.0)
dst_image[offset_x:(offset_x + image.shape[0]), \
offset_y:(offset_y + image.shape[1]), \
:] = image
dst_image = cv2.warpAffine(dst_image, R, (diagonal, diagonal), flags=cv2.INTER_LINEAR)
# Calculate the rotated bounding rect
x0 = offset_x
x1 = offset_x + image.shape[0]
x2 = offset_x
x3 = offset_x + image.shape[0]
y0 = offset_y
y1 = offset_y
y2 = offset_y + image.shape[1]
y3 = offset_y + image.shape[1]
corners = np.zeros((3,4))
corners[0,0] = x0
corners[0,1] = x1
corners[0,2] = x2
corners[0,3] = x3
corners[1,0] = y0
corners[1,1] = y1
corners[1,2] = y2
corners[1,3] = y3
corners[2:] = 1
c = np.dot(R, corners)
x = int(c[0,0])
y = int(c[1,0])
left = x
right = x
up = y
down = y
for i in range(4):
x = int(c[0,i])
y = int(c[1,i])
if (x < left): left = x
if (x > right): right = x
if (y < up): up = y
if (y > down): down = y
h = down - up
w = right - left
cropped = np.zeros((w, h, 3), dtype='uint8')
cropped[:, :, :] = dst_image[left:(left+w), up:(up+h), :]
return cropped
Increase the image canvas (equally from the center without changing the image size) so that it can fit the image after rotation, then apply warpAffine:
Mat img = imread ("/path/to/image", 1);
double offsetX, offsetY;
double angle = -45;
double width = img.size().width;
double height = img.size().height;
Point2d center = Point2d (width / 2, height / 2);
Rect bounds = RotatedRect (center, img.size(), angle).boundingRect();
Mat resized = Mat::zeros (bounds.size(), img.type());
offsetX = (bounds.width - width) / 2;
offsetY = (bounds.height - height) / 2;
Rect roi = Rect (offsetX, offsetY, width, height);
img.copyTo (resized (roi));
center += Point2d (offsetX, offsetY);
Mat M = getRotationMatrix2D (center, angle, 1.0);
warpAffine (resized, resized, M, resized.size());
After searching around for a clean and easy to understand solution and reading through the answers above trying to understand them, I eventually came up with a solution using trigonometry.
I hope this helps somebody :)
import cv2
import math
def rotate_image(mat, angle):
height, width = mat.shape[:2]
image_center = (width / 2, height / 2)
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1)
radians = math.radians(angle)
sin = math.sin(radians)
cos = math.cos(radians)
bound_w = int((height * abs(sin)) + (width * abs(cos)))
bound_h = int((height * abs(cos)) + (width * abs(sin)))
rotation_mat[0, 2] += ((bound_w / 2) - image_center[0])
rotation_mat[1, 2] += ((bound_h / 2) - image_center[1])
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
EDIT: Please refer to @Remi Cuingnet's answer below.
For a Python version where you can take control of the padded black-coloured region when rotating an image, you can use scipy.ndimage.rotate. Here is an example:
import cv2
import matplotlib.pyplot as plt
from skimage import io
from scipy import ndimage
image = io.imread('https://www.pyimagesearch.com/wp-content/uploads/2019/12/tensorflow2_install_ubuntu_header.jpg')
io.imshow(image)
plt.show()
rotated = ndimage.rotate(image, angle=234, mode='nearest')
rotated = cv2.resize(rotated, (image.shape[1], image.shape[0]))  # dsize is (width, height)
# rotated = cv2.cvtColor(rotated, cv2.COLOR_BGR2RGB)
# cv2.imwrite('rotated.jpg', rotated)
io.imshow(rotated)
plt.show()
If you have a rotation and a scaling of the image:
#include "opencv2/opencv.hpp"
#include <functional>
#include <vector>
bool compareCoords(cv::Point2f p1, cv::Point2f p2, char coord)
{
assert(coord == 'x' || coord == 'y');
if (coord == 'x')
return p1.x < p2.x;
return p1.y < p2.y;
}
int main(int argc, char** argv)
{
cv::Mat image = cv::imread("lenna.png");
float angle = 45.0; // degrees
float scale = 0.5;
cv::Mat_<float> rot_mat = cv::getRotationMatrix2D( cv::Point2f( 0.0f, 0.0f ), angle, scale );
// Image corners
cv::Point2f pA = cv::Point2f(0.0f, 0.0f);
cv::Point2f pB = cv::Point2f(image.cols, 0.0f);
cv::Point2f pC = cv::Point2f(image.cols, image.rows);
cv::Point2f pD = cv::Point2f(0.0f, image.rows);
std::vector<cv::Point2f> pts = { pA, pB, pC, pD };
std::vector<cv::Point2f> ptsTransf;
cv::transform(pts, ptsTransf, rot_mat );
using namespace std::placeholders;
float minX = std::min_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'x'))->x;
float maxX = std::max_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'x'))->x;
float minY = std::min_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'y'))->y;
float maxY = std::max_element(ptsTransf.begin(), ptsTransf.end(), std::bind(compareCoords, _1, _2, 'y'))->y;
float newW = maxX - minX;
float newH = maxY - minY;
cv::Mat_<float> trans_mat = (cv::Mat_<float>(2,3) << 0, 0, -minX, 0, 0, -minY);
cv::Mat_<float> M = rot_mat + trans_mat;
cv::Mat warpedImage;
cv::warpAffine( image, warpedImage, M, cv::Size(newW, newH) );
cv::imshow("lenna", image);
cv::imshow("Warped lenna", warpedImage);
cv::waitKey();
cv::destroyAllWindows();
return 0;
}
Thanks to everyone for this post, it has been super useful. However, I found some black lines on the left and top (using Rose's Python version) when rotating 90º. The problem seemed to be some int() roundings. In addition to that, I changed the sign of the angle to make it grow clockwise.
def rotate_image(image, angle):
'''Rotate image "angle" degrees.
How it works:
- Creates a blank image that fits any rotation of the image. To achieve
this, set the height and width to be the image's diagonal.
- Copy the original image to the center of this blank image
- Rotate using warpAffine, using the newly created image's center
(the enlarged blank image center)
- Translate the four corners of the source image in the enlarged image
using homogenous multiplication of the rotation matrix.
- Crop the image according to these transformed corners
'''
diagonal = int(math.ceil(math.sqrt(pow(image.shape[0], 2) + pow(image.shape[1], 2))))
offset_x = (diagonal - image.shape[0])/2
offset_y = (diagonal - image.shape[1])/2
dst_image = np.zeros((diagonal, diagonal, 3), dtype='uint8')
image_center = (float(diagonal-1)/2, float(diagonal-1)/2)
R = cv2.getRotationMatrix2D(image_center, -angle, 1.0)
dst_image[offset_x:(offset_x + image.shape[0]), offset_y:(offset_y + image.shape[1]), :] = image
dst_image = cv2.warpAffine(dst_image, R, (diagonal, diagonal), flags=cv2.INTER_LINEAR)
# Calculate the rotated bounding rect
x0 = offset_x
x1 = offset_x + image.shape[0]
x2 = offset_x + image.shape[0]
x3 = offset_x
y0 = offset_y
y1 = offset_y
y2 = offset_y + image.shape[1]
y3 = offset_y + image.shape[1]
corners = np.zeros((3,4))
corners[0,0] = x0
corners[0,1] = x1
corners[0,2] = x2
corners[0,3] = x3
corners[1,0] = y0
corners[1,1] = y1
corners[1,2] = y2
corners[1,3] = y3
corners[2:] = 1
c = np.dot(R, corners)
x = int(round(c[0,0]))
y = int(round(c[1,0]))
left = x
right = x
up = y
down = y
for i in range(4):
x = c[0,i]
y = c[1,i]
if (x < left): left = x
if (x > right): right = x
if (y < up): up = y
if (y > down): down = y
h = int(round(down - up))
w = int(round(right - left))
left = int(round(left))
up = int(round(up))
cropped = np.zeros((w, h, 3), dtype='uint8')
cropped[:, :, :] = dst_image[left:(left+w), up:(up+h), :]
return cropped
Go version (using gocv) of @Robula's and @Remi Cuingnet's answers:
func rotateImage(mat *gocv.Mat, angle float64) *gocv.Mat {
height := mat.Rows()
width := mat.Cols()
imgCenter := image.Point{X: width/2, Y: height/2}
rotationMat := gocv.GetRotationMatrix2D(imgCenter, -angle, 1.0)
absCos := math.Abs(rotationMat.GetDoubleAt(0, 0))
absSin := math.Abs(rotationMat.GetDoubleAt(0, 1))
boundW := float64(height) * absSin + float64(width) * absCos
boundH := float64(height) * absCos + float64(width) * absSin
rotationMat.SetDoubleAt(0, 2, rotationMat.GetDoubleAt(0, 2) + (boundW / 2) - float64(imgCenter.X))
rotationMat.SetDoubleAt(1, 2, rotationMat.GetDoubleAt(1, 2) + (boundH / 2) - float64(imgCenter.Y))
gocv.WarpAffine(*mat, mat, rotationMat, image.Point{ X: int(boundW), Y: int(boundH) })
return mat
}
I rotate in the same matrix in-memory; make a new matrix if you don't want to alter it.
For anyone using the Emgu.CV or OpenCvSharp wrapper in .NET, here's a C# implementation of Lars Schillingmann's answer:
Emgu.CV:
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
public static class MatExtension
{
/// <summary>
/// <see>https://stackoverflow.com/questions/22041699/rotate-an-image-without-cropping-in-opencv-in-c/75451191#75451191</see>
/// </summary>
public static Mat Rotate(this Mat src, float degrees)
{
degrees = -degrees; // counter-clockwise to clockwise
var center = new PointF((src.Width - 1) / 2f, (src.Height - 1) / 2f);
var rotationMat = new Mat();
CvInvoke.GetRotationMatrix2D(center, degrees, 1, rotationMat);
var boundingRect = new RotatedRect(new(), src.Size, degrees).MinAreaRect();
rotationMat.Set(0, 2, rotationMat.Get<double>(0, 2) + (boundingRect.Width / 2f) - (src.Width / 2f));
rotationMat.Set(1, 2, rotationMat.Get<double>(1, 2) + (boundingRect.Height / 2f) - (src.Height / 2f));
var rotatedSrc = new Mat();
CvInvoke.WarpAffine(src, rotatedSrc, rotationMat, boundingRect.Size);
return rotatedSrc;
}
/// <summary>
/// <see>https://stackoverflow.com/questions/32255440/how-can-i-get-and-set-pixel-values-of-an-emgucv-mat-image/69537504#69537504</see>
/// </summary>
public static unsafe void Set<T>(this Mat mat, int row, int col, T value) where T : struct =>
_ = new Span<T>(mat.DataPointer.ToPointer(), mat.Rows * mat.Cols * mat.ElementSize)
{
[(row * mat.Cols) + col] = value
};
public static unsafe T Get<T>(this Mat mat, int row, int col) where T : struct =>
new ReadOnlySpan<T>(mat.DataPointer.ToPointer(), mat.Rows * mat.Cols * mat.ElementSize)
[(row * mat.Cols) + col];
}
OpenCvSharp:
OpenCvSharp already has a Mat.Set<> method that works the same as mat.at<> in the original OpenCV, so we don't have to copy those methods from How can I get and set pixel values of an EmguCV Mat image?
using OpenCvSharp;
public static class MatExtension
{
/// <summary>
/// <see>https://stackoverflow.com/questions/22041699/rotate-an-image-without-cropping-in-opencv-in-c/75451191#75451191</see>
/// </summary>
public static Mat Rotate(this Mat src, float degrees)
{
degrees = -degrees; // counter-clockwise to clockwise
var center = new Point2f((src.Width - 1) / 2f, (src.Height - 1) / 2f);
var rotationMat = Cv2.GetRotationMatrix2D(center, degrees, 1);
var boundingRect = new RotatedRect(new(), new Size2f(src.Width, src.Height), degrees).BoundingRect();
rotationMat.Set(0, 2, rotationMat.Get<double>(0, 2) + (boundingRect.Width / 2f) - (src.Width / 2f));
rotationMat.Set(1, 2, rotationMat.Get<double>(1, 2) + (boundingRect.Height / 2f) - (src.Height / 2f));
var rotatedSrc = new Mat();
Cv2.WarpAffine(src, rotatedSrc, rotationMat, boundingRect.Size);
return rotatedSrc;
}
}
Also, you may want to mutate the src param instead of returning a new clone of it during rotation; for that, you can just set the dst param of WarpAffine() to be the same as src: c++, opencv: Is it safe to use the same Mat for both source and destination images in filtering operation?
CvInvoke.WarpAffine(src, src, rotationMat, boundingRect.Size);
This is called in-place mode: https://answers.opencv.org/question/24/do-all-opencv-functions-support-in-place-mode-for-their-arguments/
Can the OpenCV function cvtColor be used to convert a matrix in place?
If it is just to rotate 90 degrees, maybe this code could be useful.
Mat img = imread("images.jpg");
Mat rt(img.rows, img.rows, CV_8U);
Point2f pc(img.cols / 2.0, img.rows / 2.0);
Mat r = getRotationMatrix2D(pc, 90, 1);
warpAffine(img, rt, r, rt.size());
imshow("rotated", rt);
Hope it's useful.
By the way, for 90º rotations only, here is a more efficient + accurate function:
def rotate_image_90(image, angle):
angle = -angle
rotated_image = image
if angle == 0:
pass
elif angle == 90:
rotated_image = np.rot90(rotated_image)
elif angle == 180 or angle == -180:
rotated_image = np.rot90(rotated_image)
rotated_image = np.rot90(rotated_image)
elif angle == -90:
rotated_image = np.rot90(rotated_image)
rotated_image = np.rot90(rotated_image)
rotated_image = np.rot90(rotated_image)
return rotated_image