As in the subject: how do I include JpegBitmapDecoder^ in a C++/CLI project? I have tried including the namespace, but I get:
Error 1 error C3083: 'Windows': the symbol to the left of a '::' must be a type C:\Users\Duke\Documents\Visual Studio 2010\Projects\Jpg\Jpg\Jpg.cpp 8 1 Jpg
Error 2 error C3083: 'Media': the symbol to the left of a '::' must be a type C:\Users\Duke\Documents\Visual Studio 2010\Projects\Jpg\Jpg\Jpg.cpp 8 1 Jpg
Error 3 error C2039: 'Imaging' : is not a member of 'System' C:\Users\Duke\Documents\Visual Studio 2010\Projects\Jpg\Jpg\Jpg.cpp 8 1 Jpg
Error 4 error C2871: 'Imaging' : a namespace with this name does not exist C:\Users\Duke\Documents\Visual Studio 2010\Projects\Jpg\Jpg\Jpg.cpp 8 1 Jpg
// Jpg.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#using <mscorlib.dll> //requires CLI
using namespace System;
using namespace System::IO;
using namespace System::Windows::Media::Imaging;
int _tmain(int argc, _TCHAR* argv[])
{
    // Open a Stream and decode a JPEG image
    Stream^ imageStreamSource = gcnew FileStream("C:\\Users\\Duke\\Desktop\\heart.jpg", FileMode::Open, FileAccess::Read, FileShare::Read);
    JpegBitmapDecoder^ decoder = gcnew JpegBitmapDecoder(imageStreamSource, BitmapCreateOptions::PreservePixelFormat, BitmapCacheOption::Default); // i want that decoder
    BitmapSource^ bitmapSource = decoder->Frames[0]; // <-- we have the bitmap
    // Draw the Image
    Image^ myImage = gcnew Image();
    myImage->Source = bitmapSource;
    myImage->Stretch = Stretch::None;
    myImage->Margin = System::Windows::Thickness(20);
    //
    int width = 128;
    int height = width;
    int stride = width / 8;
    array<System::Byte>^ pixels = gcnew array<System::Byte>(height * stride);
    // Define the image palette
    BitmapPalette^ myPalette = BitmapPalettes::Halftone256;
    // Creates a new empty image with the pre-defined palette.
    BitmapSource^ image = BitmapSource::Create(
        width, height,
        96, 96,
        PixelFormats::Indexed1,
        myPalette,
        pixels,
        stride);
    System::IO::FileStream^ stream = gcnew System::IO::FileStream("new.jpg", FileMode::Create);
    JpegBitmapEncoder^ encoder = gcnew JpegBitmapEncoder();
    TextBlock^ myTextBlock = gcnew System::Windows::Controls::TextBlock();
    myTextBlock->Text = "Codec Author is: " + encoder->CodecInfo->Author->ToString();
    encoder->FlipHorizontal = true;
    encoder->FlipVertical = false;
    encoder->QualityLevel = 30;
    encoder->Rotation = Rotation::Rotate90;
    encoder->Frames->Add(BitmapFrame::Create(image));
    encoder->Save(stream);
    return 0;
}
Use this instead:
using namespace System::Windows::Media::Imaging;
Clearly, the using directives just above it should have hinted that in C++/CLI you use :: rather than . as the namespace separator.
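That said, a using namespace directive only compiles if the assemblies defining the namespace are referenced, and mscorlib.dll alone does not contain System::Windows::Media::Imaging; the C3083/C2871 errors are exactly what you see when the compiler cannot find the namespace at all. A minimal sketch of the extra #using directives this code needs (assuming the standard .NET Framework WPF assembly names; alternatively, add the references on the project's Framework and References page):
#using <WindowsBase.dll>           // core WPF support types
#using <PresentationCore.dll>      // BitmapSource, JpegBitmapDecoder, PixelFormats
#using <PresentationFramework.dll> // Image, TextBlock, Thickness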
Related
I am trying to use the sinusoidal pattern tool in C++ with Visual Studio. I have placed the code that I am testing below. In Visual Studio everything looks fine bar the red squiggle under params in the following line:
Ptr<structured_light::SinusoidalPattern> sinus = structured_light::SinusoidalPattern::create(params);
When I try to build I get the following error message:
Error (active): no suitable user-defined conversion from "cv::structured_light::SinusoidalPattern::Params" to "cv::Ptr<cv::structured_light::SinusoidalPattern::Params>" exists
Project: Structured_Light_Test
File: c:\Users\ianco\Desktop\CPlusPlus_Programming\Structured_Light_Test\Structured_Light_Test\Main.cpp, line 70
I would be very grateful if anyone could offer some advice on how I could get round this issue or suggest another method.
CODE:
#include <opencv2/highgui.hpp>
#include <vector>
#include <iostream>
#include <fstream>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/structured_light.hpp>
#include <opencv2/phase_unwrapping.hpp>
using namespace cv;
using namespace std;
int main(int argc, char **argv)
{
    structured_light::SinusoidalPattern::Params params;
    params.width = 1080;
    params.height = 700;
    params.nbrOfPeriods = 5;
    params.setMarkers = true;
    params.horizontal = false;
    params.methodId = 2;
    params.shiftValue = static_cast<float>(2 * CV_PI / 3);
    params.nbrOfPixelsBetweenMarkers = 70;
    String outputPatternPath = "C:/Users/ianco/Desktop/CPlusPlus_Programming";
    String outputWrappedPhasePath = "C:/Users/ianco/Desktop/CPlusPlus_Programming";
    String outputUnwrappedPhasePath = "C:/Users/ianco/Desktop/CPlusPlus_Programming";
    Ptr<structured_light::SinusoidalPattern> sinus = structured_light::SinusoidalPattern::create(params);
    // Storage for patterns
    vector<Mat> patterns;
    // Generate sinusoidal patterns
    sinus->generate(patterns);
    cv::Mat blue, green, red;
    std::vector<cv::Mat> images(3);
    // OpenCV works natively with BGR ordering
    images.at(0) = patterns[0];
    images.at(1) = patterns[1];
    images.at(2) = patterns[2];
    cv::Mat color;
    cv::merge(images, color);
    namedWindow("pattern", WINDOW_NORMAL);
    setWindowProperty("pattern", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
    imshow("pattern", color);
    waitKey(3000);
}
The documentation tells you that params should also be a Ptr, but you passed the object directly.
Try using makePtr.
Change this line:
structured_light::SinusoidalPattern::Params params;
to this:
Ptr<structured_light::SinusoidalPattern::Params> params = makePtr<structured_light::SinusoidalPattern::Params>();
You will also have to change . to -> for each use of params (e.g. params.width = 1080; becomes params->width = 1080;), since it is now a smart pointer.
The rest of the code should be ok.
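Put together, the fixed section of the question's code would look roughly like this (a sketch; only the first two fields are repeated, the rest follow the same pattern):
Ptr<structured_light::SinusoidalPattern::Params> params =
    makePtr<structured_light::SinusoidalPattern::Params>();
params->width = 1080;
params->height = 700;
// ... remaining fields as in the question, with -> instead of . ...
Ptr<structured_light::SinusoidalPattern> sinus =
    structured_light::SinusoidalPattern::create(params);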
I'm trying to make a simple ANN network with OpenCV in Qt, to develop it further later.
I tried with simple data and I get an error that says: OpenCV Error: Assertion failed ((unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels())) in cv::Mat::at
Here's the code I wrote:
#include <iostream>
#include <opencv2/ml.hpp>
#include <opencv/cv.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "nnet.h"
using namespace std;
using namespace cv;
int main()
{
    string filename = "data.csv";
    Ptr<cv::ml::TrainData> tdata = cv::ml::TrainData::loadFromCSV(filename, 0, -1, -1);
    Mat trainData = tdata->getTrainSamples();
    Mat trainLabels = tdata->getTrainResponses();
    int numClasses = 3;
    Mat hot(trainLabels.rows, numClasses, CV_32F, 0.0f);
    for (int i = 0; i < trainLabels.rows; i++) {
        int id = (int)trainLabels.at<float>(i);
        hot.at<float>(i, id) = 1.0f;
    }
    int input_neurons = 5;
    int hidden_neurons = 5;
    int output_neurons = 3;
    Mat layerSizes = Mat(3, 1, CV_32SC1);
    layerSizes.row(0) = Scalar(input_neurons);
    layerSizes.row(1) = Scalar(hidden_neurons);
    layerSizes.row(2) = Scalar(output_neurons);
    Ptr<cv::ml::ANN_MLP> myNetwork = cv::ml::ANN_MLP::create();
    myNetwork->setLayerSizes(layerSizes);
    myNetwork->setTrainMethod(ml::ANN_MLP::SIGMOID_SYM);
    myNetwork->setTermCriteria(TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 1000, 0.00001f));
    myNetwork->setTrainMethod(ml::ANN_MLP::BACKPROP, 0.1f, 0.1f);
    myNetwork->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM, 1, 1);
    myNetwork->train(trainData, 0, hot);
    string testfilename = "test-data.csv";
    Ptr<cv::ml::TrainData> testdata = cv::ml::TrainData::loadFromCSV(testfilename, 0, 0, -1);
    Mat testData = testdata->getTrainSamples();
    Mat testLabels = testdata->getTrainResponses();
    Mat testResults;
    myNetwork->predict(testData, testResults);
    float accuracy = float(countNonZero(testResults == testLabels)) / testLabels.rows;
    printf("%f", accuracy);
    return 0;
}
For the data set, data.csv contains:
1,2,3,7,2
7,1,7,7,5
9,7,5,3,2
12,21,32,71,8
and data-test.csv contains:
1,2,1,1,2,
9,1,2,12,5,
11,28,14,50,8,
3,1,2,12,5,
11,28,24,20,8,
Thank you in advance for your help.
I found the solution to my problem: in the CSV file I have 3 classes, so the response values should be in the range [0..2], but I had given random values of 5 and 8. Changing them solved the problem.
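A cheap way to catch this class of bug early is to assert the label range before filling the one-hot matrix; a sketch against the loop from the question (CV_Assert is the standard OpenCV check macro):
for (int i = 0; i < trainLabels.rows; i++) {
    int id = (int)trainLabels.at<float>(i);
    CV_Assert(id >= 0 && id < numClasses); // responses must lie in [0, numClasses)
    hot.at<float>(i, id) = 1.0f;
}
With an out-of-range response such as 5 or 8, the assertion fires at the offending row instead of deep inside cv::Mat::at.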
The stride I used generates an exception, and I don't know what stride is correct. The input image is a 32-bit JPG. Please tell me what values I should put in the following lines (I tried many things, but they either generated exceptions or produced a corrupted JPG):
array<System::Byte>^ pixels = gcnew array<System::Byte>(WHAT VALUE);
bitmapSource->CopyPixels(pixels, WHAT VALUE, 0);
// Jpg.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include <iostream>
#using <mscorlib.dll> //requires CLI
using namespace System;
using namespace System::IO;
using namespace System::Windows::Media::Imaging;
using namespace System::Windows::Media;
using namespace System::Windows::Controls;
using namespace std;
[System::STAThread]
int _tmain(int argc, _TCHAR* argv[])
{
    // Open a Stream and decode a JPEG image
    Stream^ imageStreamSource = gcnew FileStream("C:/heart2.jpg",
        FileMode::Open, FileAccess::Read, FileShare::Read);
    JpegBitmapDecoder^ decoder = gcnew JpegBitmapDecoder(
        imageStreamSource, BitmapCreateOptions::PreservePixelFormat,
        BitmapCacheOption::Default);
    BitmapSource^ bitmapSource = decoder->Frames[0]; // <-- we have the bitmap
    // Draw the Image
    Image^ myImage = gcnew Image();
    myImage->Source = bitmapSource;
    myImage->Stretch = Stretch::None;
    myImage->Margin = System::Windows::Thickness(20);
    int width = bitmapSource->PixelWidth;
    int height = bitmapSource->PixelHeight;
    int stride = (width * bitmapSource->Format.BitsPerPixel + 31) / 32;
    array<System::Byte>^ pixels
        = gcnew array<System::Byte>(height * width * stride);
    bitmapSource->CopyPixels(pixels, stride, 0);
    int x;
    cin >> x;
    return 0;
}
A quick Google search turns up the MSDN documentation on stride:
http://msdn.microsoft.com/en-us/library/system.drawing.imaging.bitmapdata.stride.aspx
The stride is the width of a single row of pixels (a scan line), rounded up to a four-byte boundary.
So the correct value depends on how many bits per pixel you have in your image.
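For the question's code, that means computing the byte stride from Format.BitsPerPixel and sizing the buffer as height * stride; a sketch (assuming bitmapSource is the decoded frame from the question):
int bitsPerPixel = bitmapSource->Format.BitsPerPixel; // 32 for a 32-bit image
int stride = ((width * bitsPerPixel + 31) / 32) * 4;  // bytes per scan line, rounded up to a 4-byte boundary
array<System::Byte>^ pixels = gcnew array<System::Byte>(height * stride);
bitmapSource->CopyPixels(pixels, stride, 0);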
I get a weird InvalidOperationException in PresentationCore.dll while constructing an image with gcnew Image().
I attach the project and the JPG file (which can be put in C:\). It can't really be checked any other way, because configuring the project (references) took a long time, and the code alone, just copied, will not work.
http://www.speedyshare.com/Vrr84/Jpg.zip
How can I solve that problem?
// Jpg.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#using <mscorlib.dll> //requires CLI
using namespace System;
using namespace System::IO;
using namespace System::Windows::Media::Imaging;
using namespace System::Windows::Media;
using namespace System::Windows::Controls;
int _tmain(int argc, _TCHAR* argv[])
{
    // Open a Stream and decode a JPEG image
    Stream^ imageStreamSource = gcnew FileStream("C:/heart.jpg", FileMode::Open, FileAccess::Read, FileShare::Read);
    JpegBitmapDecoder^ decoder = gcnew JpegBitmapDecoder(imageStreamSource, BitmapCreateOptions::PreservePixelFormat, BitmapCacheOption::Default);
    BitmapSource^ bitmapSource = decoder->Frames[0]; // <-- we have the bitmap
    // Draw the Image
    Image^ myImage = gcnew Image(); // <----------- ERROR
    myImage->Source = bitmapSource;
    myImage->Stretch = Stretch::None;
    myImage->Margin = System::Windows::Thickness(20);
    //
    int width = 128;
    int height = width;
    int stride = width / 8;
    array<System::Byte>^ pixels = gcnew array<System::Byte>(height * stride);
    // Define the image palette
    BitmapPalette^ myPalette = BitmapPalettes::Halftone256;
    // Creates a new empty image with the pre-defined palette.
    BitmapSource^ image = BitmapSource::Create(
        width, height,
        96, 96,
        PixelFormats::Indexed1,
        myPalette,
        pixels,
        stride);
    System::IO::FileStream^ stream = gcnew System::IO::FileStream("new.jpg", FileMode::Create);
    JpegBitmapEncoder^ encoder = gcnew JpegBitmapEncoder();
    TextBlock^ myTextBlock = gcnew System::Windows::Controls::TextBlock();
    myTextBlock->Text = "Codec Author is: " + encoder->CodecInfo->Author->ToString();
    encoder->FlipHorizontal = true;
    encoder->FlipVertical = false;
    encoder->QualityLevel = 30;
    encoder->Rotation = Rotation::Rotate90;
    encoder->Frames->Add(BitmapFrame::Create(image));
    encoder->Save(stream);
    return 0;
}
The core issue is right there:
The calling thread must be STA [...]
Your main thread must be marked as a single-threaded apartment (STA for short) for WPF to function correctly. The fix? Add [System::STAThread] to your _tmain, informing the runtime that the main thread has to be STA.
[System::STAThread]
int _tmain(int argc, _TCHAR* argv[])
{
    // the rest of your code doesn't change
}
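If you would rather not constrain _tmain itself, an alternative is to run the WPF work on a dedicated STA thread; a sketch using the standard System::Threading API (Worker is a hypothetical ref class holding your code):
using namespace System::Threading;

ref class Worker
{
public:
    static void Run()
    {
        // decoding/encoding code goes here
    }
};

int _tmain(int argc, _TCHAR* argv[])
{
    Thread^ t = gcnew Thread(gcnew ThreadStart(&Worker::Run));
    t->SetApartmentState(ApartmentState::STA); // must be set before Start
    t->Start();
    t->Join();
    return 0;
}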
I am going to port some screenshot-grabbing code (C++) from Linux to OS X. The current solution runs graphical applications in Xvfb and then uses Xlib to grab screenshots from the display. (It also supports running without Xvfb.)
As I understand it, OS X is moving away from X11, so my question is: what should I use instead of Xlib to implement this now? I have found Quartz Display Services. Is that what makes sense to use now? Will it work with Xvfb?
Yes, you will be able to call functions like CGDisplayCreateImage (documentation linked for you) by linking the Application Services framework to your C++ tool.
I have written an example that captures the PC display screen and converts it to an OpenCV Mat.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <unistd.h>
#include <stdio.h>
#include <ApplicationServices/ApplicationServices.h>
using namespace std;
using namespace cv;
int main (int argc, char * const argv[])
{
    size_t width = CGDisplayPixelsWide(CGMainDisplayID());
    size_t height = CGDisplayPixelsHigh(CGMainDisplayID());
    Mat im(cv::Size(width, height), CV_8UC4);
    Mat bgrim(cv::Size(width, height), CV_8UC3);
    Mat resizedim(cv::Size(width, height), CV_8UC3);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef contextRef = CGBitmapContextCreate(
        im.data, im.cols, im.rows,
        8, im.step[0],
        colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
    while (true)
    {
        CGImageRef imageRef = CGDisplayCreateImage(CGMainDisplayID());
        CGContextDrawImage(contextRef,
            CGRectMake(0, 0, width, height),
            imageRef);
        cvtColor(im, bgrim, CV_RGBA2BGR);
        resize(bgrim, resizedim, cv::Size(), 0.5, 0.5);
        imshow("test", resizedim);
        cvWaitKey(10);
        CGImageRelease(imageRef);
    }
    // CGContextRelease(contextRef);
    // CGColorSpaceRelease(colorSpace);
    return 0;
}
And then, the result (screenshot omitted here): I had expected my current display to be captured, but actually only the desktop wallpaper was captured. What CGMainDisplayID() refers to would be a hint to this problem.
Anyway, I hope this brings you a bit closer to your goal.
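If the main display is not the one you want, you can enumerate the active displays and capture a specific one; a sketch using CGGetActiveDisplayList (the cap of 16 displays is an arbitrary choice here):
uint32_t displayCount = 0;
CGDirectDisplayID displays[16];
CGGetActiveDisplayList(16, displays, &displayCount);
for (uint32_t i = 0; i < displayCount; ++i)
{
    CGImageRef img = CGDisplayCreateImage(displays[i]);
    // ... hand img to the drawing/conversion code above ...
    CGImageRelease(img);
}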
void captureScreen()
{
    CGImageRef image_ref = CGDisplayCreateImage(CGMainDisplayID());
    CGDataProviderRef provider = CGImageGetDataProvider(image_ref);
    CFDataRef dataref = CGDataProviderCopyData(provider);
    size_t width = CGImageGetWidth(image_ref);
    size_t height = CGImageGetHeight(image_ref);
    size_t bpp = CGImageGetBitsPerPixel(image_ref) / 8;
    uint8_t *pixels = (uint8_t *)malloc(width * height * bpp);
    memcpy(pixels, CFDataGetBytePtr(dataref), width * height * bpp);
    CFRelease(dataref);
    CGImageRelease(image_ref);
    FILE *stream = fopen("/Users/username/Desktop/screencap.raw", "w+");
    fwrite(pixels, bpp, width * height, stream);
    fclose(stream);
    free(pixels);
}
or in C#:
// https://stackoverflow.com/questions/1537587/capture-screen-image-in-c-on-osx
// https://github.com/Acollie/C-Screenshot-OSX/blob/master/C%2B%2B-screenshot/C%2B%2B-screenshot/main.cpp
// https://github.com/ScreenshotMonitor/ScreenshotCapture/blob/master/src/Pranas.ScreenshotCapture/ScreenshotCapture.cs
// https://screenshotmonitor.com/blog/capturing-screenshots-in-net-and-mono/
namespace rtaStreamingServer
{
    // https://github.com/xamarin/xamarin-macios
    // https://qiita.com/shimshimkaz/items/18bcf4767143ea5897c7
    public static class OSxScreenshot
    {
        private const string LIBCOREGRAPHICS = "/System/Library/Frameworks/CoreGraphics.framework/CoreGraphics";

        [System.Runtime.InteropServices.DllImport(LIBCOREGRAPHICS)]
        private static extern System.IntPtr CGDisplayCreateImage(System.UInt32 displayId);

        [System.Runtime.InteropServices.DllImport(LIBCOREGRAPHICS)]
        private static extern void CFRelease(System.IntPtr handle);

        public static void TestCapture()
        {
            Foundation.NSNumber mainScreen = (Foundation.NSNumber)AppKit.NSScreen.MainScreen.DeviceDescription["NSScreenNumber"];

            using (CoreGraphics.CGImage cgImage = CreateImage(mainScreen.UInt32Value))
            {
                // https://stackoverflow.com/questions/17334786/get-pixel-from-the-screen-screenshot-in-max-osx/17343305#17343305
                // Get byte-array from CGImage
                // https://gist.github.com/zhangao0086/5fafb1e1c0b5d629eb76
                AppKit.NSBitmapImageRep bitmapRep = new AppKit.NSBitmapImageRep(cgImage);

                // var imageData = bitmapRep.representationUsingType(NSBitmapImageFileType.NSPNGFileType, properties: [:])
                Foundation.NSData imageData = bitmapRep.RepresentationUsingTypeProperties(AppKit.NSBitmapImageFileType.Png);

                long len = imageData.Length;
                byte[] bytes = new byte[len];
                System.Runtime.InteropServices.GCHandle pinnedArray = System.Runtime.InteropServices.GCHandle.Alloc(bytes, System.Runtime.InteropServices.GCHandleType.Pinned);
                System.IntPtr pointer = pinnedArray.AddrOfPinnedObject();
                // Do your stuff...
                imageData.GetBytes(pointer, new System.IntPtr(len));
                pinnedArray.Free();

                using (AppKit.NSImage nsImage = new AppKit.NSImage(cgImage, new System.Drawing.SizeF(cgImage.Width, cgImage.Height)))
                {
                    // ImageView.Image = nsImage;
                    // And now? How to get the image bytes?
                    // https://theconfuzedsourcecode.wordpress.com/2016/02/24/convert-android-bitmap-image-and-ios-uiimage-to-byte-array-in-xamarin/
                    // https://stackoverflow.com/questions/5645157/nsimage-from-byte-array
                    // https://stackoverflow.com/questions/53060723/nsimage-source-from-byte-array-cocoa-app-xamarin-c-sharp
                    // https://gist.github.com/zhangao0086/5fafb1e1c0b5d629eb76
                    // https://www.quora.com/What-is-a-way-to-convert-UIImage-to-a-byte-array-in-Swift?share=1
                    // https://stackoverflow.com/questions/17112314/converting-uiimage-to-byte-array
                } // End Using nsImage
            } // End Using cgImage
        } // End Sub TestCapture

        public static CoreGraphics.CGImage CreateImage(System.UInt32 displayId)
        {
            System.IntPtr handle = System.IntPtr.Zero;
            try
            {
                handle = CGDisplayCreateImage(displayId);
                return new CoreGraphics.CGImage(handle);
            }
            finally
            {
                if (handle != System.IntPtr.Zero)
                {
                    CFRelease(handle);
                }
            }
        } // End Sub CreateImage
    } // End Class OSxScreenshot
} // End Namespace rtaStreamingServer