Converting an OpenCV Mat to an AVFrame - C++

I have an image in OpenCV which I can view and save correctly. I want to take this image and pass it to FFmpeg so I can encode it, but when I save the JPEG output from FFmpeg I get an empty image, which probably means I am not copying the data over to the AVFrame correctly.
What am I doing wrong?
Any help is greatly appreciated.
This is how I set up final_frame and the YUV422 frame:
final_frame = avcodec_alloc_frame();
int num_bytes = avpicture_get_size(PIX_FMT_BGR24, 1600, 720);
final_frame1_buffer = (uint8_t *)av_malloc(num_bytes*sizeof(uint8_t));
avpicture_fill((AVPicture*)final_frame, final_frame1_buffer, PIX_FMT_BGR24, 1600, 720);
yuv422_final_frame = avcodec_alloc_frame();
int yuv422_num_bytes = avpicture_get_size( PIX_FMT_YUV422P, 1600, 720 );
final_frame2_buffer = (uint8_t *)av_malloc(yuv422_num_bytes*sizeof(uint8_t));
avpicture_fill((AVPicture*)yuv422_final_frame, final_frame2_buffer, PIX_FMT_YUVJ422P, 1600, 720);
I have attached the code below
cv::imshow("output image", im3); <---------- Image shows correctly
cv::Mat rgb_frame;
cv::cvtColor( im3 , rgb_frame, CV_BGR2RGB ) ;
if (final_sws_ctx == NULL)
{
    final_sws_ctx = sws_getContext(1600, 720,
                                   AV_PIX_FMT_BGR24, 1600, 720,
                                   AV_PIX_FMT_YUVJ422P, SWS_FAST_BILINEAR, 0, 0, 0);
}
imwrite( "rgbjpeg.jpg", rgb_frame ); <----- Image Saves correctly here too
avpicture_fill((AVPicture*)final_frame, rgb_frame.data, PIX_FMT_RGB24, 1600, 720);
sws_scale(final_sws_ctx, final_frame->data,
          final_frame->linesize,
          0, 720,
          yuv422_final_frame->data,
          yuv422_final_frame->linesize);
AVPacket encode_packet;
int got_output = 0;
av_init_packet(&encode_packet);
encode_packet.data = NULL;
encode_packet.size = 0;
int ret = avcodec_encode_video2(final_codec_context,
                                &encode_packet,
                                yuv422_final_frame,
                                &got_output);
if (got_output) {
    CString temp;
    temp.Format("test%u.jpg", counter);
    FILE* outputFile = fopen(temp, "wb");
    printf("Write frame (size=%5d)\n", encode_packet.size);
    fwrite(encode_packet.data, 1, encode_packet.size, outputFile);
    av_free_packet(&encode_packet);
    fclose(outputFile);
    counter++;
}

Try explicitly setting the stride value for final_frame:
final_frame = avcodec_alloc_frame();
avcodec_get_frame_defaults(final_frame);
final_frame->format = PIX_FMT_RGB24;
final_frame->width = 1600;
final_frame->height = 720;
final_frame->data[0] = rgb_frame.data;
final_frame->linesize[0] = rgb_frame.step; // <<< stride
sws_scale(final_sws_ctx, final_frame->data, // etc...
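For reference, here is a minimal sketch (an assumption-laden illustration, not the asker's exact code) that feeds the BGR Mat straight to swscale with an explicit stride, staying with the same deprecated API generation used above. It assumes im3 is the 1600x720 CV_8UC3 BGR image and yuv422_final_frame is allocated as in the question. Note the question also builds its context for AV_PIX_FMT_BGR24 input while filling the frame with RGB24 data after cvtColor; using the BGR Mat directly sidesteps that mismatch.
AVFrame* src = avcodec_alloc_frame();
avcodec_get_frame_defaults(src);
src->format = PIX_FMT_BGR24;
src->width  = 1600;
src->height = 720;
src->data[0]     = im3.data;        // no copy: wrap the Mat's pixel buffer
src->linesize[0] = (int)im3.step;   // cv::Mat rows may be padded
SwsContext* bgr_to_yuv = sws_getContext(1600, 720, AV_PIX_FMT_BGR24,
                                        1600, 720, AV_PIX_FMT_YUVJ422P,
                                        SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(bgr_to_yuv, src->data, src->linesize, 0, 720,
          yuv422_final_frame->data, yuv422_final_frame->linesize);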

Related

sws_scale, YUV to RGB conversion

I need to convert YUV to RGB. I also need the RGB values to be in the limited range (16-235).
I am trying to use the sws_scale function for this task.
You can see my code below, but after conversion the black pixel I get is (0, 0, 0) instead of (16, 16, 16).
Maybe there is an option that tells sws_scale to compute the limited range.
AVFrame* frameRGB = avFrameConvertPixelFormat(_decodedBuffer[i].pAVFrame, AV_PIX_FMT_RGB24);
AVFrame* Decoder::avFrameConvertPixelFormat(const AVFrame* src, AVPixelFormat dstFormat) {
int width = src->width;
int height = src->height;
AVFrame* dst = allocPicture(dstFormat, width, height);
SwsContext* conversion = sws_getContext(width,
height,
(AVPixelFormat)src->format,
width,
height,
dstFormat,
SWS_FAST_BILINEAR,
NULL,
NULL,
NULL);
sws_scale(conversion, src->data, src->linesize, 0, height, dst->data, dst->linesize);
sws_freeContext(conversion);
dst->format = dstFormat;
dst->width = src->width;
dst->height = src->height;
return dst;
}
I also tried converting a YUV pixel to an RGB pixel manually with the formula below, and I got the correct result: from YUV (16, 128, 128) I got (16, 16, 16) in RGB.
cmpR = y + 1.402 * (v - 128);
cmpG = y - 0.3441 * (u - 128) - 0.7141 * (v - 128);
cmpB = y + 1.772 * (u - 128);
You may set the source format to "full scale" YUVJ.
As far as I know, sws_scale has no option for selecting Studio RGB as the output format.
Changing the input format is the best solution I can think of.
The color conversion formula of "JPEG: YUV -> RGB" is the same as the formula in your post.
Examples for setting the source format:
If src->format is PIX_FMT_YUV420P, set the format to PIX_FMT_YUVJ420P.
If src->format is PIX_FMT_YUV422P, set the format to PIX_FMT_YUVJ422P.
If src->format is PIX_FMT_YUV444P, set the format to PIX_FMT_YUVJ444P.
If src->format is PIX_FMT_YUV440P, set the format to PIX_FMT_YUVJ440P.
I know the solution does not cover all the possibilities, and there might be some output pixels exceeding the range [16, 235], so it's not the most general solution...
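A minimal sketch of that substitution (hedged: it assumes a build where both the YUV and YUVJ pixel-format constants are available, as in the FFmpeg versions used elsewhere on this page):
enum AVPixelFormat srcFormat = (enum AVPixelFormat)src->format;
// Mark the source as full-range (JPEG) YUV so sws_scale skips the
// 16-235 -> 0-255 range expansion and studio levels pass through to RGB.
switch (srcFormat) {
case AV_PIX_FMT_YUV420P: srcFormat = AV_PIX_FMT_YUVJ420P; break;
case AV_PIX_FMT_YUV422P: srcFormat = AV_PIX_FMT_YUVJ422P; break;
case AV_PIX_FMT_YUV440P: srcFormat = AV_PIX_FMT_YUVJ440P; break;
case AV_PIX_FMT_YUV444P: srcFormat = AV_PIX_FMT_YUVJ444P; break;
default: break; // other formats left unchanged
}
// ...then pass srcFormat instead of src->format to sws_getContext()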
YUV to RGB conversion using FFmpeg: I see a lot of information has already been given for this above. However, for code completeness I am re-sharing the code with the missing allocPicture() function, plus the header and library to include. It works for me like a charm. Thanks to Валентин Никин and Rotem for most of the info and code.
Headers:
#include <libswscale/swscale.h>
#include <libavutil/frame.h>     // av_frame_alloc / av_frame_free
#include <libavutil/imgutils.h>  // av_image_alloc
Link FFMPEG Library:
libswscale, libavutil
static AVFrame* allocPicture(enum AVPixelFormat pix_fmt, int width, int height)
{
    // Allocate a frame
    AVFrame* frame = av_frame_alloc();
    if (frame == NULL)
    {
        fprintf(stderr, "av_frame_alloc failed");
        return NULL;
    }
    if (av_image_alloc(frame->data, frame->linesize, width, height, pix_fmt, 1) < 0)
    {
        fprintf(stderr, "av_image_alloc failed");
        av_frame_free(&frame);
        return NULL;
    }
    frame->width = width;
    frame->height = height;
    frame->format = pix_fmt;
    return frame;
}
static AVFrame* avFrameConvertPixelFormat(const AVFrame* src, enum AVPixelFormat dstFormat)
{
int width = src->width;
int height = src->height;
AVFrame* dst = allocPicture(dstFormat, width, height);
struct SwsContext* conversion = sws_getContext(width,
height,
(enum AVPixelFormat)src->format,
width,
height,
dstFormat,
SWS_FAST_BILINEAR | SWS_FULL_CHR_H_INT | SWS_ACCURATE_RND,
NULL,
NULL,
NULL);
sws_scale(conversion, src->data, src->linesize, 0, height, dst->data, dst->linesize);
sws_freeContext(conversion);
dst->format = dstFormat;
dst->width = src->width;
dst->height = src->height;
return dst;
}
// convert yuv420p10le to rgb24 (or any other RGB format); srcFrame is the decoded input frame
AVFrame* rgbFrame = avFrameConvertPixelFormat(srcFrame, AV_PIX_FMT_RGB24);

C++ GDI+ SelectPalette

I am playing with GDI+, trying to use
pDC->SelectPalette(CPalette::FromHandle(hLogPal), FALSE);
pDC->RealizePalette();
instead of
memcpy(newBitmapInfo + sizeof(BITMAPINFO), rgbquad, palettesize);
It seems to work with the memcpy(newBitmapInfo + sizeof(BITMAPINFO), rgbquad, palettesize) call, but with SelectPalette I get only a black screen.
I thought the color information could be taken either from the BITMAPINFO or from the palette.
All code:
void ConvertTo8BitImage(BYTE** pBitmapInfo, BYTE** imageData)
{
Gdiplus::GdiplusStartupInput tmp;
ULONG_PTR token;
Gdiplus::GdiplusStartup(&token, &tmp, NULL);
Gdiplus::Bitmap *source = Gdiplus::Bitmap::FromFile(L"D:/TestImage.bmp");
Gdiplus::Bitmap *destination = source->Clone(0, 0, source->GetWidth(), source->GetHeight(),
PixelFormat8bppIndexed);
int width = source->GetWidth();
int height = source->GetHeight();
HBITMAP hBitmap;
Gdiplus::Color color;
destination->GetHBITMAP(color, &hBitmap);
int palettesize = 256 * sizeof(RGBQUAD);
CLSID clsid_bmp;
CLSIDFromString(L"{557cf400-1a04-11d3-9a73-0000f81ef32e}", &clsid_bmp);
*pBitmapInfo = new BYTE[(sizeof(BITMAPINFO) + palettesize)];
BITMAPINFO* ptr = (BITMAPINFO*)*pBitmapInfo;
ptr->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
ptr->bmiHeader.biWidth = width;
ptr->bmiHeader.biHeight = height;
ptr->bmiHeader.biPlanes = 1;
ptr->bmiHeader.biBitCount = 8;
ptr->bmiHeader.biCompression = BI_RGB;
ptr->bmiColors[0].rgbRed = 0;
DWORD size = ((width * 8 + 31) / 32) * 4 * height;
*imageData = new BYTE[size];
HDC hdc = GetDC(0);
GetDIBits(hdc, hBitmap, 0, height, *imageData, (BITMAPINFO*)*pBitmapInfo, DIB_PAL_COLORS);
ReleaseDC(0, hdc);
Gdiplus::GdiplusShutdown(token);
}
void CMFCApplicationColorsView::OnDraw(CDC* pDC)
{
CMFCApplicationColorsDoc* pDoc = GetDocument();
ASSERT_VALID(pDoc);
if (!pDoc)
return;
BYTE *bitmapInfo = NULL;
BYTE *imageData = NULL;
ConvertTo8BitImage(&bitmapInfo, &imageData);
int palettesize = 256 * sizeof(RGBQUAD);
BYTE *newBitmapInfo = new BYTE[(sizeof(BITMAPINFO) + palettesize)];
ZeroMemory(newBitmapInfo, (sizeof(BITMAPINFO) + palettesize));
BITMAPINFO *ptr = (BITMAPINFO*)newBitmapInfo;
ptr->bmiHeader.biBitCount = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biBitCount;
ptr->bmiHeader.biClrImportant = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biClrImportant;
ptr->bmiHeader.biClrUsed = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biClrUsed;
ptr->bmiHeader.biCompression = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biCompression;
ptr->bmiHeader.biHeight = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biHeight;
ptr->bmiHeader.biPlanes = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biPlanes;
ptr->bmiHeader.biSize = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biSize;
ptr->bmiHeader.biSizeImage = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biSizeImage;
ptr->bmiHeader.biWidth = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biWidth;
ptr->bmiHeader.biXPelsPerMeter = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biXPelsPerMeter;
ptr->bmiHeader.biYPelsPerMeter = ((BITMAPINFO*)bitmapInfo)->bmiHeader.biYPelsPerMeter;
ptr->bmiColors[0] = ((BITMAPINFO*)bitmapInfo)->bmiColors[0];
RGBQUAD rgbquad[256];
memcpy(rgbquad, bitmapInfo + sizeof(BITMAPINFO), palettesize);
//memcpy(newBitmapInfo + sizeof(BITMAPINFO), rgbquad, palettesize);
NPLOGPALETTE pPal = (NPLOGPALETTE)LocalAlloc(LMEM_FIXED,
(sizeof(LOGPALETTE) +
(sizeof(PALETTEENTRY) * (palettesize))));
pPal->palVersion = 0x300;
pPal->palNumEntries = 256;
for (int i = 0; i < 256; i++)
{
pPal->palPalEntry[i].peRed = rgbquad[i].rgbRed;
pPal->palPalEntry[i].peGreen = rgbquad[i].rgbGreen;
pPal->palPalEntry[i].peBlue = rgbquad[i].rgbBlue;
pPal->palPalEntry[i].peFlags = 0;
}
HPALETTE hLogPal = CreatePalette((LPLOGPALETTE)pPal);
pDC->SelectPalette(CPalette::FromHandle(hLogPal), FALSE);
pDC->RealizePalette();
StretchDIBits(pDC->GetSafeHdc(), 0, 0, 1920, 1080, 0, 0, 1920, 1080,
imageData, ptr, DIB_PAL_COLORS, SRCCOPY);
delete[] bitmapInfo;
delete[] imageData;
}
HBITMAP hBitmap;
Gdiplus::Color color;
destination->GetHBITMAP(color, &hBitmap);
You did convert to an 8-bit bitmap; however, GetHBITMAP returns a bitmap handle compatible with your video card, which is probably 32-bit. GDI+ has already processed the palette and handed back a bitmap that has been converted back to 32-bit. An HBITMAP handle can be painted directly, for example using CreateCompatibleDC and BitBlt (a minimal sketch follows), so there is no need to obtain the palette and pass it to GDI, and no need for the 8-bit conversion in the first place.
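For reference, a minimal sketch of painting the HBITMAP directly (hdc, hBitmap, width, and height are assumed to come from the surrounding code):
HDC memDC = CreateCompatibleDC(hdc);            // memory DC to hold the source bitmap
HGDIOBJ oldBmp = SelectObject(memDC, hBitmap);
BitBlt(hdc, 0, 0, width, height, memDC, 0, 0, SRCCOPY);
SelectObject(memDC, oldBmp);                    // restore before deleting the DC
DeleteDC(memDC);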
If this is necessary for some reason, you can get the bits and palette from the 32-bit bitmap, put them in an 8-bit bitmap, and draw with StretchDIBits.
The main issue in your code is that it should use the DIB_RGB_COLORS flag for GetDIBits/StretchDIBits, because the device context is most likely 32-bit. There is no need for SelectPalette/RealizePalette either (unless it's an 8-bit display from 30 years ago).
It makes more sense to get the bits directly from GDI+ using LockBits, and the palette directly using GetPalette, as seen in the example below.
As an aside, source and destination have to be deleted before exit.
void draw(HDC hdc)
{
Gdiplus::Bitmap *source = Gdiplus::Bitmap::FromFile(L"D:/TestImage.bmp");
if(!source)
return;
int width = source->GetWidth();
int height = source->GetHeight();
Gdiplus::Bitmap *destination = source->Clone(0, 0, width, height,
PixelFormat8bppIndexed);
//get bitmap bits from GDI+
Gdiplus::BitmapData data;
Gdiplus::Rect rect(0, 0, width, height);
destination->LockBits(&rect, Gdiplus::ImageLockModeRead,
destination->GetPixelFormat(), &data);
int bufsize = data.Stride * data.Height;
BYTE *buf = new BYTE[bufsize];
memcpy(buf, data.Scan0, bufsize);
destination->UnlockBits(&data);
//setup BITMAPINFO
int bmpinfo_size = sizeof(BITMAPINFO) + 256 * 4;
BITMAPINFO* bmpinfo = (BITMAPINFO*)new BYTE[bmpinfo_size];
memset(bmpinfo, 0, bmpinfo_size);
bmpinfo->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmpinfo->bmiHeader.biWidth = width;
bmpinfo->bmiHeader.biHeight = -height;
bmpinfo->bmiHeader.biPlanes = 1;
bmpinfo->bmiHeader.biBitCount = 8;
bmpinfo->bmiHeader.biCompression = BI_RGB;
bmpinfo->bmiHeader.biSizeImage = bufsize;
//get palette from GDI+
int palsize = destination->GetPaletteSize();
Gdiplus::ColorPalette *palette = (Gdiplus::ColorPalette*)new BYTE[palsize];
destination->GetPalette(palette, palsize);
//set palette for BITMAPINFO
memset(&bmpinfo->bmiColors[0], 0, 256 * 4);
for(int i = 0; i < palette->Count; i++)
{
auto clr = Gdiplus::Color(palette->Entries[i]);
bmpinfo->bmiColors[i].rgbRed = clr.GetR();
bmpinfo->bmiColors[i].rgbGreen = clr.GetG();
bmpinfo->bmiColors[i].rgbBlue = clr.GetB();
bmpinfo->bmiColors[i].rgbReserved = 0;
}
StretchDIBits(hdc, 0, 0, width, height, 0, 0, width, height,
buf, bmpinfo, DIB_RGB_COLORS, SRCCOPY);
delete[] buf;
delete[] (BYTE*)bmpinfo;   // allocated as BYTE[], so free as BYTE[]
delete[] (BYTE*)palette;
delete destination;
delete source;
}
void CMFCApplicationColorsView::OnDraw(CDC* pDC)
{
CMFCApplicationColorsDoc* pDoc = GetDocument();
ASSERT_VALID(pDoc);
if (!pDoc)
return;
Gdiplus::GdiplusStartupInput tmp;
ULONG_PTR token;
Gdiplus::GdiplusStartup(&token, &tmp, NULL);
draw(pDC->GetSafeHdc());
Gdiplus::GdiplusShutdown(token);
}

FFMPEG and DirectX Capture in C++

I have a system that allows me to capture a window and save it as an MP4 using FFmpeg. I use gdigrab to capture the frames, but it is fairly slow (60 ms per av_read_frame call).
I know I can capture a game using the DirectX API, but I don't know how to convert the resulting BMP to an AVFrame.
The following code is the DirectX code I use to capture the frame
extern void* pBits;
extern IDirect3DDevice9* g_pd3dDevice;
IDirect3DSurface9* pSurface;
g_pd3dDevice->CreateOffscreenPlainSurface(ScreenWidth, ScreenHeight,
D3DFMT_A8R8G8B8, D3DPOOL_SCRATCH,
&pSurface, NULL);
g_pd3dDevice->GetFrontBufferData(0, pSurface);
D3DLOCKED_RECT lockedRect;
pSurface->LockRect(&lockedRect, NULL,
                   D3DLOCK_NO_DIRTY_UPDATE |
                   D3DLOCK_NOSYSLOCK | D3DLOCK_READONLY);
for (int i = 0; i < ScreenHeight; i++)
{
    memcpy((BYTE*)pBits + i * ScreenWidth * BITSPERPIXEL / 8,
           (BYTE*)lockedRect.pBits + i * lockedRect.Pitch,
           ScreenWidth * BITSPERPIXEL / 8);
}
pSurface->UnlockRect();
pSurface->Release();
And here is my read loop:
while (1) {
    if (av_read_frame(pFormatCtx, &packet) < 0 || exit)
        break;
    if (packet.stream_index == videoindex) {
        // Decode video frame
        av_packet_rescale_ts(&packet, { 1, std::stoi(pParser->GetVal("video-fps")) }, pCodecCtx->time_base);
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (frameFinished) {
            pFrame->pts = i;
            i++;
            sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                      pFrameRGB->data, pFrameRGB->linesize);
            pFrameRGB->pts = pFrame->pts;
            enc.encodeFrame(pFrameRGB);
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
}
How can I create an AVFrame from the BMP I have, without using av_read_frame?
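No answer is included above, but here is a hedged sketch of one approach: wrap the captured BGRA bits and convert them with sws_scale, bypassing av_read_frame entirely. It assumes pBits holds the packed 32-bit BGRA pixels copied in the capture code and enc_ctx is the already-opened encoder context; bgraToFrame, enc_ctx, and capture_sws are hypothetical names.
// Hypothetical helper: turn a captured BGRA buffer into an encodable AVFrame.
static AVFrame* bgraToFrame(const uint8_t* pBits, int width, int height,
                            AVCodecContext* enc_ctx, struct SwsContext** capture_sws)
{
    if (*capture_sws == NULL)
        *capture_sws = sws_getContext(width, height, AV_PIX_FMT_BGRA,
                                      enc_ctx->width, enc_ctx->height, enc_ctx->pix_fmt,
                                      SWS_BILINEAR, NULL, NULL, NULL);
    AVFrame* dst = av_frame_alloc();
    dst->format = enc_ctx->pix_fmt;
    dst->width  = enc_ctx->width;
    dst->height = enc_ctx->height;
    av_image_alloc(dst->data, dst->linesize, dst->width, dst->height,
                   (enum AVPixelFormat)dst->format, 32);
    const uint8_t* srcData[4]   = { pBits, NULL, NULL, NULL };
    int            srcStride[4] = { width * 4, 0, 0, 0 }; // packed BGRA: 4 bytes per pixel
    sws_scale(*capture_sws, srcData, srcStride, 0, height, dst->data, dst->linesize);
    return dst; // set pts, then hand it to the encoder in place of pFrameRGB
}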

Image created with CImg displays differently in a PDF when saved using GDI+, with the PDF created with JagPDF

What I need to do is very simple: plot a vector using CImg, save the graph as a JPG, and add the JPG to a PDF document using JagPDF. In order to save a CImg image as a JPG, the program uses an external program called ImageMagick.
I wanted to avoid using that program and use GDI+ instead, by first saving the CImg image as a BMP (it does that natively) and then saving the JPG from the BMP.
The MCVE program looks like this:
#include "CImg.h"
#include <jagpdf/api.h>
#include <vector>
using namespace jag;
using namespace cimg_library;
int main(int argc, char** const argv)
{
const float x0 = 0;
const float x1 = 9;
const int resolution = 5000;
// Create plot data.
CImg<double> values(1, resolution, 1, 1, 0);
const unsigned int r = resolution - 1;
for (int i1 = 0; i1 < resolution; ++i1)
{
double xtime = x0 + i1*(x1 - x0) / r;
values(0, i1) = 2 * sin(xtime);
}
CImg<unsigned char> graph;
graph.assign(750, 240, 1, 3, 255);
static const unsigned char black[] = { 0, 0, 0 }, white[] = { 255, 255, 255 };
static const unsigned char red[] = { 255, 200, 200 }, bred[] = { 255, 0, 0 };
graph.draw_grid(6, 6, 0, 0, false, true, red, 10.0f, 0xFFFFFFFF, 0xFFFFFFFF);
graph.draw_grid(30, 30, 0, 0, false, true, bred, 10.0f, 0xFFFFFFFF, 0xFFFFFFFF);
graph.draw_graph(values, black, 1, 1, 1, 2, -2, 0xFFFFFFFF);
//////////////Method 1: Using Image Magick////////////////
graph.save_jpeg("plot2.jpg");
pdf::Document doc(pdf::create_file("report.pdf"));
doc.page_start(848.68, 597.6);
pdf::Image imag2 = doc.image_load_file("plot2.jpg");
doc.page().canvas().image(imag2, 50, 50);
doc.page_end();
doc.finalize();
//////////////Method 2: Using GDI+////////////////
graph.save("plot.bmp");
SaveFile();
pdf::Document doc2(pdf::create_file("report2.pdf"));
doc2.page_start(848.68, 597.6);
pdf::Image imag = doc2.image_load_file("plot.jpg");
doc2.page().canvas().image(imag, 50, 50);
doc2.page_end();
doc2.finalize();
return 0;
}
With SaveFile() being the following function using GDI+ to convert from plot.bmp to plot.jpg
#include <windows.h>
#include <objidl.h>
#include <gdiplus.h>
#include "GdiplusHelperFunctions.h"
#pragma comment (lib,"Gdiplus.lib")
VOID SaveFile()
{
// Initialize GDI+.
Gdiplus::GdiplusStartupInput gdiplusStartupInput;
ULONG_PTR gdiplusToken;
GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);
CLSID encoderClsid;
Status stat;
Image* image = new Gdiplus::Image(L"plot.bmp");
// Get the CLSID of the JPEG encoder.
GetEncoderClsid(L"image/jpeg", &encoderClsid);
stat = image->Save(L"plot.jpg", &encoderClsid, NULL);
if (stat == Ok)
printf("plot.jpg was saved successfully\n");
else
printf("Failure: stat = %d\n", stat);
delete image;
GdiplusShutdown(gdiplusToken);
}
Both methods save JPGs whose properties show the same size, but the first places the image correctly in the PDF while the second puts a huge image in the PDF, even though they are supposed to be the same size. How can I fix this?
Attached are screenshots of report1 and report2.
SOLUTION
With your suggestions, I was able to modify the SaveFile function to control the DPI. I am posting the new code in case someone needs it.
VOID SaveFile()
{
// Initialize GDI+.
Gdiplus::GdiplusStartupInput gdiplusStartupInput;
ULONG_PTR gdiplusToken;
GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);
CLSID encoderClsid;
Status stat;
EncoderParameters encoderParameters;
ULONG quality;
Gdiplus::Bitmap* bitmap = new Gdiplus::Bitmap(L"plot.bmp");
Gdiplus::REAL dpi = 96;
bitmap->SetResolution(dpi,dpi);
// Get the CLSID of the JPEG encoder.
GetEncoderClsid(L"image/jpeg", &encoderClsid);
encoderParameters.Count = 1;
encoderParameters.Parameter[0].Guid = EncoderQuality;
encoderParameters.Parameter[0].Type = EncoderParameterValueTypeLong;
encoderParameters.Parameter[0].NumberOfValues = 1;
quality = 100;
encoderParameters.Parameter[0].Value = &quality;
stat = bitmap->Save(L"plot.jpg", &encoderClsid, &encoderParameters);
if (stat == Ok)
printf("plot.jpg was saved successfully\n");
else
printf("Failure: stat = %d\n", stat);
delete bitmap;
GdiplusShutdown(gdiplusToken);
return;
}
I would guess ImageMagick includes some perks that filter the image to fit the canvas.
I'd try resizing the image before exporting to JPEG. You might give this guide a go; it basically shows you can resize the BMP (in the example it checks the width/height ratio). The goal should be to make the image exactly the size you need for the canvas.
Gdiplus::Bitmap* GDIPlusImageProcessor::ResizeClone(Bitmap *bmp, INT width, INT height)
{
UINT o_height = bmp->GetHeight();
UINT o_width = bmp->GetWidth();
INT n_width = width;
INT n_height = height;
double ratio = ((double)o_width) / ((double)o_height);
if (o_width > o_height) {
// Resize down by width
n_height = static_cast<UINT>(((double)n_width) / ratio);
} else {
n_width = static_cast<UINT>(n_height * ratio);
}
Gdiplus::Bitmap* newBitmap = new Gdiplus::Bitmap(n_width, n_height, bmp->GetPixelFormat());
Gdiplus::Graphics graphics(newBitmap);
graphics.DrawImage(bmp, 0, 0, n_width, n_height);
return newBitmap;
}
And then save it using the encoder. Also, check whether you need to set the quality of the resulting JPEG using EncoderParameters, as shown in the official documentation.
// Get the CLSID of the JPEG encoder.
GetEncoderClsid(L"image/jpeg", &encoderClsid);
// Before we call Image::Save, we must initialize an
// EncoderParameters object. The EncoderParameters object
// has an array of EncoderParameter objects. In this
// case, there is only one EncoderParameter object in the array.
// The one EncoderParameter object has an array of values.
// In this case, there is only one value (of type ULONG)
// in the array. We will let this value vary from 0 to 100.
encoderParameters.Count = 1;
encoderParameters.Parameter[0].Guid = EncoderQuality;
encoderParameters.Parameter[0].Type = EncoderParameterValueTypeLong;
encoderParameters.Parameter[0].NumberOfValues = 1;
// Save the image as a JPEG with quality level 0.
quality = 0;
encoderParameters.Parameter[0].Value = &quality;
stat = image->Save(L"Shapes001.jpg", &encoderClsid, &encoderParameters);
if(stat == Ok)
wprintf(L"%s saved successfully.\n", L"Shapes001.jpg");
else
wprintf(L"%d Attempt to save %s failed.\n", stat, L"Shapes001.jpg");
// Save the image as a JPEG with quality level 50.
quality = 50;
encoderParameters.Parameter[0].Value = &quality;
stat = image->Save(L"Shapes050.jpg", &encoderClsid, &encoderParameters);
if(stat == Ok)
wprintf(L"%s saved successfully.\n", L"Shapes050.jpg");
else
wprintf(L"%d Attempt to save %s failed.\n", stat, L"Shapes050.jpg");
EDIT: JagPDF also says image DPI is taken into account when painting, so we are probably on the right path.
Let's say we would like to tile a region of the page with our image. To do so we need to know the image dimensions. Because width() and height() return the size in pixels, we need to recalculate these to user space units.
Image DPI is taken into account when the image is painted onto a canvas. An image usually specifies its DPI; if it does not, a value of images.default_dpi is used.
img_width = img.width() / img.dpi_x() * 72
img_height = img.height() / img.dpi_y() * 72
for x in range(7):
    for y in range(15):
        canvas.image(img, 90 + x * img_width, 100 + y * img_height)
You might try changing DPI using this SO answer.
If I understand your question correctly, your aim is to remove the dependency on ImageMagick.
You can do that more simply by telling CImg to use its built-in support for JPEG. All you need to do is
define cimg_use_jpeg
link with libjpeg
So your compilation command becomes:
g++ -Dcimg_use_jpeg ... -ljpeg
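Equivalently, you can put the define in the source instead of on the command line; it must appear before the header is included:
#define cimg_use_jpeg   // must come before CImg.h so CImg compiles in libjpeg support
#include "CImg.h"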

Processing an AVFrame using an OpenCV Mat causes encoding errors

I'm trying to decode a video file using FFmpeg, grab the AVFrame object, convert it to an OpenCV Mat object, do some processing, then convert it back to an AVFrame object and encode it back to a video file.
Well, the program runs, but it produces a bad result.
I keep getting errors like "top block unavailable for requested intra mode at 7 19", "error while decoding MB 7 19, bytestream 358", "concealing 294 DC, 294 AC, 294 MV errors in P frame", etc.
And the resulting video has glitches all over it.
I'm guessing the cause is my AVFrame-to-Mat and Mat-to-AVFrame methods; here they are:
//unspecified function
temp_rgb_frame = avcodec_alloc_frame();
int numBytes = avpicture_get_size(PIX_FMT_RGB24, width, height);
uint8_t * frame2_buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture*)temp_rgb_frame, frame2_buffer, PIX_FMT_RGB24, width, height);
void CoreProcessor::Mat2AVFrame(cv::Mat **input, AVFrame *output)
{
//create a AVPicture frame from the opencv Mat input image
avpicture_fill((AVPicture *)temp_rgb_frame,
(uint8_t *)(*input)->data,
AV_PIX_FMT_RGB24,
(*input)->cols,
(*input)->rows);
//convert the frame to the color space and pixel format specified in the sws context
sws_scale(
rgb_to_yuv_context,
temp_rgb_frame->data,
temp_rgb_frame->linesize,
0, height,
((AVPicture *)output)->data,
((AVPicture *)output)->linesize);
(*input)->release();
}
void CoreProcessor::AVFrame2Mat(AVFrame *pFrame, cv::Mat **mat)
{
sws_scale(
yuv_to_rgb_context,
((AVPicture*)pFrame)->data,
((AVPicture*)pFrame)->linesize,
0, height,
((AVPicture *)temp_rgb_frame)->data,
((AVPicture *)temp_rgb_frame)->linesize);
*mat = new cv::Mat(pFrame->height, pFrame->width, CV_8UC3, temp_rgb_frame->data[0]);
}
void CoreProcessor::process_frame(AVFrame *pFrame)
{
cv::Mat *mat = NULL;
AVFrame2Mat(pFrame, &mat);
Mat2AVFrame(&mat, pFrame);
}
Am I doing something wrong with the memory? If I remove the processing part and just decode and then encode the frame, the result is correct.
Well, it turns out I made a mistake in the initialization of temp_rgb_frame; it should be like this:
temp_rgb_frame = avcodec_alloc_frame();
int numBytes = avpicture_get_size(PIX_FMT_RGB24, width, height);
uint8_t * frame2_buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture*)temp_rgb_frame, frame2_buffer, PIX_FMT_RGB24, width, height);
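One more stride pitfall worth checking in code like this (a hedged suggestion, not part of the original fix): when wrapping the frame in a cv::Mat, pass the frame's linesize as the Mat step, since sws_scale buffers may pad each row.
// In AVFrame2Mat: give cv::Mat the FFmpeg stride explicitly.
*mat = new cv::Mat(pFrame->height, pFrame->width, CV_8UC3,
                   temp_rgb_frame->data[0],
                   temp_rgb_frame->linesize[0]); // step = bytes per row, incl. padding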