For example, my computer has two monitors: the primary monitor's resolution is 800*600 and the other monitor's resolution is 1600*900.
How can I set the resolution on one monitor only?
The function 'SetDisplayConfig' changes the screen resolution on both monitors...
https://learn.microsoft.com/fr-fr/windows-hardware/drivers/display/ccd-apis
https://learn.microsoft.com/fr-fr/windows/win32/api/winuser/nf-winuser-setdisplayconfig
[DllImport("User32.dll")]
public static extern int SetDisplayConfig(
uint numPathArrayElements,
[In] DisplayConfigPathInfo[] pathArray,
uint numModeInfoArrayElements,
[In] DisplayConfigModeInfo[] modeInfoArray,
SdcFlags flags
);
private static Boolean SetDisplaySettings(int Id_Monitor, uint Width, uint Height, uint Scaling)
{
    try
    {
        CCDWrapper.DisplayConfigPathInfo[] pathInfoArray = new CCDWrapper.DisplayConfigPathInfo[0];
        CCDWrapper.DisplayConfigModeInfo[] modeInfoArray = new CCDWrapper.DisplayConfigModeInfo[0];
        CCDWrapper.MonitorAdditionalInfo[] additionalInfo = new CCDWrapper.MonitorAdditionalInfo[0];
        bool Status = GetDisplaySettings(ref pathInfoArray, ref modeInfoArray, ref additionalInfo, true);

        CCDWrapper.DisplayConfigPathInfo[] pathInfoArrayCurrent = new CCDWrapper.DisplayConfigPathInfo[0];
        CCDWrapper.DisplayConfigModeInfo[] modeInfoArrayCurrent = new CCDWrapper.DisplayConfigModeInfo[0];
        CCDWrapper.MonitorAdditionalInfo[] additionalInfoCurrent = new CCDWrapper.MonitorAdditionalInfo[0];
        bool StatusCurrent = GetDisplaySettings(ref pathInfoArrayCurrent, ref modeInfoArrayCurrent, ref additionalInfoCurrent, false);

        if (StatusCurrent)
        {
            // Copy the current adapter IDs into the queried paths and set the requested scaling.
            for (int iPathInfo = 0; iPathInfo < pathInfoArray.Length; iPathInfo++)
            {
                for (int iPathInfoCurrent = 0; iPathInfoCurrent < pathInfoArrayCurrent.Length; iPathInfoCurrent++)
                {
                    if ((pathInfoArray[iPathInfo].sourceInfo.id == pathInfoArrayCurrent[iPathInfoCurrent].sourceInfo.id) &&
                        (pathInfoArray[iPathInfo].targetInfo.id == pathInfoArrayCurrent[iPathInfoCurrent].targetInfo.id))
                    {
                        pathInfoArray[iPathInfo].sourceInfo.adapterId.LowPart = pathInfoArrayCurrent[iPathInfoCurrent].sourceInfo.adapterId.LowPart;
                        pathInfoArray[iPathInfo].targetInfo.adapterId.LowPart = pathInfoArrayCurrent[iPathInfoCurrent].targetInfo.adapterId.LowPart;
                        pathInfoArray[iPathInfo].targetInfo.scaling = (CCDWrapper.DisplayConfigScaling)Scaling;
                        break;
                    }
                }
            }

            // Write the requested width/height into the matching source mode.
            for (int iModeInfo = 0; iModeInfo < modeInfoArray.Length; iModeInfo++)
            {
                for (int iPathInfo = 0; iPathInfo < pathInfoArray.Length; iPathInfo++)
                {
                    if ((modeInfoArray[iModeInfo].id == pathInfoArray[iPathInfo].targetInfo.id) &&
                        (modeInfoArray[iModeInfo].infoType == CCDWrapper.DisplayConfigModeInfoType.Target))
                    {
                        for (int iModeInfoSource = 0; iModeInfoSource < modeInfoArray.Length; iModeInfoSource++)
                        {
                            if ((modeInfoArray[iModeInfoSource].id == pathInfoArray[iPathInfo].sourceInfo.id) &&
                                (modeInfoArray[iModeInfoSource].adapterId.LowPart == modeInfoArray[iModeInfo].adapterId.LowPart) &&
                                (modeInfoArray[iModeInfoSource].infoType == CCDWrapper.DisplayConfigModeInfoType.Source))
                            {
                                modeInfoArray[iModeInfoSource].adapterId.LowPart = pathInfoArray[iPathInfo].sourceInfo.adapterId.LowPart;
                                modeInfoArray[iModeInfoSource].sourceMode.height = Height;
                                modeInfoArray[iModeInfoSource].sourceMode.width = Width;
                                break;
                            }
                        }
                        modeInfoArray[iModeInfo].adapterId.LowPart = pathInfoArray[iPathInfo].targetInfo.adapterId.LowPart;
                        break;
                    }
                }
            }

            uint numPathArrayElements = System.Convert.ToUInt32(pathInfoArray.Length);
            uint numModeInfoArrayElements = System.Convert.ToUInt32(modeInfoArray.Length);
            long Result = CCDWrapper.SetDisplayConfig(numPathArrayElements, pathInfoArray, numModeInfoArrayElements, modeInfoArray,
                CCDWrapper.SdcFlags.Apply | CCDWrapper.SdcFlags.UseSuppliedDisplayConfig | CCDWrapper.SdcFlags.SaveToDatabase | CCDWrapper.SdcFlags.NoOptimization | CCDWrapper.SdcFlags.AllowChanges);
            return Result == 0;
        }
        else
        {
            return false;
        }
    }
    catch (Exception ex)
    {
        EventLog.WriteEntry("ResolutionEcran", "SetDisplaySettings error: " + ex.Message, EventLogEntryType.Error);
        return false;
    }
}
private static Boolean GetDisplaySettings(ref CCDWrapper.DisplayConfigPathInfo[] pathInfoArray, ref CCDWrapper.DisplayConfigModeInfo[] modeInfoArray, ref CCDWrapper.MonitorAdditionalInfo[] additionalInfo, Boolean ActiveOnly, [System.Runtime.InteropServices.Optional] int ID_Monitor)
{
    uint numPathArrayElements;
    uint numModeInfoArrayElements;
    CCDWrapper.QueryDisplayFlags queryFlags = CCDWrapper.QueryDisplayFlags.AllPaths;
    if (ActiveOnly)
    {
        queryFlags = CCDWrapper.QueryDisplayFlags.OnlyActivePaths;
    }

    var status = CCDWrapper.GetDisplayConfigBufferSizes(queryFlags, out numPathArrayElements, out numModeInfoArrayElements);
    if (status == 0)
    {
        pathInfoArray = new CCDWrapper.DisplayConfigPathInfo[numPathArrayElements];
        modeInfoArray = new CCDWrapper.DisplayConfigModeInfo[numModeInfoArrayElements];
        additionalInfo = new CCDWrapper.MonitorAdditionalInfo[numModeInfoArrayElements];

        status = CCDWrapper.QueryDisplayConfig(queryFlags, ref numPathArrayElements, pathInfoArray, ref numModeInfoArrayElements, modeInfoArray, IntPtr.Zero);
        if (status == 0)
        {
            for (var iMode = 0; iMode < numModeInfoArrayElements; iMode++)
            {
                if (modeInfoArray[iMode].infoType == CCDWrapper.DisplayConfigModeInfoType.Target)
                {
                    try
                    {
                        additionalInfo[iMode] = CCDWrapper.GetMonitorAdditionalInfo(modeInfoArray[iMode].adapterId, modeInfoArray[iMode].id);
                    }
                    catch (Exception)
                    {
                        additionalInfo[iMode].valid = false;
                    }
                }
            }
            return true;
        }
        else
        {
            // Error querying the display configuration.
        }
    }
    else
    {
        // Error getting the buffer sizes.
    }
    return false;
}
Use the ChangeDisplaySettingsEx function to change the settings of the specified display device to the specified graphics mode. The following is an example you can refer to.
#include <windows.h>
#include <stdio.h>

int main()
{
    for (DWORD devNum = 0; ; devNum++)
    {
        DISPLAY_DEVICE dev = { 0 };
        dev.cb = sizeof(DISPLAY_DEVICE);
        if (!EnumDisplayDevices(NULL, devNum, &dev, EDD_GET_DEVICE_INTERFACE_NAME))
            break;

        wprintf(L"Display name: %s \n", dev.DeviceName);

        DEVMODE dMode = { 0 };
        dMode.dmSize = sizeof(dMode);
        if (!EnumDisplaySettings(dev.DeviceName, ENUM_CURRENT_SETTINGS, &dMode))
        {
            wprintf(L"EnumDisplaySettings error: %d \n", GetLastError());
            continue;
        }

        wprintf(L"Display old settings: \n");
        wprintf(L"dmBitsPerPel: %d \n", dMode.dmBitsPerPel);
        wprintf(L"dmPelsWidth: %d \n", dMode.dmPelsWidth);
        wprintf(L"dmPelsHeight: %d \n", dMode.dmPelsHeight);
        wprintf(L"dmDisplayFlags: %x \n", dMode.dmDisplayFlags);
        wprintf(L"dmDisplayFrequency: %d \n", dMode.dmDisplayFrequency);

        dMode.dmPelsWidth = 800;
        dMode.dmPelsHeight = 600;
        ChangeDisplaySettingsEx(dev.DeviceName, &dMode, NULL, 0, NULL);

        DEVMODE dModeNew = { 0 };
        dModeNew.dmSize = sizeof(DEVMODE);
        if (!EnumDisplaySettings(dev.DeviceName, ENUM_CURRENT_SETTINGS, &dModeNew))
        {
            wprintf(L"EnumDisplaySettings error: %d \n", GetLastError());
            continue;
        }

        wprintf(L"Display new settings: \n");
        wprintf(L"dmBitsPerPel: %d \n", dModeNew.dmBitsPerPel);
        wprintf(L"dmPelsWidth: %d \n", dModeNew.dmPelsWidth);
        wprintf(L"dmPelsHeight: %d \n", dModeNew.dmPelsHeight);
        wprintf(L"dmDisplayFlags: %x \n", dModeNew.dmDisplayFlags);
        wprintf(L"dmDisplayFrequency: %d \n", dModeNew.dmDisplayFrequency);
    }
    getchar();
    return 0;
}
I set dwflags to 0 to let the graphics mode for the current screen change dynamically. Refer to the dwflags parameter section of ChangeDisplaySettingsEx's documentation to decide how you would like the graphics mode to be changed.
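Note that ChangeDisplaySettingsEx takes the device name as its first parameter, so if you only want to touch one monitor you can skip the loop entirely and pass just that device. A minimal sketch (the name \\.\DISPLAY2 is only an example; pick the device you found via EnumDisplayDevices above):
#include <windows.h>

// Change the resolution of a single display, leaving the others untouched.
// Assumption: deviceName is one of the names reported by EnumDisplayDevices.
bool SetResolutionForDevice(const wchar_t* deviceName, DWORD width, DWORD height)
{
    DEVMODEW dMode = { 0 };
    dMode.dmSize = sizeof(dMode);
    if (!EnumDisplaySettingsW(deviceName, ENUM_CURRENT_SETTINGS, &dMode))
        return false;
    dMode.dmPelsWidth = width;
    dMode.dmPelsHeight = height;
    dMode.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT;
    // 0 = dynamic change; use CDS_UPDATEREGISTRY instead to persist the mode.
    return ChangeDisplaySettingsExW(deviceName, &dMode, NULL, 0, NULL) == DISP_CHANGE_SUCCESSFUL;
}

// Example: SetResolutionForDevice(L"\\\\.\\DISPLAY2", 800, 600);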
Sorry for my late response... I was away.
I also need to change the scaling of the monitor (so that black bars are removed at certain resolutions).
[Flags]
public enum DisplayConfigScaling : uint
{
    Zero = 0x0,
    Identity = 1,
    Centered = 2,
    Stretched = 3,
    Aspectratiocenteredmax = 4,
    Custom = 5,
    Preferred = 128,
    ForceUint32 = 0xFFFFFFFF
}
Can the function 'ChangeDisplaySettingsEx' also change the scaling of a monitor?
Thank you very much for your help.
I'm working on video encoding that will be used in a Unity plugin. I have image encoding working, but now I'm at the audio. So I'm trying to encode only the audio into an mp4 file with AAC encoding. And I'm stuck. The resulting file does not contain anything. Also, from what I understand, the AAC encoder in ffmpeg only supports AV_SAMPLE_FMT_FLTP, which is why I use it. Here's my code:
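(A quick way to sanity-check that claim, separate from the encoder code below: ask the codec itself which sample formats it supports. sample_fmts is an AV_SAMPLE_FMT_NONE-terminated array; for the native AAC encoder it typically lists only fltp.)
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>
}
#include <stdio.h>

void print_supported_sample_fmts()
{
    // Prints the sample formats the AAC encoder accepts.
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (codec && codec->sample_fmts) {
        for (int i = 0; codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
            printf("%s\n", av_get_sample_fmt_name(codec->sample_fmts[i]));
    }
}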
Setup:
int initialize_encoding_audio(const char *filename)
{
    int ret;
    AVCodecID aud_codec_id = AV_CODEC_ID_AAC;
    AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;

    avcodec_register_all();
    av_register_all();

    aud_codec = avcodec_find_encoder(aud_codec_id);
    avcodec_register(aud_codec);
    if (!aud_codec)
        return COULD_NOT_FIND_AUD_CODEC;

    aud_codec_context = avcodec_alloc_context3(aud_codec);
    if (!aud_codec_context)
        return CONTEXT_CREATION_ERROR;

    aud_codec_context->bit_rate = 192000;
    aud_codec_context->sample_rate = select_sample_rate(aud_codec);
    aud_codec_context->sample_fmt = sample_fmt;
    aud_codec_context->channel_layout = AV_CH_LAYOUT_STEREO;
    aud_codec_context->channels = av_get_channel_layout_nb_channels(aud_codec_context->channel_layout);
    aud_codec_context->codec = aud_codec;
    aud_codec_context->codec_id = aud_codec_id;

    ret = avcodec_open2(aud_codec_context, aud_codec, NULL);
    if (ret < 0)
        return COULD_NOT_OPEN_AUD_CODEC;

    outctx = avformat_alloc_context();
    ret = avformat_alloc_output_context2(&outctx, NULL, "mp4", filename);
    outctx->audio_codec = aud_codec;
    outctx->audio_codec_id = aud_codec_id;

    audio_st = avformat_new_stream(outctx, aud_codec);
    audio_st->codecpar->bit_rate = aud_codec_context->bit_rate;
    audio_st->codecpar->sample_rate = aud_codec_context->sample_rate;
    audio_st->codecpar->channels = aud_codec_context->channels;
    audio_st->codecpar->channel_layout = aud_codec_context->channel_layout;
    audio_st->codecpar->codec_id = aud_codec_id;
    audio_st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    audio_st->codecpar->format = sample_fmt;
    audio_st->codecpar->frame_size = aud_codec_context->frame_size;
    audio_st->codecpar->block_align = aud_codec_context->block_align;
    audio_st->codecpar->initial_padding = aud_codec_context->initial_padding;

    outctx->streams = new AVStream*[1];
    outctx->streams[0] = audio_st;

    av_dump_format(outctx, 0, filename, 1);

    if (!(outctx->oformat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&outctx->pb, filename, AVIO_FLAG_WRITE) < 0)
            return COULD_NOT_OPEN_FILE;
    }

    ret = avformat_write_header(outctx, NULL);

    aud_frame = av_frame_alloc();
    aud_frame->nb_samples = aud_codec_context->frame_size;
    aud_frame->format = aud_codec_context->sample_fmt;
    aud_frame->channel_layout = aud_codec_context->channel_layout;

    int buffer_size = av_samples_get_buffer_size(NULL, aud_codec_context->channels, aud_codec_context->frame_size,
                                                 aud_codec_context->sample_fmt, 0);
    av_frame_get_buffer(aud_frame, buffer_size / aud_codec_context->channels);

    if (!aud_frame)
        return COULD_NOT_ALLOCATE_FRAME;

    aud_frame_counter = 0;
    return 0;
}
Encoding:
int encode_audio_samples(uint8_t **aud_samples)
{
    int ret;
    int buffer_size = av_samples_get_buffer_size(NULL, aud_codec_context->channels, aud_codec_context->frame_size,
                                                 aud_codec_context->sample_fmt, 0);

    for (size_t i = 0; i < buffer_size / aud_codec_context->channels; i++)
    {
        aud_frame->data[0][i] = aud_samples[0][i];
        aud_frame->data[1][i] = aud_samples[1][i];
    }

    aud_frame->pts = aud_frame_counter++;

    ret = avcodec_send_frame(aud_codec_context, aud_frame);
    if (ret < 0)
        return ERROR_ENCODING_SAMPLES_SEND;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    fflush(stdout);

    while (true)
    {
        ret = avcodec_receive_packet(aud_codec_context, &pkt);
        if (!ret)
        {
            av_packet_rescale_ts(&pkt, aud_codec_context->time_base, audio_st->time_base);
            pkt.stream_index = audio_st->index;
            av_write_frame(outctx, &pkt);
            av_packet_unref(&pkt);
        }
        if (ret == AVERROR(EAGAIN))
            break;
        else if (ret < 0)
            return ERROR_ENCODING_SAMPLES_RECEIVE;
        else
            break;
    }
    return 0;
}
Finish encoding:
int finish_audio_encoding()
{
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    fflush(stdout);

    // Sending NULL puts the encoder into flush mode.
    int ret = avcodec_send_frame(aud_codec_context, NULL);
    if (ret < 0)
        return ERROR_ENCODING_FRAME_SEND;

    while (true)
    {
        ret = avcodec_receive_packet(aud_codec_context, &pkt);
        if (!ret)
        {
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(pkt.pts, aud_codec_context->time_base, audio_st->time_base);
            if (pkt.dts != AV_NOPTS_VALUE)
                pkt.dts = av_rescale_q(pkt.dts, aud_codec_context->time_base, audio_st->time_base);
            av_write_frame(outctx, &pkt);
            av_packet_unref(&pkt);
        }
        if (ret == AVERROR_EOF)
            break;
        else if (ret < 0)
            return ERROR_ENCODING_FRAME_RECEIVE;
    }

    av_write_trailer(outctx);
    return 0;
}
Main:
void get_audio_frame(float_t *left_samples, float_t *right_samples, int frame_size, float* t, float* tincr, float* tincr2)
{
    int j;
    float v;
    for (j = 0; j < frame_size; j++)
    {
        v = sin(*t);
        *left_samples = v;
        *right_samples = v;
        left_samples++;
        right_samples++;
        *t += *tincr;
        *tincr += *tincr2;
    }
}
int main()
{
    int frame_rate = 30; // this should be something like 96000 / 1024, I guess?
    float t, tincr, tincr2;

    initialize_encoding_audio("audio.mp4");

    int sec = 50;

    float_t** aud_samples;
    int src_samples_linesize;
    int src_nb_samples = 1024;
    int src_channels = 2;

    int ret = av_samples_alloc_array_and_samples((uint8_t***)&aud_samples, &src_samples_linesize, src_channels,
                                                 src_nb_samples, AV_SAMPLE_FMT_FLTP, 0);

    t = 0;
    tincr = 0;
    tincr2 = 0;

    for (size_t i = 0; i < frame_rate * sec; i++)
    {
        get_audio_frame(aud_samples[0], aud_samples[1], src_nb_samples, &t, &tincr, &tincr2);
        encode_audio_samples((uint8_t **)aud_samples);
    }

    finish_audio_encoding();
    //cleanup();
    return 0;
}
I guess the first thing I would want to make sure I got right is the synthetic sound generation and how I transfer it to the AVFrame. Are my conversions correct? But feel free to point out anything else that might be wrong.
Thanks in advance!
Edit: the whole source: http://pastebin.com/jYtmkhek
Edit2: Added initialization of tincr & tincr2
Unless I'm missing something from the pastebin, you forgot to initialize a few variables. You're using garbage to generate your samples.
float t, tincr, tincr2;
[...]
get_audio_frame(aud_samples[0], aud_samples[1], src_nb_samples, &t, &tincr, &tincr2);
You probably want to start with t=0 and increment by 2 * PI * frequency / sample rate for a sine wave.
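For example, for a 440 Hz tone at a 96 kHz sample rate (this matches the diff further down; M_PI comes from math.h):
float t = 0;                             // phase starts at zero
float tincr = 2 * M_PI * 440.0 / 96000;  // 440 Hz at 96 kHz
float tincr2 = 0;                        // no frequency sweep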
Also, avformat_new_stream() creates the stream for you, don't do it with new.
Update:
I removed all the C++ stuff to test this. Here's the code that works: pastebin
And here's the resulting file: audio.mp4
ffmpeg -i audio.mp4 -filter_complex "showwaves=s=640x120:mode=line:colors=white" -frames:v 1 wave.jpg
Diff:
1,6d0
< #include "encoder.h"
< #include <algorithm>
< #include <iterator>
<
< extern "C"
< {
14a9
> #include <math.h>
40,41c35,36
< SwsContext *sws_ctx;
< SwrContext *swr_ctx = NULL;
---
> struct SwsContext *sws_ctx;
> struct SwrContext *swr_ctx = NULL;
76,77c71,72
< AVCodecID aud_codec_id = AV_CODEC_ID_AAC;
< AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
---
> enum AVCodecID aud_codec_id = AV_CODEC_ID_AAC;
> enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
125,126c120,121
< outctx->streams = new AVStream*[1];
< outctx->streams[0] = audio_st;
---
> //outctx->streams = new AVStream*[1];
> //outctx->streams[0] = audio_st;
182c177
< while (true)
---
> while (1)
216c211
< while (true)
---
> while (1)
291c286
< float t, tincr, tincr2;
---
> float t = 0, tincr = 2 * M_PI * 440.0 / 96000, tincr2 = 0;
317d311
< }
I want to extract a thumbnail from an image, so I tried to use GDI+. What I did was create a new small Bitmap to hold the thumbnail and use Graphics::DrawImage() to draw the image into it (thus producing a thumbnail).
But when using this approach on a large number of images, it gets really slow. Is there a way to speed it up (using only the Windows API and no external libraries)?
Note: I know that some images store a thumbnail inside them, but I am looking to extract a thumbnail from the ones that don't.
You can increase speed by loading the thumbnail from EXIF data (for JPEG and some other formats). But GetThumbnailImage's result isn't good. Here is another implementation:
Usage:
Gdiplus::Bitmap* thumb = GetThumbnail(filename, thumbWidth, thumbHeight);
Code:
PropertyItem* GetPropertyItemFromImage(Gdiplus::Image* bm, PROPID propId) {
    UINT itemSize = bm->GetPropertyItemSize(propId);
    if (!itemSize) {
        return 0;
    }
    PropertyItem* item = reinterpret_cast<PropertyItem*>(malloc(itemSize));
    if (bm->GetPropertyItem(propId, itemSize, item) != Ok) {
        free(item);
        return 0;
    }
    return item;
}
UINT VoidToInt(void* data, unsigned int size) {
    switch (size) {
        case 8:
            return *reinterpret_cast<UINT*>(data);
        case 4:
            return *reinterpret_cast<DWORD*>(data);
        case 2:
            return *reinterpret_cast<WORD*>(data);
        default:
            return *reinterpret_cast<BYTE*>(data);
    }
}
typedef IStream* (STDAPICALLTYPE *SHCreateMemStreamFuncType)(const BYTE *pInit, UINT cbInit);
SHCreateMemStreamFuncType SHCreateMemStreamFunc = 0;

bool IsVista() {
    static int isVista = -1;
    if (isVista == -1)
    {
        OSVERSIONINFO osver;
        osver.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
        isVista = (::GetVersionEx(&osver) &&
                   osver.dwPlatformId == VER_PLATFORM_WIN32_NT &&
                   (osver.dwMajorVersion >= 6));
    }
    return isVista != FALSE;
}
Gdiplus::Bitmap* BitmapFromMemory(BYTE* data, unsigned int imageSize) {
    if (IsVista()) {
        // On Vista and later, use SHCreateMemStream (loaded dynamically from Shlwapi.dll).
        if (!SHCreateMemStreamFunc) {
            HMODULE lib = LoadLibrary(_T("Shlwapi.dll"));
            SHCreateMemStreamFunc = reinterpret_cast<SHCreateMemStreamFuncType>(GetProcAddress(lib, "SHCreateMemStream"));
            if (!SHCreateMemStreamFunc) {
                return 0;
            }
        }
        Gdiplus::Bitmap* bitmap;
        IStream* pStream = SHCreateMemStreamFunc(data, imageSize);
        if (pStream) {
            bitmap = Gdiplus::Bitmap::FromStream(pStream);
            pStream->Release();
            if (bitmap) {
                if (bitmap->GetLastStatus() == Gdiplus::Ok) {
                    return bitmap;
                }
                delete bitmap;
            }
        }
    } else {
        // Fall back to CreateStreamOnHGlobal on older systems.
        HGLOBAL buffer = ::GlobalAlloc(GMEM_MOVEABLE, imageSize);
        if (buffer) {
            void* pBuffer = ::GlobalLock(buffer);
            if (pBuffer) {
                Gdiplus::Bitmap* bitmap;
                CopyMemory(pBuffer, data, imageSize);
                IStream* pStream = NULL;
                if (::CreateStreamOnHGlobal(buffer, FALSE, &pStream) == S_OK) {
                    bitmap = Gdiplus::Bitmap::FromStream(pStream);
                    pStream->Release();
                    if (bitmap) {
                        if (bitmap->GetLastStatus() == Gdiplus::Ok) {
                            return bitmap;
                        }
                        delete bitmap;
                    }
                }
                ::GlobalUnlock(buffer);
            }
            ::GlobalFree(buffer);
        }
    }
    return 0;
}
// Based on the original method from http://danbystrom.se/2009/01/05/imagegetthumbnailimage-and-beyond/
Gdiplus::Size AdaptProportionalSize(const Gdiplus::Size& szMax, const Gdiplus::Size& szReal); // defined below

Gdiplus::Bitmap* GetThumbnail(Gdiplus::Image* bm, int width, int height, Gdiplus::Size* realSize = 0) {
    using namespace Gdiplus;
    if (realSize) {
        realSize->Width = bm->GetWidth();
        realSize->Height = bm->GetHeight();
    }
    Size sz = AdaptProportionalSize(Size(width, height), Size(bm->GetWidth(), bm->GetHeight()));
    Bitmap* res = new Bitmap(sz.Width, sz.Height);
    Graphics gr(res);
    gr.SetInterpolationMode(Gdiplus::InterpolationModeHighQualityBicubic);

    UINT size = bm->GetPropertyItemSize(PropertyTagThumbnailData);
    if (size) {
        // Loading thumbnail from EXIF data (fast)
        enum ThumbCompression { ThumbCompressionJPEG, ThumbCompressionRGB, ThumbCompressionYCbCr, ThumbCompressionUnknown }
            compression = ThumbCompressionJPEG;

        PropertyItem* thumbnailFormatItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailFormat);
        if (thumbnailFormatItem) {
            UINT format = VoidToInt(thumbnailFormatItem->value, thumbnailFormatItem->length);
            if (format == 0) {
                compression = ThumbCompressionRGB;
            } else if (format == 1) {
                compression = ThumbCompressionJPEG;
            } else {
                compression = ThumbCompressionUnknown;
            }
            free(thumbnailFormatItem);
        } else {
            PropertyItem* compressionItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailCompression);
            if (compressionItem) {
                WORD compressionTag = *reinterpret_cast<WORD*>(compressionItem->value);
                if (compressionTag == 1) {
                    compression = ThumbCompressionRGB;
                    PropertyItem* photometricInterpretationItem = GetPropertyItemFromImage(bm, PropertyTagPhotometricInterp);
                    if (photometricInterpretationItem) {
                        WORD photoMetricInterpretationTag = VoidToInt(photometricInterpretationItem->value, photometricInterpretationItem->length);
                        free(photometricInterpretationItem);
                        if (photoMetricInterpretationTag == 6) {
                            compression = ThumbCompressionYCbCr;
                        }
                    }
                } else if (compressionTag == 6) {
                    compression = ThumbCompressionJPEG;
                }
                free(compressionItem);
            }
        }

        int originalThumbWidth = 0, originalThumbHeight = 0;
        if (compression == ThumbCompressionJPEG || compression == ThumbCompressionRGB) {
            PropertyItem* thumbDataItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailData);
            if (thumbDataItem) {
                if (compression == ThumbCompressionJPEG) {
                    Bitmap* src = BitmapFromMemory(reinterpret_cast<BYTE*>(thumbDataItem->value), thumbDataItem->length);
                    if (src) {
                        gr.DrawImage(src, 0, 0, sz.Width, sz.Height);
                        delete src;
                        free(thumbDataItem);
                        return res;
                    }
                } else if (compression == ThumbCompressionRGB) {
                    PropertyItem* widthItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailImageWidth);
                    if (widthItem) {
                        originalThumbWidth = VoidToInt(widthItem->value, widthItem->length);
                        free(widthItem);
                    }
                    PropertyItem* heightItem = GetPropertyItemFromImage(bm, PropertyTagThumbnailImageHeight);
                    if (heightItem) {
                        originalThumbHeight = VoidToInt(heightItem->value, heightItem->length);
                        free(heightItem);
                    }
                    if (originalThumbWidth && originalThumbHeight) {
                        BITMAPINFOHEADER bih;
                        memset(&bih, 0, sizeof(bih));
                        bih.biSize = sizeof(bih);
                        bih.biWidth = originalThumbWidth;
                        bih.biHeight = -originalThumbHeight;
                        bih.biPlanes = 1;
                        bih.biBitCount = 24;

                        BITMAPINFO bi;
                        memset(&bi, 0, sizeof(bi));
                        bi.bmiHeader = bih;

                        BYTE* data = reinterpret_cast<BYTE*>(thumbDataItem->value);
                        BYTE temp;
                        // Convert RGB to BGR
                        for (unsigned int offset = 0; offset < thumbDataItem->length; offset += 3) {
                            temp = data[offset];
                            data[offset] = data[offset + 2];
                            data[offset + 2] = temp;
                        }
                        Bitmap src(&bi, thumbDataItem->value);
                        if (src.GetLastStatus() == Ok) {
                            gr.DrawImage(&src, 0, 0, sz.Width, sz.Height);
                            free(thumbDataItem);
                            return res;
                        }
                    }
                } else {
                    // Other types of compression are not implemented.
                }
                free(thumbDataItem);
            }
        }
    }
    // Fallback: load the full image and draw it scaled down (slow)
    gr.DrawImage(bm, 0, 0, sz.Width, sz.Height);
    return res;
}
Gdiplus::Bitmap* GetThumbnail(const CString& filename, int width, int height, Gdiplus::Size* realSize) {
    using namespace Gdiplus;
    Image bm(filename);
    if (bm.GetLastStatus() != Ok) {
        return 0;
    }
    return GetThumbnail(&bm, width, height, realSize);
}
Gdiplus::Size AdaptProportionalSize(const Gdiplus::Size& szMax, const Gdiplus::Size& szReal)
{
    int nWidth;
    int nHeight;
    double sMaxRatio;
    double sRealRatio;

    if (szMax.Width < 1 || szMax.Height < 1 || szReal.Width < 1 || szReal.Height < 1)
        return Gdiplus::Size();

    sMaxRatio = szMax.Width / static_cast<double>(szMax.Height);
    sRealRatio = szReal.Width / static_cast<double>(szReal.Height);

    if (sMaxRatio < sRealRatio) {
        nWidth = min(szMax.Width, szReal.Width);
        nHeight = static_cast<int>(round(nWidth / sRealRatio));
    } else {
        nHeight = min(szMax.Height, szReal.Height);
        nWidth = static_cast<int>(round(nHeight * sRealRatio));
    }
    return Gdiplus::Size(nWidth, nHeight);
}
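One assumption in all of the code above is that GDI+ has already been initialized; none of these calls work before GdiplusStartup. A minimal sketch (photo.jpg and the thumbnail size are placeholders; the CString overload assumes ATL/MFC is available):
#include <windows.h>
#include <gdiplus.h>
#pragma comment(lib, "gdiplus.lib")

int main()
{
    // GDI+ must be started before any Image/Bitmap/Graphics call.
    ULONG_PTR gdiplusToken;
    Gdiplus::GdiplusStartupInput gdiplusStartupInput;
    Gdiplus::GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL);

    Gdiplus::Bitmap* thumb = GetThumbnail(_T("photo.jpg"), 160, 120, 0);
    // ... use thumb, then free it ...
    delete thumb;

    Gdiplus::GdiplusShutdown(gdiplusToken);
    return 0;
}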
I'm developing a project where I need to convert 16-bit 2-channel PCM audio into 32-bit IEEE float 2-channel audio.
To do this I'm using the following code:
void CAudioConverter::ConvI16ToF32(BYTE* pcmFrom, BYTE* floatTo, int length)
{
    short* src = reinterpret_cast<short*>(pcmFrom);
    float* dst = reinterpret_cast<float*>(floatTo);
    for (int n = 0; n < length; n++)
    {
        dst[n] = static_cast<float>(src[n]) / 32768.0f;
    }
}
I have initialized the variable __pcm32_bytesPerFrame with:
WAVEFORMATEX* closestFormat;
ws->default_pb_dev->GetMixFormat(&closestFormat);
__pcm32_bytesPerFrame = closestFormat->nAvgBytesPerSec * (prm->samples_per_frame * 1000 / (prm->clock_rate * closestFormat->nChannels)) / 1000;
strm->pb_max_frame_count is:
hr = ws->default_pb_dev->GetBufferSize(&ws->pb_max_frame_count);
I have a while loop in a dedicated thread that does something like:
hr = strm->default_pb_dev->GetCurrentPadding(&padding);

incoming_frame = __pcm32_bytesPerFrame / 4;
frame_to_render = strm->pb_max_frame_count - padding;
if (frame_to_render >= incoming_frame)
{
    frame_to_render = incoming_frame;
}
else
{
    /* Don't get a new frame because there's no space */
    frame_to_render = 0;
}

if (frame_to_render > 0)
{
    pjmedia_frame frame;

    hr = strm->pb_client->GetBuffer(frame_to_render, &cur_pb_buf);
    if (FAILED(hr)) {
        continue;
    }

    void* destBuffer = (void*)malloc(strm->bytes_per_frame * frame_to_render * sizeof(pj_uint16_t));

    if (strm->fmt_id == PJMEDIA_FORMAT_L16) {
        /* PCM mode */
        frame.type = PJMEDIA_FRAME_TYPE_AUDIO;
        frame.size = strm->bytes_per_frame;
        frame.timestamp.u64 = strm->pb_timestamp.u64;
        frame.bit_info = 0;
        frame.buf = destBuffer;
    }

    status = (*strm->pb_cb)(strm->user_data, &frame);

    CAudioConverter* conv = new CAudioConverter();
    conv->ConvI16ToF32((BYTE*)destBuffer, cur_pb_buf, frame_to_render);

    hr = strm->pb_client->ReleaseBuffer(frame_to_render, 0);
    (...)
But to send the sound to the WASAPI render buffer I need a BYTE*.
How can I fill my 'floatTo' argument?
Any ideas?
Thanks
What about this:
void CAudioConverter::ConvI16ToF32(BYTE* pcmFrom, BYTE* floatTo, int length)
{
    short* src = reinterpret_cast<short*>(pcmFrom);
    float* dst = reinterpret_cast<float*>(floatTo);
    for (int n = 0; n < length; n++)
    {
        dst[n] = static_cast<float>(src[n]) / 32768.0f;
    }
}
Additionally, make sure length indicates the number of elements in pcmFrom and floatTo, not the number of bytes allocated. In your case pcmFrom should have length*2 bytes allocated and floatTo needs room for length*4 bytes.
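For instance, in the loop from the question, a hypothetical call for one buffer of frame_to_render frames of 2-channel audio would look like this (names taken from the question's code; assumes 2 channels):
// Each frame holds 2 samples (one per channel), so there are
// frame_to_render * 2 elements to convert:
//   source:      frame_to_render * 2 * sizeof(short) = 4 bytes per frame
//   destination: frame_to_render * 2 * sizeof(float) = 8 bytes per frame
int samples = frame_to_render * 2;
conv->ConvI16ToF32((BYTE*)destBuffer, cur_pb_buf, samples);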
So, my end game is getting album art from my audio files. I'm using TagLib and C++. I've found info about how to extract embedded image data, but it seems Windows Media Player does not embed the image itself. Instead, it saves a jpg named something like AlbumArt_{E3208100-4FAA-4030-BB9D-6DA5F9D93D18}_Large.jpg. Clearly it's using a GUID, which I believe it saves in a PRIV tag. My question to you folks is how can I get to it? I thought something like this might work:
ID3v2::PrivateFrame* privFrame = static_cast<ID3v2::PrivateFrame*>(*privIter);
if (privFrame != NULL)
{
    std::string owner = privFrame->owner().toCString();
    if (owner == "WM/WMCollectionID" || owner == "WM/WMCollectionGroupID")
    {
        const char* data = privFrame->render().data();
        GUID guid;
        memcpy(&guid.Data1, data, sizeof(long));
    }
}
But data doesn't seem to contain anything useful. Any ideas?
For those with similar problems: as far as I can tell, TagLib's PrivateFrame is broken, so I have rolled my own:
bool MetadataReader::getAlbumGUID(const char* filename, GUID &guid)
{
    const int HEADER_LENGTH = 10;
    const int TAG_INDICATOR_LENGTH = 3;
    const string TAG_TYPE = "ID3";
    const int TAG_MAJOR = 3;
    const int TAG_MINOR = 0;
    const int HEADER_SIZE_BEGIN = 6;
    const int HEADER_SIZE_END = 9;
    const int EXTENDED_HEADER_POS = 5;
    const int EXTENDED_HEADER_SIZE_LENGTH = 4;
    const int FRAME_HEADER_LENGTH = 10;
    const int FRAME_ID_LENGTH = 4;
    const string FRAME_MEDIA_PLAYER_OWNER1 = "WM/WMCollectionID";
    const string FRAME_MEDIA_PLAYER_OWNER2 = "WM/WMCollectionGroupID";
    const string FRAME_MEDIA_PLAYER_ID = "PRIV";
    const int FRAME_SIZE_BEGIN = 4;
    const int FRAME_SIZE_END = 7;

    bool retValue = false;
    try
    {
        // read in file
        ifstream infile(filename, ios::binary);
        infile.seekg(0, ios::beg);
        char header[HEADER_LENGTH];
        infile.read(header, HEADER_LENGTH);

        string tagType(TAG_INDICATOR_LENGTH, '0');
        for (int i = 0; i < TAG_INDICATOR_LENGTH; i++)
        {
            tagType[i] = header[i];
        }
        int majorVersion = (int)header[TAG_INDICATOR_LENGTH];
        int minorVersion = (int)header[TAG_INDICATOR_LENGTH + 1];

        if (tagType == TAG_TYPE && majorVersion == TAG_MAJOR && minorVersion == TAG_MINOR)
        {
            // get extended header bit (bit 6 of the flags byte in ID3v2.3)
            bool extHeader = (header[EXTENDED_HEADER_POS] & 0x40) != 0;

            // get size of extended header
            int extHeaderSize = 0;
            if (extHeader)
            {
                char extHeaderData[EXTENDED_HEADER_SIZE_LENGTH];
                infile.seekg(HEADER_LENGTH, ios::beg);
                infile.read(extHeaderData, EXTENDED_HEADER_SIZE_LENGTH);
                for (int i = 0; i < EXTENDED_HEADER_SIZE_LENGTH; i++)
                {
                    extHeaderSize = extHeaderSize << 8;
                    extHeaderSize |= (unsigned char)extHeaderData[i];
                }
                extHeaderSize += EXTENDED_HEADER_SIZE_LENGTH;
            }

            // get size of tag:
            // a syncsafe integer, 4 bytes with the most significant bit of each ignored
            int tagSize = 0;
            for (int i = HEADER_SIZE_BEGIN; i <= HEADER_SIZE_END; i++)
            {
                tagSize = tagSize << 7;
                tagSize |= (header[i] & 0x7f);
            }
            // tagSize includes the extended header
            tagSize += HEADER_LENGTH;

            // read frames
            int currPos = HEADER_LENGTH + extHeaderSize;
            while (currPos < tagSize)
            {
                infile.seekg(currPos, ios::beg);
                char frameHeader[FRAME_HEADER_LENGTH];
                infile.read(frameHeader, FRAME_HEADER_LENGTH);

                // frame id
                string frameType(FRAME_ID_LENGTH, '0');
                for (int i = 0; i < FRAME_ID_LENGTH; i++)
                {
                    frameType[i] = frameHeader[i];
                }

                // frame length
                int frameLength = 0;
                for (int i = FRAME_SIZE_BEGIN; i <= FRAME_SIZE_END; i++)
                {
                    frameLength = frameLength << 8;
                    frameLength |= (unsigned char)frameHeader[i];
                }

                if (frameType == FRAME_MEDIA_PLAYER_ID)
                {
                    char* frameData = new char[frameLength];
                    infile.read(frameData, frameLength);
                    string owner = frameData;
                    if (owner == FRAME_MEDIA_PLAYER_OWNER1 || owner == FRAME_MEDIA_PLAYER_OWNER2)
                    {
                        // the GUID follows the null-terminated owner string
                        memcpy(&guid, frameData + owner.length() + 1, sizeof(GUID));
                        retValue = true;
                    }
                    delete[] frameData;
                }
                currPos += FRAME_HEADER_LENGTH + frameLength;
            }
            infile.close();
        }
    }
    catch (...)
    {
        retValue = false;
    }
    return retValue;
}
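And to close the loop on the original goal: once getAlbumGUID succeeds, the cache filename from the question can be rebuilt from the GUID. A sketch using StringFromGUID2 (the AlbumArt_{...}_Large.jpg pattern is the one observed in the question, so treat it as an assumption rather than a documented format):
#include <windows.h>
#include <objbase.h>
#include <string>
#pragma comment(lib, "ole32.lib")

// Builds e.g. AlbumArt_{E3208100-4FAA-4030-BB9D-6DA5F9D93D18}_Large.jpg;
// the file is expected to sit next to the audio file.
std::wstring AlbumArtFileName(const GUID& guid)
{
    wchar_t buf[39]; // 38 chars including braces, plus the terminator
    StringFromGUID2(guid, buf, 39);
    return std::wstring(L"AlbumArt_") + buf + L"_Large.jpg";
}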