I am trying to play MP3/WMA files using XAudio2. I managed to use the Media Foundation Source Reader object to do the decoding. My problem is that it is not playing the full audio; only part of the audio is played.
What I am trying to do is get the next sample from the IMFSourceReader and submit it as the next buffer of the source voice. This is repeated until all the data is read from the IMFSourceReader.
while (true)
{
DWORD dwFlags = 0;
// Read the next sample.
hr = pReader->ReadSample(
(DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM,
0, NULL, &dwFlags, NULL, &pSample );
if (dwFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED)
{
printf("Type change - not supported by WAVE file format.\n");
break;
}
if (dwFlags & MF_SOURCE_READERF_ENDOFSTREAM)
{
printf("End of input file.\n");
break;
}
if (pSample == NULL)
{
printf("No sample\n");
continue;
}
// Get a pointer to the audio data in the sample.
hr = pSample->ConvertToContiguousBuffer(&pBuffer);
if (FAILED(hr)) { break; }
hr = pBuffer->Lock(&pAudioData, NULL, &cbBuffer);
if (FAILED(hr)) { break; }
// Make sure not to exceed the specified maximum size.
if (cbMaxAudioData - cbAudioData < cbBuffer)
{
cbBuffer = cbMaxAudioData - cbAudioData;
}
// Write this data to the output file.
hr = WriteToFile(hFile, pAudioData, cbBuffer);
int audioBufferLength = cbBuffer;
if (FAILED(hr)) { break; }
SubmitBuffer(pAudioData, audioBufferLength);
// Unlock the buffer.
hr = pBuffer->Unlock();
pAudioData = NULL;
if (FAILED(hr)) { break; }
// Update running total of audio data.
cbAudioData += cbBuffer;
if (cbAudioData >= cbMaxAudioData)
{
break;
}
SafeRelease(&pSample);
SafeRelease(&pBuffer);
}
void AudioDecoder::SubmitBuffer(byte *pAudioData, int audioBufferLength)
{
byte * pAudioBuffer = new (std::nothrow) byte[audioBufferLength]; // nothrow (needs <new>) so the null check below is meaningful
if (pAudioBuffer != nullptr)
{
// Copy the decoded data: the caller unlocks and releases the media buffer,
// so XAudio2 needs its own copy that stays alive until the buffer finishes.
CopyMemory(pAudioBuffer, pAudioData, audioBufferLength);
// Create an XAUDIO2_BUFFER for submitting audio data
XAUDIO2_BUFFER buffer = {0};
buffer.AudioBytes = audioBufferLength;
buffer.pAudioData = pAudioBuffer;
buffer.pContext = pAudioBuffer; // lets a voice callback free this copy in OnBufferEnd
HRESULT hresult = m_pSourceVoice->SubmitSourceBuffer(&buffer);
}
}
After this I am calling m_pSourceVoice->Start(). This starts playback, but it does not play the full audio. Do I need to add anything else?
This loop doesn't look like it accounts for whether any buffers have completed before submitting more, so it could be running into the limit of XAUDIO2_MAX_QUEUED_BUFFERS. Can you add a counter to your while loop to see how many buffers are submitted to the source voice?
If you've hit the limit, you could start playback before fully decoding the file and submit additional buffers via source voice callbacks.
http://msdn.microsoft.com/en-us/library/windows/desktop/ee415769(v=vs.85).aspx
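For illustration, here is a minimal sketch of such a callback (my sketch, not the poster's code; it assumes the voice is created with this callback passed to CreateSourceVoice, and that pContext is the heap copy made in SubmitBuffer above):
// Frees each buffer's heap copy when XAudio2 is done with it and signals an
// event so the decode loop can block while the queue is full.
class StreamingVoiceCallback : public IXAudio2VoiceCallback
{
public:
    HANDLE m_bufferEndEvent;

    StreamingVoiceCallback()
        : m_bufferEndEvent(CreateEvent(NULL, FALSE, FALSE, NULL)) {}
    ~StreamingVoiceCallback() { CloseHandle(m_bufferEndEvent); }

    // pContext is XAUDIO2_BUFFER::pContext, i.e. the copied audio data.
    void STDMETHODCALLTYPE OnBufferEnd(void* pContext) override
    {
        delete[] static_cast<byte*>(pContext);
        SetEvent(m_bufferEndEvent);
    }

    // Required by the interface but unused here.
    void STDMETHODCALLTYPE OnVoiceProcessingPassStart(UINT32) override {}
    void STDMETHODCALLTYPE OnVoiceProcessingPassEnd() override {}
    void STDMETHODCALLTYPE OnStreamEnd() override {}
    void STDMETHODCALLTYPE OnBufferStart(void*) override {}
    void STDMETHODCALLTYPE OnLoopEnd(void*) override {}
    void STDMETHODCALLTYPE OnVoiceError(void*, HRESULT) override {}
};
The decode loop can then throttle itself before each SubmitBuffer call, for example:
XAUDIO2_VOICE_STATE state;
m_pSourceVoice->GetState(&state);
while (state.BuffersQueued >= XAUDIO2_MAX_QUEUED_BUFFERS)
{
    WaitForSingleObject(callback.m_bufferEndEvent, INFINITE);
    m_pSourceVoice->GetState(&state);
}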
I'm working on a WASAPI UWP audio application with cpp/winrt which needs to take audio from an input, process it, and send it to an output.
I want to set my audio thread characteristics with AvSetMmThreadCharacteristicsW(L"Pro Audio", &taskIndex), but I just noticed this function (and most of avrt.h) is limited to WINAPI_PARTITION_DESKTOP and WINAPI_PARTITION_GAMES.
I think I need this because when my code is integrated into my UWP app, the audio input is full of discontinuities, while I have no issues in my test code, which uses the avrt API.
Is there another way to configure my thread for audio processing?
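(One possible alternative, sketched below on the assumption that a Media Foundation dependency is acceptable: UWP exposes the MMCSS "Pro Audio" class through Media Foundation's shared work queues, so the processing can be dispatched to an MMCSS-registered thread instead of registering your own thread via avrt.)
// Hedged sketch: lock the shared "Pro Audio" MMCSS work queue and dispatch
// audio work to it. Requires mfplat.lib and a successful MFStartup call.
#include <mfapi.h>

DWORD taskId = 0;   // MMCSS task id (in/out)
DWORD queueId = 0;  // work queue registered with the "Pro Audio" class
HRESULT hr = MFLockSharedWorkQueue(L"Pro Audio", 0, &taskId, &queueId);
if (SUCCEEDED(hr))
{
    // Schedule processing via MFPutWorkItem2(queueId, priority, callback, state)
    // instead of running it on a manually created thread.
}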
Edit: here is my test program https://github.com/loics2/test-wasapi. The interesting part happens in the AudioStream class. I can't share my UWP app, but I can copy these classes as-is into a Windows Runtime Component.
Edit 2: here's the audio thread code:
void AudioStream::StreamWorker()
{
WAVEFORMATEX* captureFormat = nullptr;
WAVEFORMATEX* renderFormat = nullptr;
RingBuffer<float> captureBuffer;
RingBuffer<float> renderBuffer;
BYTE* streamBuffer = nullptr;
unsigned int streamBufferSize = 0;
unsigned int bufferFrameCount = 0;
unsigned int numFramesPadding = 0;
unsigned int inputBufferSize = 0;
unsigned int outputBufferSize = 0;
DWORD captureFlags = 0;
winrt::hresult hr = S_OK;
// m_inputClient is a winrt::com_ptr<IAudioClient3>
if (m_inputClient) {
hr = m_inputClient->GetMixFormat(&captureFormat);
// m_audioCaptureClient is a winrt::com_ptr<IAudioCaptureClient>
if (!m_audioCaptureClient) {
hr = m_inputClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
0,
0,
captureFormat,
nullptr);
hr = m_inputClient->GetService(__uuidof(IAudioCaptureClient), m_audioCaptureClient.put_void());
hr = m_inputClient->SetEventHandle(m_inputReadyEvent.get());
hr = m_inputClient->Reset();
hr = m_inputClient->Start();
}
}
hr = m_inputClient->GetBufferSize(&inputBufferSize);
// multiplying the buffer size by the number of channels
inputBufferSize *= 2;
// m_outputClient is a winrt::com_ptr<IAudioClient3>
if (m_outputClient) {
hr = m_outputClient->GetMixFormat(&renderFormat);
// m_audioRenderClient is a winrt::com_ptr<IAudioRenderClient>
if (!m_audioRenderClient) {
hr = m_outputClient->Initialize(
AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
0,
0,
captureFormat,
nullptr);
hr = m_outputClient->GetService(__uuidof(IAudioRenderClient), m_audioRenderClient.put_void());
hr = m_outputClient->SetEventHandle(m_outputReadyEvent.get());
hr = m_outputClient->Reset();
hr = m_outputClient->Start();
}
}
hr = m_outputClient->GetBufferSize(&outputBufferSize);
// multiplying the buffer size by the number of channels
outputBufferSize *= 2;
while (m_isRunning)
{
// ===== INPUT =====
// waiting for the capture event
WaitForSingleObject(m_inputReadyEvent.get(), INFINITE);
// getting the input buffer data
hr = m_audioCaptureClient->GetNextPacketSize(&bufferFrameCount);
while (SUCCEEDED(hr) && bufferFrameCount > 0) {
m_audioCaptureClient->GetBuffer(&streamBuffer, &bufferFrameCount, &captureFlags, nullptr, nullptr);
if (bufferFrameCount != 0) {
captureBuffer.write(reinterpret_cast<float*>(streamBuffer), bufferFrameCount * 2);
hr = m_audioCaptureClient->ReleaseBuffer(bufferFrameCount);
if (FAILED(hr)) {
m_audioCaptureClient->ReleaseBuffer(0);
}
}
else
{
m_audioCaptureClient->ReleaseBuffer(0);
}
hr = m_audioCaptureClient->GetNextPacketSize(&bufferFrameCount);
}
// ===== CALLBACK =====
auto size = captureBuffer.size();
float* userInputData = (float*)calloc(size, sizeof(float));
float* userOutputData = (float*)calloc(size, sizeof(float));
captureBuffer.read(userInputData, size);
OnData(userInputData, userOutputData, size / 2, 2, 48000);
renderBuffer.write(userOutputData, size);
free(userInputData);
free(userOutputData);
// ===== OUTPUT =====
// waiting for the render event
WaitForSingleObject(m_outputReadyEvent.get(), INFINITE);
// getting information about the output buffer
hr = m_outputClient->GetBufferSize(&bufferFrameCount);
hr = m_outputClient->GetCurrentPadding(&numFramesPadding);
// adjust the frame count with the padding
bufferFrameCount -= numFramesPadding;
if (bufferFrameCount != 0) {
hr = m_audioRenderClient->GetBuffer(bufferFrameCount, &streamBuffer);
auto count = (bufferFrameCount * 2);
auto samplesRead = renderBuffer.read(reinterpret_cast<float*>(streamBuffer), count);
if (samplesRead < count) {
// renderBuffer did not hold enough samples: fill the remainder with silence
memset(streamBuffer + samplesRead * sizeof(float), 0, (count - samplesRead) * sizeof(float));
}
hr = m_audioRenderClient->ReleaseBuffer(bufferFrameCount, 0);
if (FAILED(hr)) {
m_audioRenderClient->ReleaseBuffer(0, 0);
}
}
else
{
m_audioRenderClient->ReleaseBuffer(0, 0);
}
}
exit:
// Cleanup code
}
I removed the error handling code for clarity; most of it is:
if (FAILED(hr))
goto exit;
@IInspectable was right, there's something wrong with my code: the audio processing is done by a library which then calls callbacks with some results.
In my callback, I try to raise a winrt::event, but it sometimes takes more than 50 ms. When that happens, it blocks the audio thread and creates discontinuities...
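For anyone hitting the same thing, a minimal sketch of one workaround (my illustration, not the original code; ResultDispatcher and RaiseEvent are assumed names): push each result onto a queue from the audio thread and raise the winrt::event from a worker thread, so slow handlers never block audio processing. For strict real-time, a lock-free queue would be preferable to the mutex used here.
#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

class ResultDispatcher
{
public:
    ResultDispatcher() : m_worker([this] { Run(); }) {}

    ~ResultDispatcher()
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_done = true;
        }
        m_wake.notify_one();
        m_worker.join();
    }

    // Called from the audio thread: never blocks on the event handlers.
    void Post(std::vector<float> result)
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_queue.push_back(std::move(result));
        }
        m_wake.notify_one();
    }

private:
    void Run()
    {
        for (;;)
        {
            std::vector<float> result;
            {
                std::unique_lock<std::mutex> lock(m_mutex);
                m_wake.wait(lock, [this] { return m_done || !m_queue.empty(); });
                if (m_done && m_queue.empty())
                    return;
                result = std::move(m_queue.front());
                m_queue.pop_front();
            }
            // Raising the winrt::event here can take 50 ms without
            // disturbing the audio thread.
            // RaiseEvent(result);  // hypothetical forwarder to the winrt::event
        }
    }

    std::mutex m_mutex;
    std::condition_variable m_wake;
    std::deque<std::vector<float>> m_queue;
    bool m_done = false;
    std::thread m_worker;  // declared last so it starts after the members above
};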
As a college project, we have to develop a server-client music streaming application using the DirectSound API. However, due to the lack of information, guides, or tutorials online, the only source I can gather info from is the piece of code below (the only thing provided by the lecturer). Can anyone help me understand the general purpose of these functions and the order they should be called in?
Thanks in advance.
IDirectSound8 * directSound = nullptr;
IDirectSoundBuffer * primaryBuffer = nullptr;
IDirectSoundBuffer8 * secondaryBuffer = nullptr;
BYTE * dataBuffer = nullptr;
DWORD dataBufferSize;
DWORD averageBytesPerSecond;
// Search the file for the chunk we want
// Returns the size of the chunk and its location in the file
HRESULT FindChunk(HANDLE fileHandle, FOURCC fourcc, DWORD & chunkSize, DWORD & chunkDataPosition)
{
HRESULT hr = S_OK;
DWORD chunkType;
DWORD chunkDataSize;
DWORD riffDataSize = 0;
DWORD fileType;
DWORD bytesRead = 0;
DWORD offset = 0;
if (SetFilePointer(fileHandle, 0, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
{
return HRESULT_FROM_WIN32(GetLastError());
}
while (hr == S_OK)
{
if (ReadFile(fileHandle, &chunkType, sizeof(DWORD), &bytesRead, NULL) == 0)
{
hr = HRESULT_FROM_WIN32(GetLastError());
}
if (ReadFile(fileHandle, &chunkDataSize, sizeof(DWORD), &bytesRead, NULL) == 0)
{
hr = HRESULT_FROM_WIN32(GetLastError());
}
switch (chunkType)
{
case fourccRIFF:
riffDataSize = chunkDataSize;
chunkDataSize = 4;
if (ReadFile(fileHandle, &fileType, sizeof(DWORD), &bytesRead, NULL) == 0)
{
hr = HRESULT_FROM_WIN32(GetLastError());
}
break;
default:
if (SetFilePointer(fileHandle, chunkDataSize, NULL, FILE_CURRENT) == INVALID_SET_FILE_POINTER)
{
return HRESULT_FROM_WIN32(GetLastError());
}
}
offset += sizeof(DWORD) * 2;
if (chunkType == fourcc)
{
chunkSize = chunkDataSize;
chunkDataPosition = offset;
return S_OK;
}
offset += chunkDataSize;
if (bytesRead >= riffDataSize)
{
return S_FALSE;
}
}
return S_OK;
}
// Read a chunk of data of the specified size from the file at the specified location into the supplied buffer
HRESULT ReadChunkData(HANDLE fileHandle, void * buffer, DWORD buffersize, DWORD bufferoffset)
{
HRESULT hr = S_OK;
DWORD bytesRead;
if (SetFilePointer(fileHandle, bufferoffset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
{
return HRESULT_FROM_WIN32(GetLastError());
}
if (ReadFile(fileHandle, buffer, buffersize, &bytesRead, NULL) == 0)
{
hr = HRESULT_FROM_WIN32(GetLastError());
}
return hr;
}
bool Initialise()
{
HRESULT result;
DSBUFFERDESC bufferDesc;
WAVEFORMATEX waveFormat;
// Initialize the direct sound interface pointer for the default sound device.
result = DirectSoundCreate8(NULL, &directSound, NULL);
if (FAILED(result))
{
return false;
}
// Set the cooperative level to priority so the format of the primary sound buffer can be modified.
// We use the handle of the desktop window since we are a console application. If you do write a
// graphical application, you should use the HWnd of the graphical application.
result = directSound->SetCooperativeLevel(GetDesktopWindow(), DSSCL_PRIORITY);
if (FAILED(result))
{
return false;
}
// Setup the primary buffer description.
bufferDesc.dwSize = sizeof(DSBUFFERDESC);
bufferDesc.dwFlags = DSBCAPS_PRIMARYBUFFER | DSBCAPS_CTRLVOLUME;
bufferDesc.dwBufferBytes = 0;
bufferDesc.dwReserved = 0;
bufferDesc.lpwfxFormat = NULL;
bufferDesc.guid3DAlgorithm = GUID_NULL;
// Get control of the primary sound buffer on the default sound device.
result = directSound->CreateSoundBuffer(&bufferDesc, &primaryBuffer, NULL);
if (FAILED(result))
{
return false;
}
// Setup the format of the primary sound buffer.
// In this case it is a .WAV file recorded at 44,100 samples per second in 16-bit stereo (CD audio format).
// Really, we should set this up from the wave file format loaded from the file.
waveFormat.wFormatTag = WAVE_FORMAT_PCM;
waveFormat.nSamplesPerSec = 44100;
waveFormat.wBitsPerSample = 16;
waveFormat.nChannels = 2;
waveFormat.nBlockAlign = (waveFormat.wBitsPerSample / 8) * waveFormat.nChannels;
waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
waveFormat.cbSize = 0;
// Set the primary buffer to be the wave format specified.
result = primaryBuffer->SetFormat(&waveFormat);
if (FAILED(result))
{
return false;
}
return true;
}
void Shutdown()
{
// Destroy the data buffer
if (dataBuffer != nullptr)
{
delete[] dataBuffer;
dataBuffer = nullptr;
}
// Release the primary sound buffer pointer.
if (primaryBuffer != nullptr)
{
primaryBuffer->Release();
primaryBuffer = nullptr;
}
// Release the direct sound interface pointer.
if (directSound != nullptr)
{
directSound->Release();
directSound = nullptr;
}
}
// Load the wave file into memory and setup the secondary buffer.
bool LoadWaveFile(TCHAR * filename)
{
WAVEFORMATEXTENSIBLE wfx = { 0 };
WAVEFORMATEX waveFormat;
DSBUFFERDESC bufferDesc;
HRESULT result;
IDirectSoundBuffer * tempBuffer;
DWORD chunkSize;
DWORD chunkPosition;
DWORD filetype;
HRESULT hr = S_OK;
// Open the wave file
HANDLE fileHandle = CreateFile(filename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, 0,
NULL);
if (fileHandle == INVALID_HANDLE_VALUE)
{
return false;
}
if (SetFilePointer(fileHandle, 0, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
{
return false;
}
// Make sure we have a RIFF wave file
FindChunk(fileHandle, fourccRIFF, chunkSize, chunkPosition);
ReadChunkData(fileHandle, &filetype, sizeof(DWORD), chunkPosition);
if (filetype != fourccWAVE)
{
return false;
}
// Locate the 'fmt ' chunk, and copy its contents into a WAVEFORMATEXTENSIBLE structure.
FindChunk(fileHandle, fourccFMT, chunkSize, chunkPosition);
ReadChunkData(fileHandle, &wfx, chunkSize, chunkPosition);
// Find the audio data chunk
FindChunk(fileHandle, fourccDATA, chunkSize, chunkPosition);
dataBufferSize = chunkSize;
// Read the audio data from the 'data' chunk. This is the data that needs to be copied into
// the secondary buffer for playing
dataBuffer = new BYTE[dataBufferSize];
ReadChunkData(fileHandle, dataBuffer, dataBufferSize, chunkPosition);
CloseHandle(fileHandle);
// Set the wave format of the secondary buffer that this wave file will be loaded onto.
// The value of wfx.Format.nAvgBytesPerSec will be very useful to you since it gives you
// an approximate value for how many bytes it takes to hold one second of audio data.
waveFormat.wFormatTag = wfx.Format.wFormatTag;
waveFormat.nSamplesPerSec = wfx.Format.nSamplesPerSec;
waveFormat.wBitsPerSample = wfx.Format.wBitsPerSample;
waveFormat.nChannels = wfx.Format.nChannels;
waveFormat.nBlockAlign = wfx.Format.nBlockAlign;
waveFormat.nAvgBytesPerSec = wfx.Format.nAvgBytesPerSec;
waveFormat.cbSize = 0;
// Set the buffer description of the secondary sound buffer that the wave file will be loaded onto.
// In this example, we setup a buffer the same size as that of the audio data. For the assignment,
// your secondary buffer should only be large enough to hold approximately four seconds of data.
bufferDesc.dwSize = sizeof(DSBUFFERDESC);
bufferDesc.dwFlags = DSBCAPS_CTRLVOLUME | DSBCAPS_GLOBALFOCUS | DSBCAPS_CTRLPOSITIONNOTIFY;
bufferDesc.dwBufferBytes = dataBufferSize;
bufferDesc.dwReserved = 0;
bufferDesc.lpwfxFormat = &waveFormat;
bufferDesc.guid3DAlgorithm = GUID_NULL;
// Create a temporary sound buffer with the specific buffer settings.
result = directSound->CreateSoundBuffer(&bufferDesc, &tempBuffer, NULL);
if (FAILED(result))
{
return false;
}
// Test the buffer format against the direct sound 8 interface and create the secondary buffer.
result = tempBuffer->QueryInterface(IID_IDirectSoundBuffer8, (void**)&secondaryBuffer);
if (FAILED(result))
{
return false;
}
// Release the temporary buffer.
tempBuffer->Release();
tempBuffer = nullptr;
return true;
}
void ReleaseSecondaryBuffer()
{
// Release the secondary sound buffer.
if (secondaryBuffer != nullptr)
{
(secondaryBuffer)->Release();
secondaryBuffer = nullptr;
}
}
bool PlayWaveFile()
{
HRESULT result;
unsigned char * bufferPtr1;
unsigned long bufferSize1;
unsigned char * bufferPtr2;
unsigned long bufferSize2;
BYTE * dataBufferPtr = dataBuffer;
DWORD soundBytesOutput = 0;
bool fillFirstHalf = true;
LPDIRECTSOUNDNOTIFY8 directSoundNotify;
DSBPOSITIONNOTIFY positionNotify[2];
// Set position of playback at the beginning of the sound buffer.
result = secondaryBuffer->SetCurrentPosition(0);
if (FAILED(result))
{
return false;
}
// Set volume of the buffer to 100%.
result = secondaryBuffer->SetVolume(DSBVOLUME_MAX);
if (FAILED(result))
{
return false;
}
// Create an event for notification that playing has stopped. This is only useful
// when your audio file fits in the entire secondary buffer (as in this example).
// For the assignment, you are going to need notifications when the playback has reached the
// first quarter of the buffer or the third quarter of the buffer so that you know when
// you should copy more data into the secondary buffer.
HANDLE playEventHandles[1];
playEventHandles[0] = CreateEvent(NULL, FALSE, FALSE, NULL);
result = secondaryBuffer->QueryInterface(IID_IDirectSoundNotify8, (LPVOID*)&directSoundNotify);
if (FAILED(result))
{
return false;
}
// This notification is used to indicate that we have finished playing the buffer of audio. In
// the assignment, you will need two different notifications as mentioned above.
positionNotify[0].dwOffset = DSBPN_OFFSETSTOP;
positionNotify[0].hEventNotify = playEventHandles[0];
directSoundNotify->SetNotificationPositions(1, positionNotify);
directSoundNotify->Release();
// Now we can fill our secondary buffer and play it. In the assignment, you will not be able to fill
// the buffer all at once since the secondary buffer will not be large enough. Instead, you will need to
// loop through the data that you have retrieved from the server, filling different sections of the
// secondary buffer as you receive notifications.
// Lock the first part of the secondary buffer to write wave data into it. In this case, we lock the entire
// buffer, but for the assignment, you will only want to lock the half of the buffer that is not being played.
// You will definitely want to look up the methods of the IDirectSoundBuffer8 interface to see what these
// methods do and what the parameters are used for.
result = secondaryBuffer->Lock(0, dataBufferSize, (void**)&bufferPtr1, (DWORD*)&bufferSize1, (void**)&bufferPtr2, (DWORD*)&bufferSize2, 0);
if (FAILED(result))
{
return false;
}
// Copy the wave data into the buffer. If you need to insert some silence into the buffer, insert values of 0.
memcpy(bufferPtr1, dataBuffer, bufferSize1);
if (bufferPtr2 != NULL)
{
memcpy(bufferPtr2, dataBuffer, bufferSize2);
}
// Unlock the secondary buffer after the data has been written to it.
result = secondaryBuffer->Unlock((void*)bufferPtr1, bufferSize1, (void*)bufferPtr2, bufferSize2);
if (FAILED(result))
{
return false;
}
// Play the contents of the secondary sound buffer. If you want play to go back to the start of the buffer
// again, set the last parameter to DSBPLAY_LOOPING instead of 0. If play is already in progress, then
// play will just continue.
result = secondaryBuffer->Play(0, 0, 0);
if (FAILED(result))
{
return false;
}
// Wait for notifications. In this case, we only have one notification so we could use WaitForSingleObject,
// but for the assignment you will need more than one notification, so you will need WaitForMultipleObjects
result = WaitForMultipleObjects(1, playEventHandles, FALSE, INFINITE);
// In this case, we have been notified that playback has finished so we can just finish. In the assignment,
// you should use the appropriate notification to determine which part of the secondary buffer needs to be
// filled and handle it accordingly.
CloseHandle(playEventHandles[0]);
return true;
}
DirectSound is deprecated. See below for recommended replacements.
Documentation can be found on Microsoft Docs. The last time samples for DirectSound were shipped was in the legacy DirectX SDK (November 2007) release which is why you are having a hard time finding them. You can find them on GitHub. The headers and link libraries for DirectSound are in the Windows SDK.
Recommendations
For 'real-time mixing and effects' often used in games, the modern replacement is XAudio2. XAudio 2.9 is included in Windows 10, and is available through a simple side-by-side redistribution model for Windows 7, Windows 8.0, and Windows 8.1. Documentation can be found here, samples can be found here, and the redist can be found here. You may also want to take a look at DirectX Tool Kit for Audio.
For other audio output and input, see Windows Core Audio APIs (WASAPI), which is supported on Windows Vista, Windows 7, Windows 8.0, Windows 8.1, and Windows 10. Documentation can be found here. Some samples can be found on GitHub in Xbox-ATG-Samples and Windows-universal-samples; while these are all UWP samples, the API also supports Win32 desktop.
There's also a new Microsoft Spatial Sounds API on Windows 10 (a.k.a. Windows Sonic). Documentation can be found here. Samples can be found on GitHub in Xbox-ATG-Samples.
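As for the original question about ordering, here is a minimal sketch of how the lecturer's functions fit together (a sketch, assuming a console app and a "test.wav" in the working directory):
int main()
{
    // Create IDirectSound8, set the cooperative level, and prepare the
    // primary buffer format.
    if (!Initialise())
        return 1;

    // Parse the RIFF chunks and create the secondary buffer holding the data.
    if (!LoadWaveFile(TEXT("test.wav")))
        return 1;

    // Fill the secondary buffer, start playback, and wait for the stop
    // notification.
    PlayWaveFile();

    // Release the secondary buffer, then the remaining DirectSound objects.
    ReleaseSecondaryBuffer();
    Shutdown();
    return 0;
}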
I am implementing a sample application using Windows Media Foundation.
I have created an example application as described in the link below:
https://msdn.microsoft.com/en-us/library/windows/desktop/ms703190(v=vs.85).aspx
In the above example I have added two video streams using MFCreateAggregateSource.
In the EVR renderer I am able to hear the audio of both videos, but I can only see one video: the reference stream, i.e. whichever was loaded first.
As per the below link,
https://msdn.microsoft.com/en-us/library/windows/desktop/aa965265(v=vs.85).aspx
The EVR media sink initially has one stream sink, which corresponds to the reference stream. To add new stream sinks, call IMFMediaSink::AddStreamSink.
In my application I am using MFCreateVideoRendererActivate.
How can I use IMFMediaSink::AddStreamSink to add streams to my EVR, so that I can see two video streams playing in one renderer?
**Update**
I have modified the example code below, adding code that creates a stream sink for each video stream:
HRESULT CreateMediaSinkActivate(
IMFStreamDescriptor *pSourceSD, // Pointer to the stream descriptor.
DWORD iStream,
HWND hVideoWindow, // Handle to the video clipping window.
IMFActivate **ppActivate
)
{
IMFMediaTypeHandler *pHandler = NULL;
IMFActivate *pActivate = NULL;
// Get the media type handler for the stream.
HRESULT hr = pSourceSD->GetMediaTypeHandler(&pHandler);
if (FAILED(hr))
{
goto done;
}
// Get the major media type.
GUID guidMajorType;
hr = pHandler->GetMajorType(&guidMajorType);
if (FAILED(hr))
{
goto done;
}
// Create an IMFActivate object for the renderer, based on the media type.
if (MFMediaType_Audio == guidMajorType)
{
// Create the audio renderer.
hr = MFCreateAudioRendererActivate(&pActivate);
}
else if (MFMediaType_Video == guidMajorType) // Added this else if case
{
// Create the video renderer.
hr = MFCreateVideoRendererActivate(hVideoWindow, &pActivate);
IMFMediaSink* pVideoSink = NULL;
HRESULT hrMS = pActivate->ActivateObject(IID_IMFMediaSink, (void**)&pVideoSink);
if (SUCCEEDED(hrMS))
{
IMFStreamSink* pStreamSink = NULL;
hrMS = pVideoSink->AddStreamSink(iStream, NULL, &pStreamSink);
if (SUCCEEDED(hrMS))
{
DWORD dwID=10;
hrMS = pStreamSink->GetIdentifier(&dwID);
if (SUCCEEDED(hrMS))
{
printf("\n%d", dwID);
SafeRelease(&pStreamSink);
}
}
}
}
else
{
// Unknown stream type.
hr = E_FAIL;
// Optionally, you could deselect this stream instead of failing.
}
if (FAILED(hr))
{
goto done;
}
// Return IMFActivate pointer to caller.
*ppActivate = pActivate;
(*ppActivate)->AddRef();
done:
SafeRelease(&pHandler);
SafeRelease(&pActivate);
return hr;
}
But the problem is that I still cannot see two video streams in the video window.
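One thing worth checking (an assumption about Media Foundation topology resolution, not a confirmed fix): output topology nodes bind to the EVR's stream sink 0 by default, so even after AddStreamSink succeeds, both branches may still target the reference stream. Media Foundation defines the MF_TOPONODE_STREAMID attribute to select which stream sink an output node uses:
// Hypothetical wiring in the topology-building code: pOutputNode is the
// IMFTopologyNode created for this stream, and iStream matches the identifier
// passed to AddStreamSink above.
hr = pOutputNode->SetUINT32(MF_TOPONODE_STREAMID, iStream);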
I am currently trying to use the BlackMagic SDK with a stereo camera and a DeckLink 4K Pro card on Linux.
I already used it with a monocular camera and successfully got the image and converted it into a cv::Mat, but no matter what I try with this one, I get nothing but empty matrices for the left and right frames. I suspect there is an issue with the mode and/or flags I use, but I cannot figure it out... When I view the frames with MediaExpress I get satisfying results, so I guess it is not a hardware problem.
I tried to do something similar to the samples provided with the SDK, and I convert the final frames to cv::Mat to see the result. When !isStereo, everything goes well and I get a fusion of both right and left frames, but when it's true, I get empty matrices for both left and right frames.
Here is the corresponding part of my code (the commented-out lines are things I tried; I am almost certain it's something linked to the flags I am using in g_deckLinkInput->EnableVideoInput(...)):
...
void VideoInputFromBlackMagic::runInput(){
this->running=true;
int m_deckLinkIndex;
int idx;
//Check result
HRESULT result;
IDeckLink* deckLink = NULL;
IDeckLinkInput* g_deckLinkInput = NULL;
IDeckLinkAttributes* deckLinkAttributes = NULL;
IDeckLinkIterator* deckLinkIterator = CreateDeckLinkIteratorInstance();
IDeckLinkInputCallback* callBack;
IDeckLinkDisplayModeIterator* displayModeIterator = NULL;
IDeckLinkDisplayMode* displayMode = NULL;
char* displayModeName = NULL;
BMDDisplayModeSupport displayModeSupported;
bool formatDetectionSupported;
if (!deckLinkIterator)
{
fprintf(stderr, "This application requires the DeckLink drivers installed.\n");
return;
}
//Get the DeckLink Inputs
result = deckLinkIterator->Next(&deckLink);
result = deckLink->QueryInterface(IID_IDeckLinkInput, (void**)&g_deckLinkInput);
if(result != S_OK){
fprintf(stdout, "Cannot get the Input : DeckLink Error\n");
return;
}
//Get the DeckLink attributes (this may not work correctly: format detection does not work properly)
result = deckLink->QueryInterface(IID_IDeckLinkAttributes, (void**)&deckLinkAttributes);
if (!(result == S_OK)){
fprintf(stdout, "Cannot get the DeckLink attributes : DeckLink Error\n");
return;
}
//Format detection
result = deckLinkAttributes->GetFlag(BMDDeckLinkSupportsInputFormatDetection, &formatDetectionSupported);
if (result != S_OK || !formatDetectionSupported){
fprintf(stdout,"Cannot get the format input: DeckLink Error\n");
return;
}
//Index for the different inputs
idx = 0;
//Get all the displayModes
result = g_deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
if (result != S_OK){
fprintf(stdout,"Cannot set an iterator on the different display modes: DeckLink Error\n");
}
//Set idx
while ((result = displayModeIterator->Next(&displayMode)) == S_OK)
{
if (idx == 0)
break;
--idx;
displayMode->Release();
}
if (result != S_OK || displayMode == NULL){
fprintf(stdout,"Cannot get the main display mode: DeckLink Error\n");
return;
}
//Get Mode name: useless
result = displayMode->GetName((const char**)&displayModeName);
// Check display mode is supported with given options
if(this->isStereo){
//result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeSupports3D | bmdDisplayModeColorspaceRec709, &displayModeSupported, NULL);
//result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 & bmdDisplayModeSupports3D, &displayModeSupported, NULL);
result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdVideoInputDualStream3D, &displayModeSupported, NULL);
//result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 | bmdVideoInputDualStream3D, &displayModeSupported, NULL);
} else {
result = g_deckLinkInput->DoesSupportVideoMode(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709, &displayModeSupported, NULL);
}
if (result != S_OK){
fprintf(stdout,"Video Mode not supported : aborted\n");
return;
}
if (displayModeSupported == bmdDisplayModeNotSupported)
{
fprintf(stdout, "The display mode %s is not supported with the selected pixel format\n", displayModeName);
return;
}
//Set the callback on this (defines callbacks such as VideoInputFrameArrived that are invoked when images arrive or other events happen)
g_deckLinkInput->SetCallback(this);
//Enable the video input with the selected format
if(this->isStereo){
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeSupports3D | bmdDisplayModeColorspaceRec709);
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 & bmdDisplayModeSupports3D);
result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdVideoInputDualStream3D);
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709 | bmdVideoInputDualStream3D);
//result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmd3DPreviewFormatRightOnly);
} else {
result = g_deckLinkInput->EnableVideoInput(bmdModeHD1080p30, bmdFormat8BitYUV, bmdDisplayModeColorspaceRec709);
}
if (result != S_OK)
{
fprintf(stderr, "Failed to enable video input. Maybe another application is using the card.\n");
return;
}
//Disable the audio
result = g_deckLinkInput->DisableAudioInput();
//Start the stream
result = g_deckLinkInput->StartStreams();
if (result != S_OK){
fprintf(stdout,"Error while starting the streaming : aborted\n");
}
while(this->running){
}
}
...
HRESULT VideoInputFromBlackMagic::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioFrame){
if (!videoFrame){
fprintf(stdout,"Update: No video frame\n");
return S_FALSE;
}
void* data;
void* dataRight;
IDeckLinkVideoFrame3DExtensions* threeDExtensions = NULL;
IDeckLinkVideoFrame* rightEyeFrame = NULL;
if (FAILED(videoFrame->GetBytes(&data))){
fprintf(stdout,"Fail obtaining the data from videoFrame\n");
return S_FALSE;
}
if( this->isStereo ){
if ((videoFrame->QueryInterface(IID_IDeckLinkVideoFrame3DExtensions, (void **) &threeDExtensions) != S_OK) || (threeDExtensions->GetFrameForRightEye(&rightEyeFrame) != S_OK)){
fprintf(stdout,"Fail obtaining right eye frame\n");
return S_FALSE;
}
if (FAILED(rightEyeFrame->GetBytes(&dataRight))){
fprintf(stdout,"Fail obtaining the data from videoFrame\n");
return S_FALSE;
}
}
cv::Mat loadedImage;
cv::Mat mat = cv::Mat(videoFrame->GetHeight(), videoFrame->GetWidth(), CV_8UC2, data, videoFrame->GetRowBytes());
cv::cvtColor(mat, loadedImage, CV_YUV2BGR_UYVY);
//Right eye
cv::Mat loadedImageRight;
cv::Mat matRight;
if(this->isStereo){
matRight = cv::Mat(rightEyeFrame->GetHeight(), rightEyeFrame->GetWidth(), CV_8UC2, dataRight, rightEyeFrame->GetRowBytes());
// Convert before releasing: matRight wraps the frame's memory, so releasing
// the frame first would leave the Mat pointing at freed data.
cv::cvtColor(matRight, loadedImageRight, CV_YUV2BGR_UYVY);
if (rightEyeFrame){
rightEyeFrame->Release();
}
}
if (!loadedImage.data){
fprintf(stdout,"No frame loaded from the video : mainImage will not be updated\n");
} else {
this->currentImage = loadedImage;
//this->currentImage = loadedImageRight;
if(this->isStereo){
//this->currentImageRight = loadedImageRight;
this->currentImageRight = loadedImage;
}
this->initialized = true;
if(this->debugMode){
if(this->isStereo){
cv::imshow("DEBUG : right eye", this->currentImageRight);
cv::imshow("DEBUG : left eye", this->currentImage);
} else {
cv::imshow("DEBUG", this->currentImage);
}
}
if(this->debugMode){
int kp = cv::waitKey(1);
if(kp == 1048603){
//Remove debugMode and remove the window
this->setDebugMode(false);
}
}
}
return S_OK;
}
...
Any idea?
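One observation, offered as an assumption rather than a verified fix: in the DeckLink SDK, DoesSupportVideoMode() and EnableVideoInput() take BMDVideoInputFlags, whereas bmdDisplayModeSupports3D and bmdDisplayModeColorspaceRec709 are BMDDisplayModeFlags, so the commented-out attempts mix two different enums; bmdVideoInputDualStream3D, as in the active calls, is the relevant 3D input flag. It may also be worth confirming the selected mode is 3D-capable rather than taking the iterator's first mode:
// Sketch: pick the first display mode whose flags advertise 3D support,
// instead of unconditionally using the first mode returned by the iterator.
while (displayModeIterator->Next(&displayMode) == S_OK)
{
    if (displayMode->GetFlags() & bmdDisplayModeSupports3D)
        break;  // found a 3D-capable mode
    displayMode->Release();
    displayMode = NULL;
}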
Edit: Are there any tutorials on how to use WIA or TWAIN in C++ that explain how to scan pages, adjust settings (DPI, automatic feeder, etc.), and save them as PNG files?
I'd like to use WIA to scan pages and store them as png files. If the scanner supports automatic feeding I'd also like to use that feature. Currently I am following the steps of this tutorial and am stuck at the section Transferring Image Data in WIA 2.0.
So far my scanner has been found and I am able to create the device, and an IWiaItem2* has been created. How can I use it to scan at 300 dpi and store the result as a PNG file?
The tutorial is not clear about how to start the scan process or how to set the dpi for scanning, so I hope someone can help me with the code.
This is essentially the code for getting all local devices:
bool init(IWiaDevMgr2** devMgr) // double pointer so the caller receives the instance
{
//creating the device manager
*devMgr = 0;
CoCreateInstance( CLSID_WiaDevMgr2, 0, CLSCTX_LOCAL_SERVER, IID_IWiaDevMgr2, (void**)devMgr);
//enumerating wia devices
IEnumWIA_DEV_INFO* enumDevInfo = 0;
HRESULT hr = (*devMgr)->EnumDeviceInfo( WIA_DEVINFO_ENUM_LOCAL, &enumDevInfo);
if(SUCCEEDED(hr))
{
//loop until an error occurs or end of list
while(hr == S_OK)
{
IWiaPropertyStorage* storage = 0;
hr = enumDevInfo->Next( 1, &storage, 0);
if(hr == S_OK)
{
readProperties(storage);
storage->Release();
storage = 0;
}
}
//set hr to ok, so no error code is returned
if(hr == S_FALSE) hr = S_OK;
enumDevInfo->Release();
enumDevInfo = 0;
}
return SUCCEEDED(hr);
}
void readProperties(IWiaPropertyStorage* storage)
{
PROPSPEC propSpec[2] = {0};
PROPVARIANT propVar[2] = {0};
const ULONG propCount = sizeof(propSpec) / sizeof(propSpec[0]);
propSpec[0].ulKind = PRSPEC_PROPID;
propSpec[0].propid = WIA_DIP_DEV_ID;
propSpec[1].ulKind = PRSPEC_PROPID;
propSpec[1].propid = WIA_DIP_DEV_NAME;
HRESULT hr = storage->ReadMultiple(propCount, propSpec, propVar);
if(SUCCEEDED(hr))
{
Device* dev = new Device(propVar[0].bstrVal, propVar[1].bstrVal);
devices.push_back( dev );
FreePropVariantArray( propCount, propVar );
}
}
Afterwards a device is initialized like this:
bool createDevice(BSTR id, IWiaItem2** item)
{
*item = 0;
HRESULT hr = devMgr->CreateDevice( 0, id, item); // use the id parameter (deviceId was undeclared)
return SUCCEEDED(hr);
}
Then the items are enumerated:
bool enumerateItems(IWiaItem2* item)
{
LONG itemType = 0;
HRESULT hr = item->GetItemType(&itemType);
if(SUCCEEDED(hr))
{
if(itemType & WiaItemTypeFolder || itemType & WiaItemTypeHasAttachments)
{
IEnumWiaItem2* enumItem = 0;
hr = item->EnumChildItems(0, &enumItem );
while(hr == S_OK)
{
IWiaItem2* child = 0;
hr = enumItem->Next( 1, &child, 0 );
if(hr == S_OK)
{
hr = enumerateItems( child );
child->Release();
child = 0;
}
}
if(hr == S_FALSE) hr = S_OK;
enumItem->Release();
enumItem = 0;
}
}
return SUCCEEDED(hr);
}
Now that everything has been initialized, I'd like to implement a scan function. However, the code provided in the tutorial is for transferring files and folders, not for scanning images.
void scanAndSaveAsPNG(IWiaItem2* item, unsigned int dpi, std::string targetPath)
{
}
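(Not part of the original question, but a sketch of one piece of this, under assumptions about the WIA 2.0 property model: the QI to IWiaPropertyStorage and the WIA_IPS_XRES/WIA_IPS_YRES properties. It requests 300 dpi by writing the item's resolution properties before the transfer.)
// Hedged sketch: set the horizontal/vertical resolution (in dpi) on the
// scanner item before starting the transfer. Error handling kept minimal.
bool setResolution(IWiaItem2* item, LONG dpi)
{
    IWiaPropertyStorage* storage = 0;
    HRESULT hr = item->QueryInterface(IID_IWiaPropertyStorage, (void**)&storage);
    if (FAILED(hr)) return false;

    PROPSPEC spec[2] = {0};
    PROPVARIANT var[2] = {0};
    spec[0].ulKind = PRSPEC_PROPID;
    spec[0].propid = WIA_IPS_XRES;  // horizontal resolution
    spec[1].ulKind = PRSPEC_PROPID;
    spec[1].propid = WIA_IPS_YRES;  // vertical resolution
    var[0].vt = VT_I4; var[0].lVal = dpi;
    var[1].vt = VT_I4; var[1].lVal = dpi;

    hr = storage->WriteMultiple(2, spec, var, WIA_IPS_FIRST);
    storage->Release();
    return SUCCEEDED(hr);
}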
EDIT:
I installed the latest available version of the scanner driver (WIA and TWAIN), and after checking the supported commands using this code
void printCommands(IWiaItem2* item) // parameter renamed so the uses of item below compile
{
IEnumWIA_DEV_CAPS* caps = 0;
HRESULT h = item->EnumDeviceCapabilities(WIA_DEVICE_COMMANDS, &caps);
if(SUCCEEDED(h))
{
ULONG count = 0;
caps->GetCount(&count);
if(count > 0)
{
WIA_DEV_CAP* cap = new WIA_DEV_CAP[ count ];
ULONG fetched = 0;
caps->Next(count, cap, &fetched);
for(ULONG i = 0; i < fetched; i++)
{
std::cout << bstr_t( cap[i].bstrName ) << "\n";
}
delete[] cap; // free the capability array
}
caps->Release();
}
}
I noticed it only lists the WIA Synchronize command. I am not sure if I didn't initialize the device correctly, or if the device doesn't support all WIA commands even though the driver is installed.
So unless this problem is solved, I am alternatively also looking for the same code based on TWAIN.
You want to use IWiaItem2::DeviceCommand, which sends a command to the image capture device. The list of commands you can send is documented here.
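A minimal sketch of issuing such a command (based on the answer above; WIA_CMD_SYNCHRONIZE is the one command the device reported):
// Hedged sketch: ask the device to synchronize; some drivers require this
// before other operations. The returned item, if any, must be released.
IWiaItem2* resultItem = 0;
HRESULT hr = item->DeviceCommand(0, &WIA_CMD_SYNCHRONIZE, &resultItem);
if (SUCCEEDED(hr) && resultItem)
{
    resultItem->Release();
}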