I have a DirectShow graph with a "Microsoft DVBT Network Provider", "AVerMedia BDA DVBT Tuner", "AVerMedia BDA Digital Capture", "Sample Grabber" and "Null Renderer".
These filters are connected.
Besides that, I also have an "MPEG-2 Demultiplexer" and a "BDA MPEG2 Transport Information Filter", but these two filters are NOT connected! It seems they have to be present in order to run the graph.
When I start the graph I'm receiving TS data, but no matter what I do, I'm not able to submit the tune request. I can only capture the MUX data from the last frequency tuned by some other application, such as Windows Media Center.
Here is the code that submits the tune request:
// creating tuning space
CComPtr<IDVBTuningSpace> pDVBTuningSpace;
hr = pDVBTuningSpace.CoCreateInstance( __uuidof( DVBTuningSpace ) );
WCHAR szFriendlyName[ 64 ] = L"Local DVB-T Digital Antenna";
BSTR bstrFriendlyName = SysAllocString( szFriendlyName );
hr = pDVBTuningSpace->put_UniqueName( bstrFriendlyName );
hr = pDVBTuningSpace->put_FriendlyName( bstrFriendlyName );
SysFreeString( bstrFriendlyName );
CComBSTR clsid_dvbt = "{216C62DF-6D7F-4e9a-8571-05F14EDB766A}";
hr = pDVBTuningSpace->put_NetworkType( clsid_dvbt );
hr = pDVBTuningSpace->put_SystemType( DVB_Terrestrial );

// creating tune request
CComPtr<ITuneRequest> pTuneRequest;
hr = pDVBTuningSpace->CreateTuneRequest( &pTuneRequest );
CComQIPtr<IDVBTuneRequest> pDVBTuneRequest( pTuneRequest );
hr = pDVBTuneRequest->put_ONID( -1 );
hr = pDVBTuneRequest->put_TSID( -1 );
hr = pDVBTuneRequest->put_SID( -1 );

// locator
CComPtr<IDVBTLocator> pDVBTLocator;
hr = pDVBTLocator.CoCreateInstance( __uuidof( DVBTLocator ) );
hr = pDVBTLocator->put_Bandwidth( 8 );
hr = pDVBTLocator->put_CarrierFrequency( 506000 );
hr = pDVBTuneRequest->put_Locator( pDVBTLocator );

CComQIPtr<ITuner> pTuner( pNetworkProvider_ );
hr = pTuner->put_TuneRequest( pDVBTuneRequest );
This is executed immediately after adding the "Microsoft DVBT Network Provider" filter to the graph.
All "hr" values in the above code are S_OK.
What am I doing wrong? Or did I miss something big in this "tune request" thing?
(The bandwidth and frequency values are correct.)
I think put_Bandwidth( 8 ) is wrong; it should be a bandwidth in Hz. Anyway, here is some code I use; maybe it helps.
HRESULT hr;
CComBSTR TuningName;
hr = pDVBTuningSpace2.CoCreateInstance(CLSID_DVBTuningSpace);
hr = pDVBTuningSpace2->put_SystemType(DVB_Terrestrial);
TuningName = L"My DVB-T";
hr = pDVBTuningSpace2->put__NetworkType(CLSID_DVBTNetworkProvider);
CComPtr <IDVBTLocator> pDVBTLocator;
hr = pDVBTLocator.CoCreateInstance(CLSID_DVBTLocator);
hr = pDVBTLocator->put_CarrierFrequency(config->GetFreq());
hr = pDVBTLocator->put_Bandwidth(config->GetSymbolRate());
hr = pDVBTuningSpace2->put_DefaultLocator(pDVBTLocator);
hr = pDVBTuningSpace2->put_UniqueName(TuningName);
hr = pDVBTuningSpace2->put_FriendlyName(TuningName);
hr = pDVBTuningSpace2->put_FrequencyMapping(L"");
CComPtr <ITuningSpaceContainer> pTuningSpaceContainer;
hr = pTuningSpaceContainer.CoCreateInstance(CLSID_SystemTuningSpaces);
VARIANT tiIndex;
hr = pTuningSpaceContainer->Add(pDVBTuningSpace2,&tiIndex);
if (FAILED(hr)) {
    // Add() failed, most likely because a tuning space with this unique name
    // already exists; find it in the collection and replace it.
    // Get the enumerator for the collection.
    CComPtr<IEnumTuningSpaces> pTuningSpaceEnum;
    hr = pTuningSpaceContainer->get_EnumTuningSpaces(&pTuningSpaceEnum);
    if (SUCCEEDED(hr)) {
        // Loop through the collection.
        CComPtr<ITuningSpace> pTuningSpace;
        tiIndex.intVal = 0;
        while (S_OK == pTuningSpaceEnum->Next(1, &pTuningSpace, NULL)) {
            USES_CONVERSION;
            BSTR Name;
            hr = pTuningSpace->get_UniqueName(&Name);
            if (SUCCEEDED(hr)) {
                if (wcscmp(OLE2W(Name), TuningName) == 0) {
                    hr = pTuningSpaceContainer->put_Item(tiIndex, pDVBTuningSpace2);
                }
                SysFreeString(Name);
            }
            tiIndex.intVal++;
            pTuningSpace.Release();
        }
    }
}
CComPtr<ITuneRequest> pTuneRequest;
hr = pDVBTuningSpace2->CreateTuneRequest(&pTuneRequest);
CComQIPtr<IDVBTuneRequest> pDVBTuneRequest(pTuneRequest);
if(pDVBTuneRequest) {
hr = pDVBTuneRequest->put_SID(config->GetSid());
hr = pDVBTuneRequest->put_TSID(config->GetTsid());
hr = pDVBTuneRequest->put_ONID(config->GetOnid());
}
GUID CLSIDNetworkType;
hr = pDVBTuningSpace2->get__NetworkType(&CLSIDNetworkType);
hr = CoCreateInstance(CLSIDNetworkType, NULL, CLSCTX_INPROC_SERVER,
IID_IBaseFilter, (void **) &pNetworkProvider);
hr = graph->AddFilter(pNetworkProvider,L"Network Provider");
// Query for ITuner.
CComQIPtr<ITuner> pTuner(pNetworkProvider);
if (pTuner) {
// Submit the tune request to the network provider.
hr = pTuner->put_TuneRequest(pTuneRequest);
}
hr = graph->AddFilter(pBdaNetworkTuner,L"BDA Source");
hr = ConnectFilters(pNetworkProvider,pBdaNetworkTuner);
CComPtr<IBaseFilter> pBdaReceiver;
hr = FindDevice(KSCATEGORY_BDA_RECEIVER_COMPONENT, &pBdaReceiver, 0, 0, 0);
hr = graph->AddFilter(pBdaReceiver,L"BDA Receiver");
hr = ConnectFilters(pBdaNetworkTuner,pBdaReceiver);
CComPtr<IBaseFilter> pMpegDemux;
hr = pMpegDemux.CoCreateInstance(CLSID_MPEG2Demultiplexer);
hr = graph->AddFilter(pMpegDemux,L"MPEG Demux");
hr = ConnectFilters(pBdaReceiver,pMpegDemux);
You are doing some things in a different order, but I'm not sure if it matters.
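One more sanity check you can add: after put_TuneRequest succeeds, read the request back from the network provider and inspect the locator. A minimal sketch (error handling elided; pTuner is the same ITuner as above):

CComPtr<ITuneRequest> pCurrent;
hr = pTuner->get_TuneRequest(&pCurrent);
if (SUCCEEDED(hr) && pCurrent)
{
    CComPtr<ILocator> pLocator;
    if (SUCCEEDED(pCurrent->get_Locator(&pLocator)) && pLocator)
    {
        long freq = 0;
        pLocator->get_CarrierFrequency(&freq); // should match the frequency you put in
    }
}

If the value read back does not match what you submitted, the provider never accepted the request in the first place.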
I can capture an image from a webcam and save it as a bitmap using the Sample Grabber, and I know I can use the IAMStreamConfig interface to GetFormat and SetFormat the video resolution. My question is: I use FindInterface() to get an IAMStreamConfig*, but it always fails. Is it because I call it in the wrong place, or is there something else I didn't notice? I call it before RenderStream. Here is some code; thanks for your patience and help!
INT USBDeviceApp::GetInterfaces()
{
HRESULT hr;
hr = CoCreateInstance (CLSID_FilterGraph, NULL, CLSCTX_INPROC,
IID_IGraphBuilder, (void **) &pGraphBuilder);
if (FAILED(hr))
return hr;
hr = CoCreateInstance (CLSID_CaptureGraphBuilder2 , NULL, CLSCTX_INPROC,
IID_ICaptureGraphBuilder2, (void **) &pCaptureGraphBuilder2);
if (FAILED(hr))
return hr;
hr = pGraphBuilder->QueryInterface(IID_IMediaControl,(LPVOID *)
&pMediaControl);
if (FAILED(hr))
return hr;
hr = pGraphBuilder->QueryInterface(IID_IVideoWindow, (LPVOID *)
&pVideoWindow);
if(FAILED(hr))
{
return hr;
}
hr = pGraphBuilder->QueryInterface(IID_IMediaEvent,(LPVOID *)
&pMediaEvent);
if(FAILED(hr))
{
return hr;
}
// ------------------------
// Create the Sample Grabber.
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_IBaseFilter, (void**)&pGrabberF);
if (FAILED(hr))
{
return hr;
}
hr = pGrabberF->QueryInterface(IID_ISampleGrabber,
(void**)&pSampleGrabber);
if(FAILED(hr))
{
AfxMessageBox(_T("Error SampleGrabber QueryInterface"));
}
return 1;
}
INT USBDeviceApp::InitMonikers()
{
HRESULT hr;
ULONG cFetched;
ICreateDevEnum *pCreateDevEnum;
hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL,
CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (void**)&pCreateDevEnum);
if (FAILED(hr))
{
return hr;
}
IEnumMoniker *pEnumMoniker;
hr = pCreateDevEnum->
CreateClassEnumerator(CLSID_VideoInputDeviceCategory,&pEnumMoniker, 0);
if (FAILED(hr) || !pEnumMoniker)
{
return -1;
}
hr = pEnumMoniker->Next(1, &pMonikerVideo, &cFetched);
if (S_OK == hr)
{
hr = pMonikerVideo->BindToObject(0,0,IID_IBaseFilter,
(void**)&pVideoCaptureFilter);
if (FAILED(hr))
{
return hr;
}
}
pEnumMoniker->Release();
return 1;
}
INT USBDeviceApp::CaptureVideo()
{
HRESULT hr = CoInitialize(NULL);
hr = GetInterfaces();
if (FAILED(hr))
{
AfxMessageBox(_T("Failed to get video interfaces!"));
return hr;
}
hr = pCaptureGraphBuilder2->SetFiltergraph(pGraphBuilder);
if (FAILED(hr))
{
AfxMessageBox(_T("Failed to attach the filter graph to the capture graph!"));
return hr;
}
//IAMStreamConfig *pConfig
hr = pCaptureGraphBuilder2->FindInterface(&PIN_CATEGORY_PREVIEW,
&MEDIATYPE_Video,
pVideoCaptureFilter,IID_IAMStreamConfig, (void **)&pConfig);
if (FAILED(hr))
{
AfxMessageBox(_T("Couldn't initialize IAMStreamConfig!"));
}
else
{
    int iCount = 0, iSize = 0;
    hr = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
    if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
    {
        for (int iFormat = 0; iFormat < iCount; iFormat++)
        {
            VIDEO_STREAM_CONFIG_CAPS scc;
            AM_MEDIA_TYPE *pmtConfig;
            hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
            if (SUCCEEDED(hr))
            {
                if ((pmtConfig->majortype == MEDIATYPE_Video) &&
                    (pmtConfig->subtype == MEDIASUBTYPE_RGB24) &&
                    (pmtConfig->formattype == FORMAT_VideoInfo) &&
                    (pmtConfig->cbFormat >= sizeof(VIDEOINFOHEADER)) &&
                    (pmtConfig->pbFormat != NULL))
                {
                    VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pmtConfig->pbFormat;
                    pVih->bmiHeader.biWidth = 1280;
                    pVih->bmiHeader.biHeight = 720;
                    pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);
                    hr = pConfig->SetFormat(pmtConfig);
                }
                DeleteMediaType(pmtConfig);
            }
        }
    }
}
hr = InitMonikers();
if(FAILED(hr))
{
return hr;
}
hr = pGraphBuilder->AddFilter(pVideoCaptureFilter, L"Video Capture");
if (FAILED(hr))
{
pVideoCaptureFilter->Release();
return hr;
}
AM_MEDIA_TYPE mt;
ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
mt.majortype = MEDIATYPE_Video;
mt.subtype = MEDIASUBTYPE_RGB24;
hr = pSampleGrabber->SetMediaType(&mt);
hr = pSampleGrabber->SetOneShot(FALSE);
hr = pSampleGrabber->SetBufferSamples(TRUE);
hr = pGraphBuilder->AddFilter(pGrabberF, L"Sample Grabber");
if (FAILED(hr))
{
return hr;
}
hr = pCaptureGraphBuilder2->RenderStream(&PIN_CATEGORY_PREVIEW, &MEDIATYPE_Video, pVideoCaptureFilter, pGrabberF, 0 );
if (FAILED(hr))
{
pVideoCaptureFilter->Release();
return hr;
}
hr = pSampleGrabber->GetConnectedMediaType( &mt );
if(FAILED( hr ))
{
return -1;
}
VIDEOINFOHEADER * vih = (VIDEOINFOHEADER*) mt.pbFormat;
pVih = (VIDEOINFOHEADER*) mt.pbFormat;
CSampleGrabberCB *CB = new CSampleGrabberCB() ;
if(!FAILED( hr ))
{
CB->Width = vih->bmiHeader.biWidth;
CB->Height = vih->bmiHeader.biHeight;
FreeMediaType( mt );
}
hr = pSampleGrabber->SetCallback( CB, 1 );
pVideoCaptureFilter->Release();
this->SetUpVideoWindow();
hr = pMediaControl->Run();
if (FAILED(hr))
{
return hr;
}
return hr;
}
hr = pCaptureGraphBuilder2->FindInterface()
always fails to get the IAMStreamConfig interface, and I really don't know why. Can someone help me? Thanks so much!
Your API call below
pCaptureGraphBuilder2->FindInterface(&PIN_CATEGORY_PREVIEW,
&MEDIATYPE_Video,
pVideoCaptureFilter,IID_IAMStreamConfig, (void **) &pConfig);
applies several restrictions to the search, including the pin category: you are looking for a preview pin. A graph can easily feature several video capture devices where none of them has a dedicated preview pin, because the preview pin is optional.
You need to take this into account and either relax the search criteria (passing NULL as the pin category removes that restriction) or, as I would rather suggest, locate the pin of your interest directly on the capture filter. Then you can set the format on it and connect it to the downstream peer filters. FindInterface is powerful, but it also adds chances for confusion.
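For illustration, a minimal sketch of the direct approach (GetStreamConfigFromPin is an illustrative name; IAMStreamConfig is exposed by the capture filter's output pin itself):

HRESULT GetStreamConfigFromPin(IBaseFilter *pFilter, IAMStreamConfig **ppConfig)
{
    *ppConfig = NULL;
    CComPtr<IEnumPins> pEnumPins;
    HRESULT hr = pFilter->EnumPins(&pEnumPins);
    if (FAILED(hr))
        return hr;
    CComPtr<IPin> pPin;
    while (pEnumPins->Next(1, &pPin, NULL) == S_OK)
    {
        PIN_DIRECTION dir;
        if (SUCCEEDED(pPin->QueryDirection(&dir)) && dir == PINDIR_OUTPUT)
        {
            // Ask the output pin for IAMStreamConfig; on a capture filter
            // this is typically the capture pin.
            hr = pPin->QueryInterface(IID_IAMStreamConfig, (void**)ppConfig);
            if (SUCCEEDED(hr))
                return S_OK;
        }
        pPin.Release();
    }
    return E_NOINTERFACE;
}

Call it on pVideoCaptureFilter before the graph is connected, set the format you want, and only then build the rest of the graph.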
I have taken code from the net that captures a frame from a video file and modified it to capture all frames and store them as BMP images.
HRESULT GrabVideoBitmap(PCWSTR pszVideoFile)
{
IGraphBuilder *pGraph = NULL;
IMediaControl *pControl = NULL;
IMediaEventEx *pEvent = NULL;
IBaseFilter *pGrabberF = NULL;
ISampleGrabber *pGrabber = NULL;
IBaseFilter *pSourceF = NULL;
IEnumPins *pEnum = NULL;
IPin *pPin = NULL;
IBaseFilter *pNullF = NULL;
long evCode;
wchar_t temp[10];
wchar_t framename[50] = IMAGE_FILE_PATH; // L"D:\\sampleframe";
BYTE *pBuffer = NULL;
HRESULT hr = CoInitialize(NULL);
if (FAILED(hr))
return 0;
hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&pGraph));
hr = pGraph->QueryInterface(IID_PPV_ARGS(&pControl));
hr = pGraph->QueryInterface(IID_PPV_ARGS(&pEvent));
// Create the Sample Grabber filter.
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&pGrabberF));
hr = pGraph->AddFilter(pGrabberF, L"Sample Grabber");
hr = pGrabberF->QueryInterface(IID_PPV_ARGS(&pGrabber));
// Displays the metadata of the file
DisplayFileInfo((wchar_t*)pszVideoFile); // to display video information
AM_MEDIA_TYPE mt;
ZeroMemory(&mt, sizeof(mt));
mt.majortype = MEDIATYPE_Video;
mt.subtype = MEDIASUBTYPE_RGB24;
hr = pGrabber->SetMediaType(&mt);
hr = pGraph->AddSourceFilter(pszVideoFile, L"Source", &pSourceF);
hr = pSourceF->EnumPins(&pEnum);
while (S_OK == pEnum->Next(1, &pPin, NULL))
{
hr = ConnectFilters(pGraph, pPin, pGrabberF);
SafeRelease(&pPin);
if (SUCCEEDED(hr))
{
break;
}
}
hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&pNullF));
hr = pGraph->AddFilter(pNullF, L"Null Filter");
hr = ConnectFilters(pGraph, pGrabberF, pNullF);
hr = pGrabber->SetOneShot(TRUE);
hr = pGrabber->SetBufferSamples(TRUE);
hr = pControl->Run();
hr = pEvent->WaitForCompletion(INFINITE, &evCode);
for (int i = 0; i < 10; i++)
{
    // Find the required buffer size.
    long cbBuffer;
    hr = pGrabber->GetCurrentBuffer(&cbBuffer, NULL);
    pBuffer = (BYTE*)CoTaskMemAlloc(cbBuffer);
    hr = pGrabber->GetCurrentBuffer(&cbBuffer, (long*)pBuffer);
    hr = pGrabber->GetConnectedMediaType(&mt);
    // Examine the format block.
    if ((mt.formattype == FORMAT_VideoInfo) &&
        (mt.cbFormat >= sizeof(VIDEOINFOHEADER)) &&
        (mt.pbFormat != NULL))
    {
        swprintf(temp, 5, L"%d", i);
        wcscat_s(framename, temp);
        wcscat_s(framename, L".bmp");
        VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)mt.pbFormat;
        hr = WriteBitmap((PCWSTR)framename, &pVih->bmiHeader,
                         mt.cbFormat - SIZE_PREHEADER, pBuffer, cbBuffer);
        wcscpy_s(framename, IMAGE_FILE_PATH);
    }
    else
    {
        // Invalid format.
        hr = VFW_E_INVALIDMEDIATYPE;
    }
    FreeMediaType(mt);
}
done:
CoTaskMemFree(pBuffer);
SafeRelease(&pPin);
SafeRelease(&pEnum);
SafeRelease(&pNullF);
SafeRelease(&pSourceF);
SafeRelease(&pGrabber);
SafeRelease(&pGrabberF);
SafeRelease(&pControl);
SafeRelease(&pEvent);
SafeRelease(&pGraph);
return hr;
}
The input video file has 132 frames, but only 68 images are generated, and the last frame of the video is repeated for the last 38 images.
I think the DirectShow graph is running continuously and WriteBitmap() is missing frames.
How do I get control in DirectShow to capture one frame, write it to a BMP file, capture the next frame, and thus capture all the frames as BMP images?
Thanks
Arun
Your approach is wrong. Currently you set the sample grabber to one-shot mode and then wait for graph completion; that only works for capturing a single frame. You need to capture the frames inside an ISampleGrabberCB callback on your pGrabber: implement the ISampleGrabberCB interface and use ISampleGrabber::SetCallback to point the filter at your implementation. After that you can capture the frames inside either the SampleCB or the BufferCB method. http://www.infognition.com/blog/2013/accessing_raw_video_in_directshow.html
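For illustration, here is a minimal sketch of such a callback (the class name and the commented-out WriteFrameAsBmp helper are made up; your WriteBitmap logic would go there). The second argument of SetCallback selects the method: 0 for SampleCB, 1 for BufferCB.

// Minimal ISampleGrabberCB implementation (sketch): BufferCB receives a copy
// of every frame's bytes while the graph keeps running.
class FrameWriterCB : public ISampleGrabberCB
{
    LONG m_ref;
    int m_frame;
public:
    FrameWriterCB() : m_ref(1), m_frame(0) {}
    // IUnknown
    STDMETHODIMP QueryInterface(REFIID riid, void **ppv)
    {
        if (riid == IID_IUnknown || riid == IID_ISampleGrabberCB)
        {
            *ppv = this;
            AddRef();
            return S_OK;
        }
        *ppv = NULL;
        return E_NOINTERFACE;
    }
    STDMETHODIMP_(ULONG) AddRef() { return InterlockedIncrement(&m_ref); }
    STDMETHODIMP_(ULONG) Release()
    {
        LONG ref = InterlockedDecrement(&m_ref);
        if (ref == 0) delete this;
        return ref;
    }
    // Called once per frame when SetCallback(..., 1) is used.
    STDMETHODIMP BufferCB(double SampleTime, BYTE *pBuffer, long BufferLen)
    {
        // WriteFrameAsBmp is a hypothetical helper: write pBuffer (RGB24,
        // dimensions from the connected media type) to a numbered BMP here.
        // WriteFrameAsBmp(m_frame++, pBuffer, BufferLen);
        return S_OK;
    }
    STDMETHODIMP SampleCB(double, IMediaSample *) { return S_OK; }
};

Hook it up before running the graph, and drop the one-shot mode so the graph keeps delivering frames:

hr = pGrabber->SetOneShot(FALSE);                   // let the graph run to the end
hr = pGrabber->SetCallback(new FrameWriterCB(), 1); // 1 selects BufferCB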
I am having trouble with a video recording application that I am writing using Microsoft Media Foundation.
Specifically, the read/write function (which I put in a loop that lives on its own thread) doesn't make it past the call to ReadSample:
HRESULT WinCapture::rwFunction(void) {
HRESULT hr;
DWORD streamIndex, flags;
LONGLONG llTimeStamp;
IMFSample *pSample = NULL;
EnterCriticalSection(&m_critsec);
// Read another sample.
hr = m_pReader->ReadSample(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
0,
&streamIndex, // actual
&flags,//NULL, // flags
&llTimeStamp,//NULL, // timestamp
&pSample // sample
);
    if (FAILED(hr)) { goto done; }
    hr = m_pWriter->WriteSample(0, pSample);
done:
    SafeRelease(&pSample);
    LeaveCriticalSection(&m_critsec);
    return hr;
}
The value of hr is an error code, 0xC00D3704, so the code snippet skips the call to WriteSample.
It is a lot of steps, but I am fairly certain that I am setting up m_pReader (of type IMFSourceReader *) correctly.
HRESULT WinCapture::OpenMediaSource(IMFMediaSource *pSource)
{
HRESULT hr = S_OK;
IMFAttributes *pAttributes = NULL;
hr = MFCreateAttributes(&pAttributes, 2);
// use a callback
//if (SUCCEEDED(hr))
//{
// hr = pAttributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, this);
//}
// set the desired format type
DWORD dwFormatIndex = (DWORD)formatIdx;
IMFPresentationDescriptor *pPD = NULL;
IMFStreamDescriptor *pSD = NULL;
IMFMediaTypeHandler *pHandler = NULL;
IMFMediaType *pType = NULL;
// create the source reader
if (SUCCEEDED(hr))
{
hr = MFCreateSourceReaderFromMediaSource(
pSource,
pAttributes,
&m_pReader
);
}
// steps to set the selected format type
hr = pSource->CreatePresentationDescriptor(&pPD);
if (FAILED(hr))
{
goto done;
}
BOOL fSelected;
hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, &pSD);
if (FAILED(hr))
{
goto done;
}
hr = pSD->GetMediaTypeHandler(&pHandler);
if (FAILED(hr))
{
goto done;
}
hr = pHandler->GetMediaTypeByIndex(dwFormatIndex, &pType);
if (FAILED(hr))
{
goto done;
}
hr = pHandler->SetCurrentMediaType(pType);
if (FAILED(hr))
{
goto done;
}
hr = m_pReader->SetCurrentMediaType(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
NULL,
pType
);
// set to maximum framerate?
hr = pHandler->GetCurrentMediaType(&pType);
if (FAILED(hr))
{
goto done;
}
// Get the maximum frame rate for the selected capture format.
// Note: To get the minimum frame rate, use the
// MF_MT_FRAME_RATE_RANGE_MIN attribute instead.
PROPVARIANT var;
if (SUCCEEDED(pType->GetItem(MF_MT_FRAME_RATE_RANGE_MAX, &var)))
{
hr = pType->SetItem(MF_MT_FRAME_RATE, var);
PropVariantClear(&var);
if (FAILED(hr))
{
goto done;
}
hr = pHandler->SetCurrentMediaType(pType);
if (FAILED(hr))
{
goto done;
}
hr = m_pReader->SetCurrentMediaType(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
NULL,
pType
);
}
goto done;
done:
SafeRelease(&pPD);
SafeRelease(&pSD);
SafeRelease(&pHandler);
SafeRelease(&pType);
SafeRelease(&pAttributes);
return hr;
}
This code is all copied from Microsoft documentation pages and the SDK sample code. The variable formatIdx is 0, I get it from enumerating the camera formats and choosing the first one.
UPDATE
I have rewritten this program so that it uses callbacks instead of a blocking read/write function and I have exactly the same issue.
Here I get the device and initiate the callback method:
HRESULT WinCapture::initCapture(const WCHAR *pwszFileName, IMFMediaSource *pSource) {
HRESULT hr = S_OK;
EncodingParameters params;
params.subtype = MFVideoFormat_WMV3; // TODO, paramterize this
params.bitrate = TARGET_BIT_RATE;
m_llBaseTime = 0;
IMFMediaType *pType = NULL;
DWORD sink_stream = 0;
EnterCriticalSection(&m_critsec);
hr = m_ppDevices[selectedDevice]->ActivateObject(IID_PPV_ARGS(&pSource));
//m_bIsCapturing = false; // this is set externally here
if (SUCCEEDED(hr))
hr = OpenMediaSource(pSource); // also creates the reader
if (SUCCEEDED(hr))
{
hr = m_pReader->GetCurrentMediaType(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
&pType
);
}
// Create the sink writer
if (SUCCEEDED(hr))
{
hr = MFCreateSinkWriterFromURL(
pwszFileName,
NULL,
NULL,
&m_pWriter
);
}
if (SUCCEEDED(hr))
hr = ConfigureEncoder(params, pType, m_pWriter, &sink_stream);
// kick off the recording
if (SUCCEEDED(hr))
{
m_llBaseTime = 0;
m_bIsCapturing = TRUE;
hr = m_pReader->ReadSample(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
0,
NULL,
NULL,
NULL,
NULL
);
}
SafeRelease(&pType);
SafeRelease(&pSource);
pType = NULL;
LeaveCriticalSection(&m_critsec);
return hr;
}
The OpenMediaSource method is here:
HRESULT WinCapture::OpenMediaSource(IMFMediaSource *pSource)
{
HRESULT hr = S_OK;
IMFAttributes *pAttributes = NULL;
hr = MFCreateAttributes(&pAttributes, 2);
// use a callback
if (SUCCEEDED(hr))
{
hr = pAttributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, this);
}
// set the desired format type
DWORD dwFormatIndex = (DWORD)formatIdx;
IMFPresentationDescriptor *pPD = NULL;
IMFStreamDescriptor *pSD = NULL;
IMFMediaTypeHandler *pHandler = NULL;
IMFMediaType *pType = NULL;
// create the source reader
if (SUCCEEDED(hr))
{
hr = MFCreateSourceReaderFromMediaSource(
pSource,
pAttributes,
&m_pReader
);
}
// steps to set the selected format type
if (SUCCEEDED(hr)) hr = pSource->CreatePresentationDescriptor(&pPD);
if (FAILED(hr))
{
goto done;
}
BOOL fSelected;
hr = pPD->GetStreamDescriptorByIndex(0, &fSelected, &pSD);
if (FAILED(hr))
{
goto done;
}
hr = pSD->GetMediaTypeHandler(&pHandler);
if (FAILED(hr))
{
goto done;
}
hr = pHandler->GetMediaTypeByIndex(dwFormatIndex, &pType);
if (FAILED(hr))
{
goto done;
}
hr = pHandler->SetCurrentMediaType(pType);
if (FAILED(hr))
{
goto done;
}
// get available framerates
hr = MFGetAttributeRatio(pType, MF_MT_FRAME_RATE, &frameRate, &denominator);
std::cout << "frameRate " << frameRate << " denominator " << denominator << std::endl;
hr = m_pReader->SetCurrentMediaType(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
NULL,
pType
);
// set to maximum framerate?
hr = pHandler->GetCurrentMediaType(&pType);
if (FAILED(hr))
{
goto done;
}
goto done;
done:
SafeRelease(&pPD);
SafeRelease(&pSD);
SafeRelease(&pHandler);
SafeRelease(&pType);
SafeRelease(&pAttributes);
return hr;
}
Here, formatIdx is a field of this class that gets set by the user via the GUI. I leave it at 0 for testing. So I don't think I am missing any steps to get the camera going, but maybe I am.
When I inspect which applications are using the webcam (using this method) after the call to ActivateObject, I see that my application is using the webcam, as expected. But when I enter the callback routine, I see two instances of my application using the webcam. The same happens with the blocking method.
I don't know if that is good or bad, but when I enter my callback method:
HRESULT WinCapture::OnReadSample(
HRESULT hrStatus,
DWORD /*dwStreamIndex*/,
DWORD /*dwStreamFlags*/,
LONGLONG llTimeStamp,
IMFSample *pSample // Can be NULL
)
{
EnterCriticalSection(&m_critsec);
if (!IsCapturing() || m_bIsCapturing == false)
{
LeaveCriticalSection(&m_critsec);
return S_OK;
}
HRESULT hr = S_OK;
if (FAILED(hrStatus))
{
hr = hrStatus;
goto done;
}
if (pSample)
{
if (m_bFirstSample)
{
m_llBaseTime = llTimeStamp;
m_bFirstSample = FALSE;
}
// rebase the time stamp
llTimeStamp -= m_llBaseTime;
hr = pSample->SetSampleTime(llTimeStamp);
if (FAILED(hr)) { goto done; }
hr = m_pWriter->WriteSample(0, pSample);
if (FAILED(hr)) { goto done; }
}
// Read another sample.
hr = m_pReader->ReadSample(
(DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
0,
NULL, // actual
NULL, // flags
NULL, // timestamp
NULL // sample
);
done:
if (FAILED(hr))
{
//NotifyError(hr);
}
LeaveCriticalSection(&m_critsec);
return hr;
}
hrStatus is the 0xC00D3704 error I was getting before, and the callback goes straight to done, thus killing the callbacks.
Finally, I should say that I am modeling (read: 'copying') my code on the MFCaptureToFile sample in the Windows SDK samples, and that doesn't work either. Although there I get this weird negative number for the failed HRESULT: -1072875772.
If you get error 0xC00D3704, it means the source was not initialized. Such an error can be caused by a mistake in initialization, by the camera being busy in another application (process), or by the camera not being supported by the UVC driver (old cameras ship DirectShow drivers that are only partly compatible with UVC: it is often possible to read some general info from an old camera via UVC, such as the friendly name and symbolic link, but old cameras follow the DirectShow PUSH model, where the camera pushes bytes into the pipeline, while Media Foundation PULLs data: it sends a special signal and waits for data).
To check your code, I would advise researching articles about capturing video from a webcam on CodeProject; search for the name "videoInput".
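As a side note, the "weird negative number" mentioned above is the very same HRESULT, just printed as a signed decimal. A tiny self-contained check (nothing assumed beyond the number itself):

#include <cstdio>

int main()
{
    int hr = -1072875772;                       // the "weird negative number"
    std::printf("hr = 0x%08X\n", (unsigned)hr); // prints hr = 0xC00D3704
    return 0;
}

So both the blocking version and the callback version are failing with the same 0xC00D3704 error.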
Scenario:
1. Load a TIFF image, extract the frames of the TIFF image, and save them locally.
2. Combine the extracted frames into the output TIFF image.
When I try to combine the frames, the size of the output TIFF image increases drastically. For example, if my input size is 40 MB, the output grows to 300 MB.
Below is the code, which demonstrates the scenario:
void readTiff()
{
HRESULT hr;
IWICBitmapFrameDecode *frameDecode = NULL;
IWICFormatConverter *formatConverter = NULL;
IWICBitmapEncoder *encoder = NULL;
IWICStream *pOutStream = NULL;
IWICBitmapFrameEncode *frameEncode = NULL;
IWICImagingFactory* m_pWICFactory;
hr = CoCreateInstance(
CLSID_WICImagingFactory,
NULL,
CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&m_pWICFactory)
);
IWICBitmapDecoder *pIDecoder = NULL;
hr = m_pWICFactory->CreateDecoderFromFilename(
L"D:\\test28\\Multitiff_files\\300dpiTIFF40MB_WATER.tif", // Image to be decoded
NULL, // Do not prefer a particular vendor
GENERIC_READ, // Desired read access to the file
WICDecodeMetadataCacheOnDemand, // Cache metadata when needed
&pIDecoder // Pointer to the decoder
);
UINT frameCount = 0;
pIDecoder->GetFrameCount(&frameCount);
for (int i = 0; i < frameCount; i++)
{
wchar_t temp[200];
int j = i;
swprintf_s(temp, 200, L"D:\\test28\\Multitiff_files\\out\\filename_png%d.jpeg", i);
if (SUCCEEDED(hr))
hr = m_pWICFactory->CreateStream(&pOutStream);
if (SUCCEEDED(hr))
hr = pOutStream->InitializeFromFilename(temp, GENERIC_WRITE);
if (SUCCEEDED(hr))
hr = m_pWICFactory->CreateEncoder(GUID_ContainerFormatJpeg, NULL, &encoder);
if (SUCCEEDED(hr))
hr = encoder->Initialize(pOutStream, WICBitmapEncoderNoCache);
hr = pIDecoder->GetFrame(i, &frameDecode);
if (SUCCEEDED(hr))
hr = m_pWICFactory->CreateFormatConverter(&formatConverter);
hr = formatConverter->Initialize(
frameDecode, // Source frame to convert
GUID_WICPixelFormat8bppIndexed, // The desired pixel format
WICBitmapDitherTypeNone, // The desired dither pattern
NULL, // The desired palette
0.f, // The desired alpha threshold
WICBitmapPaletteTypeMedianCut // Palette translation type
);
IPropertyBag2 *pPropertybag = NULL;
//Create a new frame..
hr = encoder->CreateNewFrame(&frameEncode, &pPropertybag);
//PROPBAG2 option = { 0 };
//option.pstrName = L"ImageQuality";
//VARIANT varValue;
//VariantInit(&varValue);
//varValue.vt = VT_R4;
//varValue.fltVal = 0.01f;
//hr = pPropertybag->Write(
// 1, // number of properties being set
// &option,
// &varValue);
WICPixelFormatGUID pixelFormat;
hr = frameEncode->Initialize(pPropertybag);
//pixelFormat = GUID_WICPixelFormat8bppIndexed;
frameDecode->GetPixelFormat(&pixelFormat);
hr = frameEncode->SetPixelFormat(&pixelFormat);
hr = frameEncode->WriteSource(formatConverter, NULL);
frameEncode->Commit();
encoder->Commit();
if (formatConverter)
formatConverter->Release();
if (frameDecode)
frameDecode->Release();
if (frameEncode)
frameEncode->Release();
if (pOutStream)
pOutStream->Release();
if (encoder)
encoder->Release();
}
if (m_pWICFactory)
m_pWICFactory->Release();
pIDecoder->Release();
}
void combineFile(int count)
{
HRESULT hr;
IWICImagingFactory *pFactory = NULL;
IWICStream *pInStream = NULL;
IWICBitmapDecoder *decoder = NULL;
IWICBitmapFrameDecode *frameDecode = NULL;
IWICFormatConverter *formatConverter = NULL;
IWICBitmapEncoder *encoder = NULL;
IWICStream *pOutStream = NULL;
IWICBitmapFrameEncode *frameEncode = NULL;
hr = CoCreateInstance(CLSID_WICImagingFactory, NULL, CLSCTX_INPROC_SERVER, IID_IWICImagingFactory, (LPVOID*)&pFactory);
if (!SUCCEEDED(hr)) {
hr = CoCreateInstance(CLSID_WICImagingFactory, NULL, CLSCTX_INPROC_SERVER, IID_IWICImagingFactory, (LPVOID*)&pFactory);
}
if (SUCCEEDED(hr))
hr = pFactory->CreateStream(&pOutStream);
if (SUCCEEDED(hr))
hr = pOutStream->InitializeFromFilename(L"D:\\test28\\Multitiff_files\\out\\out.tiff", GENERIC_WRITE);
if (SUCCEEDED(hr))
hr = pFactory->CreateEncoder(GUID_ContainerFormatWmp, NULL, &encoder);
if (SUCCEEDED(hr))
hr = encoder->Initialize(pOutStream, WICBitmapEncoderNoCache);
for (int i = 0; i < count; i++)
{
wchar_t temp[200];
swprintf_s(temp, 200, L"D:\\test28\\Multitiff_files\\out\\filename_png%d.jpeg", i);
if (SUCCEEDED(hr))
hr = pFactory->CreateStream(&pInStream);
if (SUCCEEDED(hr))
hr = pInStream->InitializeFromFilename(temp, GENERIC_READ);
if (SUCCEEDED(hr))
hr = pFactory->CreateDecoderFromStream(pInStream, NULL, WICDecodeMetadataCacheOnLoad, &decoder);
if (SUCCEEDED(hr))
hr = pFactory->CreateFormatConverter(&formatConverter);
hr = decoder->GetFrame(0, &frameDecode);
//hr = formatConverter->Initialize(
// frameDecode, // Source frame to convert
// GUID_WICPixelFormat8bppIndexed, // The desired pixel format
// WICBitmapDitherTypeNone, // The desired dither pattern
// NULL, // The desired palette
// 0.f, // The desired alpha threshold
// WICBitmapPaletteTypeMedianCut // Palette translation type
// );
WICPixelFormatGUID pixelFormat;
frameDecode->GetPixelFormat(&pixelFormat);
IPropertyBag2 *pPropertybag = NULL;
//Create a new frame..
hr = encoder->CreateNewFrame(&frameEncode, &pPropertybag);
PROPBAG2 option = { 0 };
option.pstrName = L"TiffCompressionMethod";
VARIANT varValue;
VariantInit(&varValue);
varValue.vt = VT_UI1;
varValue.bVal = WICTiffCompressionOption::WICTiffCompressionZIP;
hr = pPropertybag->Write(1, &option, &varValue);
hr = frameEncode->Initialize(pPropertybag);
hr = frameEncode->SetPixelFormat(&pixelFormat);
hr = frameEncode->WriteSource(formatConverter, NULL);
hr = frameEncode->Commit();
if (pInStream)
pInStream->Release();
if (decoder)
decoder->Release();
if (formatConverter)
formatConverter->Release();
if (frameEncode)
frameEncode->Release();
//if (frameDecode)
//frameDecode->Release();
}
encoder->Commit();
if (pFactory)
pFactory->Release();
if (encoder)
encoder->Release();
if (pOutStream)
pOutStream->Release();
}
One easy way to debug this kind of issue is to use ImageMagick, which is installed on most Linux distros and is available for OSX and Windows. First use the identify utility within the suite with its verbose option to find out everything about your before and after images, like this:
# Find out all we know about first image and put in file "1.txt"
identify -verbose image1.tif > 1.txt
# Find out all we know about second image and put in file "2.txt"
identify -verbose image2.tif > 2.txt
Now use your favourite file comparison tool to see the differences:
opendiff 1.txt 2.txt
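If the verbose dumps are too noisy, identify's -format option can print just the fields that usually explain a size blow-up; the %C, %z and %b escapes are compression, depth and file size:

# Show just compression, bit depth and file size for each image
identify -format "compression=%C depth=%z filesize=%b\n" image1.tif image2.tif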
I am new to DirectShow, am using the DirectShow sample "FrameGrabberDemo", and am facing an issue getting the image. I tried with .avi and .mpg; both give the same issue.
The first issue might be the S_FALSE return value from IMediaControl::Run(). However, it is not an error; the documentation states:
The graph is preparing to run, but some filters have not completed the
transition to a running state.
The second observation is that ISampleGrabber::GetCurrentBuffer() returns E_OUTOFMEMORY, which means "The specified buffer is not large enough." However, the bitmap header has biSizeImage = 1244160 and the media type has lSampleSize = 1244160.
HRESULT CFrameGrabberDemoDlg::DoExtractFrame()
{
WCHAR wFile[MAX_PATH];
MultiByteToWideChar( CP_ACP, 0, m_FilePath, -1, wFile, MAX_PATH );
// Create the graph builder
CComPtr<IGraphBuilder> pGraphBuilder;
HRESULT hr = ::CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
IID_IGraphBuilder, (void**)&pGraphBuilder);
if (FAILED(hr))
return hr;
ASSERT(pGraphBuilder != NULL);
// Create the "Grabber filter"
CComPtr<IBaseFilter> pGrabberBaseFilter;
CComPtr<ISampleGrabber> pSampleGrabber;
AM_MEDIA_TYPE mt;
hr = ::CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_IBaseFilter, (LPVOID *)&pGrabberBaseFilter);
if (FAILED(hr))
return hr;
pGrabberBaseFilter->QueryInterface(IID_ISampleGrabber, (void**)&pSampleGrabber);
if (pSampleGrabber == NULL)
return E_NOINTERFACE;
hr = pGraphBuilder->AddFilter(pGrabberBaseFilter,L"Grabber");
if (FAILED(hr))
return hr;
ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
mt.majortype = MEDIATYPE_Video;
mt.subtype = MEDIASUBTYPE_RGB24;
mt.formattype = FORMAT_VideoInfo;
hr = pSampleGrabber->SetMediaType(&mt);
if (FAILED(hr))
return hr;
hr = pGraphBuilder->RenderFile(wFile,NULL);
if (FAILED(hr))
return hr;
CComPtr<IMediaControl> pMediaControl;
CComPtr<IMediaEvent> pMediaEventEx;
// QueryInterface for some basic interfaces
pGraphBuilder->QueryInterface(IID_IMediaControl, (void **)&pMediaControl);
pGraphBuilder->QueryInterface(IID_IMediaEvent, (void **)&pMediaEventEx);
if (pMediaControl == NULL || pMediaEventEx == NULL)
return E_NOINTERFACE;
// Set up one-shot mode.
hr = pSampleGrabber->SetBufferSamples(TRUE);
if (FAILED(hr))
return hr;
hr = pSampleGrabber->SetOneShot(TRUE);
if (FAILED(hr))
return hr;
CComQIPtr<IMediaSeeking> pSeek = pMediaControl;
if (pSeek == NULL)
return E_NOINTERFACE;
LONGLONG Duration;
hr = pSeek->GetDuration(&Duration);
if (FAILED(hr))
return hr;
int NumSecs = int(Duration/10000000);
REFERENCE_TIME rtStart = 1 * 10000000;
if (NumSecs < 1)
rtStart = 0;
REFERENCE_TIME rtStop = rtStart;
hr = pSeek->SetPositions(&rtStart, AM_SEEKING_AbsolutePositioning,
&rtStop, AM_SEEKING_AbsolutePositioning);
if (FAILED(hr))
return hr;
CComQIPtr<IVideoWindow> pVideoWindow = pGraphBuilder;
hr = pVideoWindow->put_AutoShow(OAFALSE);
if (FAILED(hr))
return hr;
// Run the graph and wait for completion.
hr = pMediaControl->Run();
if (FAILED(hr))
return hr;
long evCode;
hr = pMediaEventEx->WaitForCompletion(INFINITE, &evCode);
if (FAILED(hr))
return hr;
AM_MEDIA_TYPE MediaType;
ZeroMemory(&MediaType,sizeof(MediaType));
hr = pSampleGrabber->GetConnectedMediaType(&MediaType);
if (FAILED(hr))
return hr;
// Get a pointer to the video header.
VIDEOINFOHEADER *pVideoHeader = (VIDEOINFOHEADER*)MediaType.pbFormat;
if (pVideoHeader == NULL)
return E_FAIL;
// The video header contains the bitmap information.
// Copy it into a BITMAPINFO structure.
BITMAPINFO BitmapInfo;
ZeroMemory(&BitmapInfo, sizeof(BitmapInfo));
CopyMemory(&BitmapInfo.bmiHeader, &(pVideoHeader->bmiHeader), sizeof(BITMAPINFOHEADER));
// Create a DIB from the bitmap header, and get a pointer to the buffer.
void *buffer = NULL;
HBITMAP hBitmap = ::CreateDIBSection(0, &BitmapInfo, DIB_RGB_COLORS, &buffer, NULL, 0);
GdiFlush();
// Copy the image into the buffer.
long size = 0;
hr = pSampleGrabber->GetCurrentBuffer(&size, (long *)buffer);
if (FAILED(hr))
return hr;
long Width = pVideoHeader->bmiHeader.biWidth;
long Height = pVideoHeader->bmiHeader.biHeight;
HBITMAP hOldBitmap = m_Image.SetBitmap(hBitmap);
if (hOldBitmap != NULL)
::DeleteObject(hOldBitmap);
return S_OK;
}
::CreateDIBSection is also not returning NULL, and buffer gets initialized.
How can this be resolved?
You are requesting data into a zero-length buffer:
long size = 0;
hr = pSampleGrabber->GetCurrentBuffer(&size, (long *)buffer);
The error code matches what the documentation for GetCurrentBuffer says:
If pBuffer is not NULL, set this parameter equal to the size of the buffer, in bytes.
E_OUTOFMEMORY: The specified buffer is not large enough.
You simply need to pass proper arguments in the call in question (a proper buffer length).
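A minimal sketch of the fix (this assumes buffer is the DIB section created above, which is biSizeImage bytes and therefore large enough for one frame):

// Ask the grabber how many bytes it holds, then fetch them into the DIB bits.
long size = 0;
hr = pSampleGrabber->GetCurrentBuffer(&size, NULL);      // query required size only
if (SUCCEEDED(hr) && size <= (long)BitmapInfo.bmiHeader.biSizeImage)
{
    hr = pSampleGrabber->GetCurrentBuffer(&size, (long *)buffer);
}

Alternatively, skip the query and pass biSizeImage directly, since the DIB section was created from the very same bitmap header.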