Render an IDirect3DSurface9 from DXVA2? - c++

I got an IDirect3DSurface9 from the DXVA2 video decoder using hardware acceleration.
I'm trying to render this hardware IDirect3DSurface9 onto my window via its handle. The following is a summary of my code.
First, I call dxva2_init(AVCodecContext *s, HWND hwnd); where hwnd is the window's handle:
int dxva2_init(AVCodecContext *s, HWND hwnd)
{
    InputStream *ist = (InputStream *)s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx;
    int ret;

    if (!ist->hwaccel_ctx) {
        ret = dxva2_alloc(s);
        if (ret < 0)
            return ret;
    }
    ctx = (DXVA2Context *)ist->hwaccel_ctx;
    ctx->deviceHandle = hwnd;

    if (s->codec_id == AV_CODEC_ID_H264 &&
        (s->profile & ~FF_PROFILE_H264_CONSTRAINED) > FF_PROFILE_H264_HIGH) {
        av_log(NULL, loglevel, "Unsupported H.264 profile for DXVA2 HWAccel: %d\n", s->profile);
        return AVERROR(EINVAL);
    }
    if (ctx->decoder)
        dxva2_destroy_decoder(s);

    ret = dxva2_create_decoder(s);
    if (ret < 0) {
        av_log(NULL, loglevel, "Error creating the DXVA2 decoder\n");
        return ret;
    }
    return 0;
}
After decoding succeeds, I get an IDirect3DSurface9, and I render it with the following function.
int dxva2_render(AVCodecContext *s, AVFrame *frame)
{
    LPDIRECT3DSURFACE9 surface = (LPDIRECT3DSURFACE9)frame->data[3];
    InputStream *ist = (InputStream *)s->opaque;
    DXVA2Context *ctx = (DXVA2Context *)ist->hwaccel_ctx;
    LPDIRECT3DSURFACE9 pBackBuffer = NULL;
    __try // MSVC structured exception handling; plain try/finally is not standard C++
    {
        lockRenderCS.Enter();
        HRESULT hr = ctx->d3d9device->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(255, 0, 0), 1.0f, 0);
        if (hr != D3D_OK)
            return 0;
        hr = ctx->d3d9device->BeginScene();
        if (hr != D3D_OK)
            return 0;
        hr = ctx->d3d9device->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &pBackBuffer);
        if (hr != D3D_OK)
            return 0;
        hr = ctx->d3d9device->StretchRect(surface, NULL, pBackBuffer, NULL, D3DTEXF_LINEAR);
        if (hr != D3D_OK)
            return 0;
        hr = ctx->d3d9device->EndScene();
        if (hr != D3D_OK)
            return 0;
        hr = ctx->d3d9device->Present(NULL, NULL, NULL, NULL);
        if (hr != D3D_OK)
            return 0;
    }
    __finally
    {
        if (pBackBuffer)
            pBackBuffer->Release(); // GetBackBuffer adds a reference
        lockRenderCS.Leave();
    }
    return 0;
}
Note: all of the D3D calls above (Clear(), BeginScene(), GetBackBuffer(), StretchRect(), EndScene(), Present()) returned successfully, but the frame is not displayed on my window.
I guess that I am missing some code to integrate my window handle with the DXVA2Context. In this code I only assign ctx->deviceHandle = hwnd; in dxva2_init().
I have searched many times, but so far I still cannot find the solution. Can anyone help me?
Many thanks!

I could help more with the full code.
Without the full code, I suggest you check your use of StretchRect against the documented restrictions:
IDirect3DDevice9::StretchRect
StretchRect Restrictions
Driver support varies. See the section on driver support (below) to see which drivers support which source and destination formats.
The source and destination surfaces must be created in the default memory pool.
If filtering is specified, you must set the appropriate filter caps (see StretchRectFilterCaps in D3DCAPS9).
etc...
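For example, the filter-caps requirement above can be checked like this (a minimal sketch, assuming ctx->d3d9device is the IDirect3DDevice9 the StretchRect call is made on):
D3DCAPS9 caps;
HRESULT hr = ctx->d3d9device->GetDeviceCaps(&caps);
if (SUCCEEDED(hr) &&
    (!(caps.StretchRectFilterCaps & D3DPTFILTERCAPS_MINFLINEAR) ||
     !(caps.StretchRectFilterCaps & D3DPTFILTERCAPS_MAGFLINEAR)))
{
    // Linear filtering is not supported for StretchRect on this device;
    // fall back to D3DTEXF_NONE instead of D3DTEXF_LINEAR.
}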
The idea behind this: we know nothing about your d3d9device/surface9 (creation, initialization, parameters, etc.), for example.
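In particular, one thing worth verifying (an assumption on my side, since your device-creation code is not shown): the window handle has to be wired into the device itself when it is created, not only stored in DXVA2Context. A typical windowed-mode creation sketch, where pD3D9 and pDevice are placeholder names:
D3DPRESENT_PARAMETERS d3dpp = {};
d3dpp.Windowed = TRUE;
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dpp.hDeviceWindow = hwnd;              // the window Present() will target
d3dpp.BackBufferFormat = D3DFMT_UNKNOWN; // use the current display format
HRESULT hr = pD3D9->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hwnd,
    D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED,
    &d3dpp, &pDevice);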
Also, you can study the code from my project that does DXVA2 decoding:
H264Dxva2Decoder

Related

DirectX11 DXGI Capture Screen when in full screen

I'm currently trying to develop a personal project where I will light up LEDs according to what's happening on the screen of my computer.
I've tried several solutions to capture my screen, and DirectX 11 with DXGI is the fastest way I found to get a good frame rate.
My only issue is the following: when in full screen (for example, watching Netflix through the Win10 app or playing any game in fullscreen), it seems that nothing is captured. I have two functions, one to set things up and another one to capture a frame.
Setup function:
bool DXGIScreenCapturer::Init() {
    int lresult(-1);
    D3D_FEATURE_LEVEL lFeatureLevel;
    HRESULT hr(E_FAIL);
    hr = D3D11CreateDevice(
        nullptr,
        D3D_DRIVER_TYPE_HARDWARE,
        nullptr,
        0,
        gFeatureLevels,
        gNumFeatureLevels,
        D3D11_SDK_VERSION,
        &_lDevice,
        &lFeatureLevel,
        &_lImmediateContext);
    if (FAILED(hr))
        return false;
    if (!_lDevice)
        return false;
    // Get DXGI device
    ComPtr<IDXGIDevice> lDxgiDevice;
    hr = _lDevice.As(&lDxgiDevice);
    if (FAILED(hr))
        return false;
    // Get DXGI adapter
    ComPtr<IDXGIAdapter> lDxgiAdapter;
    hr = lDxgiDevice->GetParent(__uuidof(IDXGIAdapter), &lDxgiAdapter);
    if (FAILED(hr))
        return false;
    lDxgiDevice.Reset();
    UINT Output = 0;
    // Get output
    ComPtr<IDXGIOutput> lDxgiOutput;
    hr = lDxgiAdapter->EnumOutputs(Output, &lDxgiOutput);
    if (FAILED(hr))
        return false;
    lDxgiAdapter.Reset();
    hr = lDxgiOutput->GetDesc(&_lOutputDesc);
    if (FAILED(hr))
        return false;
    // QI for Output 1
    ComPtr<IDXGIOutput1> lDxgiOutput1;
    hr = lDxgiOutput.As(&lDxgiOutput1);
    if (FAILED(hr))
        return false;
    lDxgiOutput.Reset();
    // Create desktop duplication
    hr = lDxgiOutput1->DuplicateOutput(_lDevice.Get(), &_lDeskDupl);
    if (FAILED(hr))
        return false;
    lDxgiOutput1.Reset();
    // Create GUI drawing texture
    _lDeskDupl->GetDesc(&_lOutputDuplDesc);
    // Create CPU access texture
    _desc.Width = _lOutputDuplDesc.ModeDesc.Width;
    _desc.Height = _lOutputDuplDesc.ModeDesc.Height;
    _desc.Format = _lOutputDuplDesc.ModeDesc.Format;
    std::cout << _desc.Width << "x" << _desc.Height << "\n\n\n";
    _desc.ArraySize = 1;
    _desc.BindFlags = 0;
    _desc.MiscFlags = 0;
    _desc.SampleDesc.Count = 1;
    _desc.SampleDesc.Quality = 0;
    _desc.MipLevels = 1;
    _desc.CPUAccessFlags = D3D11_CPU_ACCESS_FLAG::D3D11_CPU_ACCESS_READ;
    _desc.Usage = D3D11_USAGE::D3D11_USAGE_STAGING;
    while (!CaptureScreen(_result));
    _result = cv::Mat(_desc.Height, _desc.Width, CV_8UC4, _resource.pData);
    return true;
}
And the capture function:
bool DXGIScreenCapturer::CaptureScreen(cv::Mat& output)
{
    HRESULT hr(E_FAIL);
    ComPtr<IDXGIResource> lDesktopResource = nullptr;
    DXGI_OUTDUPL_FRAME_INFO lFrameInfo;
    ID3D11Texture2D* currTexture = NULL;
    hr = _lDeskDupl->AcquireNextFrame(999, &lFrameInfo, &lDesktopResource);
    if (FAILED(hr))
        return false;
    if (lFrameInfo.LastPresentTime.HighPart == 0) // not interested in mouse-only updates, which can happen much faster than 60 fps if you really shake the mouse
    {
        hr = _lDeskDupl->ReleaseFrame();
        return false;
    }
    //int accum_frames = lFrameInfo.AccumulatedFrames;
    //if (accum_frames > 1) {// && current_frame != 1) {
    //    // TOO MANY OF THESE is the problem
    //    // especially after having to wait >17ms in AcquireNextFrame()
    //}
    // QI for ID3D11Texture2D
    hr = lDesktopResource.As(&_lAcquiredDesktopImage);
    // Copy image into a newly created CPU access texture
    hr = _lDevice->CreateTexture2D(&_desc, NULL, &currTexture);
    if (FAILED(hr))
    {
        hr = _lDeskDupl->ReleaseFrame();
        return false;
    }
    if (!currTexture)
    {
        hr = _lDeskDupl->ReleaseFrame();
        return false;
    }
    _lImmediateContext->CopyResource(currTexture, _lAcquiredDesktopImage.Get());
    UINT subresource = D3D11CalcSubresource(0, 0, 0);
    // Note: _resource.pData is read after Unmap (see Init above); strictly,
    // the texture should stay mapped while the pixel data is consumed.
    _lImmediateContext->Map(currTexture, subresource, D3D11_MAP_READ, 0, &_resource);
    _lImmediateContext->Unmap(currTexture, 0);
    currTexture->Release();
    hr = _lDeskDupl->ReleaseFrame();
    output = _result;
    return true;
}
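For reference, a typical way to drive the two functions above might look like this (a sketch; DXGIScreenCapturer is the class from the question and the display loop is only illustrative):
DXGIScreenCapturer capturer;
if (capturer.Init()) // sets up duplication and grabs a first frame
{
    cv::Mat frame;
    for (;;)
    {
        if (!capturer.CaptureScreen(frame))
            continue; // timeout or mouse-only update; try again
        cv::imshow("desktop", frame);
        if (cv::waitKey(1) == 27) // Esc to quit
            break;
    }
}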

Media Foundation - How to select input from crossbar?

Good Evening
I'm trying to put together a little video capture DLL using Media Foundation, largely adapted from the SimpleCapture and CaptureEngine SDK samples. The whole thing works great when the capture source is my built-in webcam.
The problem is that when I try using an external USB frame grabber with multiple inputs (S-Video, composite & stereo audio), there is no preview stream. I have a function that uses the DirectShow interfaces and seems to successfully change the selected input on the crossbar (verified by looking at the options in AMCAP after running the function). However, I have not been able to get the device to preview at all.
Is there a Media Foundation way of selecting inputs from the crossbar? If not, can you suggest what may be wrong with the crossbar input selection code below?
HRESULT CaptureManager::doSelectInputUsingCrossbar(std::wstring deviceName, long input)
{
    IGraphBuilder *pGraph = NULL;
    ICaptureGraphBuilder2 *pBuilder = NULL;
    IBaseFilter *pSrc = NULL;
    ICreateDevEnum *pDevEnum = NULL;
    IEnumMoniker *pClassEnum = NULL;
    IMoniker *pMoniker = NULL;
    IAMCrossbar *pxBar = NULL;
    bool bCameraFound = false;
    IPropertyBag *pPropBag = NULL;
    bool crossbarSet = false;
    HRESULT hr = S_OK;
    // Declared up front so the gotos below do not jump over initializations.
    LONG lInpin = 0, lOutpin = 0;
    BOOL IPin = TRUE; LONG pIndex = 0, pRIndex = 0, pType = 0;
    BOOL OPin = FALSE; LONG pOIndex = 0, pORIndex = 0, pOType = 0;
    if (!(input == PhysConn_Video_Composite || input == PhysConn_Video_SVideo))
    {
        return S_FALSE;
    }
    // Create the Filter Graph Manager.
    hr = CoCreateInstance(CLSID_FilterGraph, NULL,
        CLSCTX_INPROC_SERVER, IID_IGraphBuilder, (void **)&pGraph);
    if (SUCCEEDED(hr))
    {
        // Create the Capture Graph Builder.
        hr = CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL,
            CLSCTX_INPROC_SERVER, IID_ICaptureGraphBuilder2,
            (void **)&pBuilder);
        if (SUCCEEDED(hr))
        {
            pBuilder->SetFiltergraph(pGraph);
        }
        else return hr;
    }
    else return hr;
    //////////////////////////
    // Choose the capture filter that matches the requested device name.
    hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC,
        IID_ICreateDevEnum, (void **)&pDevEnum);
    if (FAILED(hr))
    {
        return E_FAIL;
    }
    // Create an enumerator for video capture devices.
    if (FAILED(pDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pClassEnum, 0)))
    {
        return E_FAIL;
    }
    if (pClassEnum == NULL)
    {
        CheckHR(hr, "Class enumerator is null - no input devices detected?");
        pDevEnum->Release();
        return E_FAIL;
    }
    while (!bCameraFound && (pClassEnum->Next(1, &pMoniker, NULL) == S_OK))
    {
        HRESULT hr = pMoniker->BindToStorage(0, 0, IID_IPropertyBag, (void **)(&pPropBag));
        if (FAILED(hr))
        {
            pMoniker->Release();
            continue; // Skip this one, maybe the next one will work.
        }
        // Find the description or friendly name.
        VARIANT varName;
        VariantInit(&varName);
        hr = pPropBag->Read(L"Description", &varName, 0);
        if (FAILED(hr))
        {
            hr = pPropBag->Read(L"FriendlyName", &varName, 0);
        }
        if (SUCCEEDED(hr))
        {
            if (0 == wcscmp(varName.bstrVal, deviceName.c_str()))
            {
                pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void **)&pSrc);
                bCameraFound = true;
                break;
            }
            VariantClear(&varName);
        }
        pPropBag->Release();
        pMoniker->Release();
    }
    pClassEnum->Release();
    pDevEnum->Release();
    if (!bCameraFound)
    {
        CheckHR(hr, "Error: Get device Moniker, No device found");
        goto done;
    }
    hr = pGraph->AddFilter(pSrc, L"video capture adapter");
    if (FAILED(hr))
    {
        CheckHR(hr, "Can't add capture device to graph");
        goto done;
    }
    ///////////////
    // Look for the crossbar upstream of the capture pin; try the interleaved
    // media type first and fall back to plain video.
    hr = pBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Interleaved, pSrc, IID_IAMCrossbar, (void **)&pxBar);
    if (FAILED(hr))
    {
        hr = pBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, pSrc, IID_IAMCrossbar, (void **)&pxBar);
        if (FAILED(hr))
        {
            CheckHR(hr, "Failed to get crossbar filter");
            goto done;
        }
    }
    hr = pxBar->get_PinCounts(&lOutpin, &lInpin);
    // Find the input pin whose physical type matches the requested input.
    while (pIndex < lInpin)
    {
        hr = pxBar->get_CrossbarPinInfo(IPin, pIndex, &pRIndex, &pType);
        if (pType == input)
        {
            break;
        }
        pIndex++;
    }
    // Find the video decoder output pin.
    while (pOIndex < lOutpin)
    {
        hr = pxBar->get_CrossbarPinInfo(OPin, pOIndex, &pORIndex, &pOType);
        if (pOType == PhysConn_Video_VideoDecoder)
        {
            break;
        }
        pOIndex++; // note: incrementing pIndex here would never advance this loop
    }
    hr = pxBar->Route(pOIndex, pIndex);
done:
    SafeRelease(&pPropBag);
    SafeRelease(&pMoniker);
    SafeRelease(&pxBar);
    SafeRelease(&pGraph);
    SafeRelease(&pBuilder);
    SafeRelease(&pSrc);
    return hr;
}
USB frame grabbers are not supported by Microsoft Media Foundation. Media Foundation supports live sources through the USB Video Class (UVC) driver; modern web cameras ship with such a driver, but USB frame grabbers do not.
USB frame grabbers ship DirectShow filters, which are based on a push-model processing pipeline: sources send media samples into the media graph. Microsoft Media Foundation uses a pull-model processing pipeline: sinks send requests for new media samples and sources "listen" for such requests.
Best regards.
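As a quick check of the point above, you can list the devices Media Foundation itself can see; if the grabber only ships a DirectShow filter, it will not appear (a minimal sketch using the standard MF device-enumeration calls):
IMFAttributes *pAttrs = NULL;
IMFActivate **ppDevices = NULL;
UINT32 count = 0;
HRESULT hr = MFCreateAttributes(&pAttrs, 1);
if (SUCCEEDED(hr))
    hr = pAttrs->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
                         MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID);
if (SUCCEEDED(hr))
    hr = MFEnumDeviceSources(pAttrs, &ppDevices, &count);
// If count is 0, or the grabber is missing from the list,
// Media Foundation cannot use it as a capture source.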

Microsoft SDK AMCap GetCurrentImage error

I am trying to modify the existing AmCap application, available through Microsoft's DirectShow SDK samples, in order to grab an image of the captured stream when I press the space bar. Below is the point at which I handle the space keydown.
case WM_KEYDOWN:
    if ((GetAsyncKeyState(VK_ESCAPE) & 0x01) && gcap.fCapturing)
    {
        StopCapture();
        if (gcap.fWantPreview)
        {
            BuildPreviewGraph();
            StartPreview();
        }
    }
    else if ((GetAsyncKeyState(VK_SPACE) & 0x01))
    {
        IMediaControl *pMC = NULL;
        HRESULT hr = gcap.pFg->QueryInterface(IID_IMediaControl, (void **)&pMC);
        if (SUCCEEDED(hr)) {
            hr = pMC->Pause();
            if (SUCCEEDED(hr)) {
                CaptureImage(TEXT("C:\\output.bmp"));
                pMC->Run();
                pMC->Release();
            } else
                ErrMsg(TEXT("Failed to pause stream! hr=0x%x"), hr);
        }
        else
            ErrMsg(TEXT("Cannot grab image"));
    }
    break;
Below are the contents of the CaptureImage function.
HRESULT hr;
SmartPtr<IBasicVideo> pWC;
// If we got here, gcap.pVW is not NULL
ASSERT(gcap.pVW != NULL);
hr = gcap.pVW->QueryInterface(IID_IBasicVideo, (void**)&pWC);
if (pWC)
{
    long bufSize;
    long *lpCurrImage = NULL;
    // First call with a NULL buffer just queries the required size.
    pWC->GetCurrentImage(&bufSize, NULL);
    lpCurrImage = (long *)malloc(bufSize);
    //
    // Read the current video frame into a byte buffer. The information
    // will be returned in a packed Windows DIB and will be allocated
    // by the VMR.
    hr = pWC->GetCurrentImage(&bufSize, lpCurrImage);
    if (SUCCEEDED(hr))
    {
        HANDLE fh;
        BITMAPFILEHEADER bmphdr;
        BITMAPINFOHEADER bmpinfo;
        DWORD nWritten;
        memset(&bmphdr, 0, sizeof(bmphdr));
        memset(&bmpinfo, 0, sizeof(bmpinfo));
        bmphdr.bfType = ('M' << 8) | 'B'; // 'BM' in little-endian order
        bmphdr.bfSize = sizeof(bmphdr) + sizeof(bmpinfo) + bufSize;
        bmphdr.bfOffBits = sizeof(bmphdr) + sizeof(bmpinfo);
        bmpinfo.biSize = sizeof(bmpinfo);
        bmpinfo.biWidth = 640;
        bmpinfo.biHeight = 480;
        bmpinfo.biPlanes = 1;
        bmpinfo.biBitCount = 32;
        fh = CreateFile(TEXT("C:\\Users\\mike\\Desktop\\output.bmp"),
            GENERIC_WRITE, 0, NULL,
            CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
        WriteFile(fh, &bmphdr, sizeof(bmphdr), &nWritten, NULL);
        WriteFile(fh, &bmpinfo, sizeof(bmpinfo), &nWritten, NULL);
        WriteFile(fh, lpCurrImage, bufSize, &nWritten, NULL);
        CloseHandle(fh);
        ErrMsg(TEXT("Captured current image to %s"), szFile); // szFile is defined elsewhere in AmCap
        free(lpCurrImage);
        return TRUE;
    }
    else
    {
        ErrMsg(TEXT("Failed to capture image! hr=0x%x"), hr);
        free(lpCurrImage);
        return FALSE;
    }
}
return FALSE;
The problem is that when I run the app, I receive an HRESULT of 0x8000FFFF (E_UNEXPECTED) when GetCurrentImage is called.
On the other hand, when I run the app under the VS debugger, the code works just fine.
I tried adding a Sleep just after pMC->Pause(), assuming the problem was a timing issue, but that did not work.
Any ideas would be extremely helpful!
Thank you in advance.
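One thing worth trying (an assumption, not a verified fix for this particular error): IMediaControl::Pause can return S_FALSE while the state transition is still in progress, and IMediaControl::GetState waits for it to complete, which is more reliable than a fixed Sleep:
hr = pMC->Pause();
if (hr == S_FALSE)
{
    // The transition is asynchronous; wait up to 2 seconds for the graph
    // to actually reach the paused state before grabbing the image.
    OAFilterState state;
    hr = pMC->GetState(2000, &state);
}
if (SUCCEEDED(hr))
    CaptureImage(TEXT("C:\\output.bmp"));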

Video captured by Media Foundation is vertically mirrored

I'm using a Media Foundation IMFSourceReaderCallback implementation to grab video frames from the camera, and OpenCV's imshow to present the frames in a loop.
However, I get the frames vertically flipped...
Is this a bug? Should I set some attribute to avoid this?
Here is my code:
Initialization:
IMFAttributes* pDeviceAttrs, *pReaderAttrs;
hr = MFCreateAttributes(&pDeviceAttrs, 1);
if (FAILED(hr)) goto Exit;
hr = pDeviceAttrs->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE, MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID);
if (FAILED(hr)) goto Exit;
//...
// Correct source provider is activated through ActivateObject
//
hr = MFCreateAttributes(&pReaderAttrs, 2);
if (FAILED(hr)) goto Exit;
pReaderAttrs->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK,(IUnknown*)this);
pReaderAttrs->SetUINT32(MF_READWRITE_ENABLE_HARDWARE_TRANSFORMS, TRUE);
hr = MFCreateSourceReaderFromMediaSource(pMediaSource, pReaderAttrs, &m_pReader);
if (FAILED(hr)) goto Exit;
// Correct profile is set
OnReadSample implementation:
HRESULT hr = S_OK;
LONG defaultStride = 0;
LONG stride = 0;
BYTE *pBuffer = NULL;
EnterCriticalSection(&m_critSec);
if (NULL != pSample)
{
    IMFMediaBuffer* pMediaBuffer;
    DWORD dataSize = 0;
    // In case of a single buffer, no copy would happen
    hr = pSample->ConvertToContiguousBuffer(&pMediaBuffer);
    if (FAILED(hr)) goto Cleanup;
    pMediaBuffer->GetCurrentLength(&dataSize);
    hr = pMediaBuffer->Lock(&pBuffer, &dataSize, &dataSize);
    if (FAILED(hr)) goto Cleanup;
    // todo: use a backbuffer to avoid sync issues
    if (NULL == m_pLatestFrame) m_pLatestFrame = (BYTE*)malloc(dataSize);
    memcpy(m_pLatestFrame, pBuffer, dataSize);
    ++m_frameNumber;
    pMediaBuffer->Unlock();
    pMediaBuffer->Release();
}
Cleanup:
LeaveCriticalSection(&m_critSec);
// Async ReadSample request for the next buffer:
hr = m_pReader->ReadSample(
    (DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM,
    0,
    NULL, // actual stream index
    NULL, // flags
    NULL, // timestamp
    NULL  // sample
);
return hr;
Conversion to cv::Mat:
void SourceReaderImpl::GetLatestFrame(BYTE** ppLatestFrame)
{
    EnterCriticalSection(&m_critSec);
    *ppLatestFrame = m_pLatestFrame;
    LeaveCriticalSection(&m_critSec);
}
void* CameraWrapperImpl::getLatestFrame()
{
    BYTE* pLatestFrame = NULL;
    m_pMfReader->GetLatestFrame(&pLatestFrame);
    return pLatestFrame;
}
void Player::Present()
{
    //...
    color = cv::Mat(colorSize,
        CV_8UC3,
        static_cast<unsigned char*>(m_pColorCameraImpl->getLatestFrame()));
    cv::imshow("color", color); // cv::imshow requires a window name
}
Any idea?
Thanks in advance!
A bitmap is stored with the last scan line first, so the image will appear upside down. The easiest solution is to call cv::flip:
void Player::Present()
{
    //...
    color = cv::Mat(colorSize,
        CV_8UC3,
        static_cast<unsigned char*>(m_pColorCameraImpl->getLatestFrame()));
    cv::Mat corrected;
    cv::flip(color, corrected, 0); // flip around the x-axis
    cv::imshow("color", corrected); // cv::imshow requires a window name
}
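Alternatively, you can detect the layout at runtime: MFGetStrideForBitmapInfoHeader returns a negative default stride for bottom-up formats (a sketch; subtype and width are assumed to be the MF_MT_SUBTYPE GUID of the current media type, e.g. MFVideoFormat_RGB32, and the frame width in pixels):
LONG stride = 0;
HRESULT hr = MFGetStrideForBitmapInfoHeader(subtype.Data1, width, &stride);
if (SUCCEEDED(hr) && stride < 0)
{
    // Negative default stride = bottom-up layout; flip before display.
}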

DirectShow: webcam preview and image capture

After looking at a very similar question and seeing almost identical code, I've decided to ask this question separately. I want to show a video preview of the webcam's video stream in the default window DirectShow uses, and I also want the ability to "take a picture" of the video stream at any given moment.
I started with the DirectShow examples on MSDN, as well as the AMCap sample code, and have something I believe should show the preview part, but it does not. I've found no examples of grabbing an image from the video stream except via SampleGrabber, which is deprecated, so I am trying not to use it.
Below is my code, line for line. Note that most of the code in EnumerateCameras is commented out. That code would have been for attaching to another window, which I don't want to do. The MSDN documentation explicitly states that the VMR-7 creates its own window to display the video stream. I get no errors in my app, but this window never appears.
My question then is this: what am I doing wrong? Alternatively, if you know of a simple example of what I am trying to do, link me to it. AMCap is not a simple example, for reference.
NOTE: InitializeVMR is for running in windowless state, which is my ultimate goal (integrating into a DirectX game). For now, however, I just want it to run in the simplest mode possible.
EDIT: The first portion of this question, previewing the camera stream, is solved. I am now just looking for an alternative to the deprecated SampleGrabber class so I can snap a photo at any moment and save it to a file.
EDIT: After searching for almost an hour on Google, the general consensus seems to be that you HAVE to use ISampleGrabber. Please let me know if you find anything different.
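For what it's worth, in windowless VMR mode there is a snapshot path that avoids SampleGrabber (a sketch, assuming pWc is the IVMRWindowlessControl* returned by InitializeVMR below):
BYTE *pDib = NULL;
HRESULT hr = pWc->GetCurrentImage(&pDib); // returns a packed DIB (BITMAPINFOHEADER + bits)
if (SUCCEEDED(hr))
{
    // ... write the DIB to a .bmp file ...
    CoTaskMemFree(pDib); // the caller owns the buffer and must free it
}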
Testing code (main.cpp):
CWebcam* camera = new CWebcam();
HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
MessageBox(NULL, L"text", L"caption", NULL);
if (SUCCEEDED(hr))
{
    camera->Create();
    camera->EnumerateCameras();
    camera->StartCamera();
}
int d;
cin >> d;
Webcam.cpp:
#include "Webcam.h"
CWebcam::CWebcam() {
HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
//m_pTexInst = nullptr;
//m_pTexRes = nullptr;
}
CWebcam::~CWebcam() {
CoUninitialize();
m_pDeviceMonikers->Release();
m_pMediaController->Release();
}
BOOL CWebcam::Create() {
InitCaptureGraphBuilder(&m_pFilterGraph, &m_pCaptureGraph);
hr = m_pFilterGraph->QueryInterface(IID_IMediaControl, (void **)&m_pMediaController);
return TRUE;
}
void CWebcam::Destroy() {
}
void CWebcam::EnumerateCameras() {
HRESULT hr = EnumerateDevices(CLSID_VideoInputDeviceCategory, &m_pDeviceMonikers);
if (SUCCEEDED(hr))
{
//DisplayDeviceInformation(m_pDeviceMonikers);
//m_pDeviceMonikers->Release();
IMoniker *pMoniker = NULL;
if(m_pDeviceMonikers->Next(1, &pMoniker, NULL) == S_OK)
{
hr = pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&m_pCameraFilter);
if (SUCCEEDED(hr))
{
hr = m_pFilterGraph->AddFilter(m_pCameraFilter, L"Capture Filter");
}
}
// connect the output pin to the video renderer
if(SUCCEEDED(hr))
{
hr = m_pCaptureGraph->RenderStream(&PIN_CATEGORY_PREVIEW, &MEDIATYPE_Video,
m_pCameraFilter, NULL, NULL);
}
//InitializeVMR(hwnd, m_pFilterGraph, &m_pVMRControl, 1, FALSE);
//get the video window that will be displayed from the filter graph
IVideoWindow *pVideoWindow = NULL;
hr = m_pFilterGraph->QueryInterface(IID_IVideoWindow, (void **)&pVideoWindow);
/*if(hr != NOERROR)
{
printf("This graph cannot preview properly");
}
else
{
//get the video stream configurations
hr = m_pCaptureGraph->FindInterface(&PIN_CATEGORY_CAPTURE,
&MEDIATYPE_Video, m_pCameraFilter,
IID_IAMStreamConfig, (void **)&m_pVideoStreamConfig);
//Find out if this is a DV stream
AM_MEDIA_TYPE *pMediaTypeDV;
//fake window handle
HWND window = NULL;
if(m_pVideoStreamConfig && SUCCEEDED(m_pVideoStreamConfig->GetFormat(&pMediaTypeDV)))
{
if(pMediaTypeDV->formattype == FORMAT_DvInfo)
{
// in this case we want to set the size of the parent window to that of
// current DV resolution.
// We get that resolution from the IVideoWindow.
IBasicVideo* pBasivVideo;
// If we got here, gcap.pVW is not NULL
//ASSERT(pVideoWindow != NULL);
hr = pVideoWindow->QueryInterface(IID_IBasicVideo, (void**)&pBasivVideo);
/*if(SUCCEEDED(hr))
{
HRESULT hr1, hr2;
long lWidth, lHeight;
hr1 = pBasivVideo->get_VideoHeight(&lHeight);
hr2 = pBasivVideo->get_VideoWidth(&lWidth);
if(SUCCEEDED(hr1) && SUCCEEDED(hr2))
{
ResizeWindow(lWidth, abs(lHeight));
}
}
}
}
RECT rc;
pVideoWindow->put_Owner((OAHWND)window); // We own the window now
pVideoWindow->put_WindowStyle(WS_CHILD); // you are now a child
GetClientRect(window, &rc);
pVideoWindow->SetWindowPosition(0, 0, rc.right, rc.bottom); // be this big
pVideoWindow->put_Visible(OATRUE);
}*/
}
}
BOOL CWebcam::StartCamera() {
if(m_bIsStreaming == FALSE)
{
m_bIsStreaming = TRUE;
hr = m_pMediaController->Run();
if(FAILED(hr))
{
// stop parts that ran
m_pMediaController->Stop();
return FALSE;
}
return TRUE;
}
return FALSE;
}
void CWebcam::EndCamera() {
if(m_bIsStreaming)
{
hr = m_pMediaController->Stop();
m_bIsStreaming = FALSE;
//invalidate client rect as well so that it must redraw
}
}
BOOL CWebcam::CaptureToTexture() {
return TRUE;
}
HRESULT CWebcam::InitCaptureGraphBuilder(
    IGraphBuilder **ppGraph,        // Receives the pointer.
    ICaptureGraphBuilder2 **ppBuild // Receives the pointer.
)
{
    if (!ppGraph || !ppBuild)
    {
        return E_POINTER;
    }
    IGraphBuilder *pGraph = NULL;
    ICaptureGraphBuilder2 *pBuild = NULL;
    // Create the Capture Graph Builder.
    HRESULT hr = CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL,
        CLSCTX_INPROC_SERVER, IID_ICaptureGraphBuilder2, (void**)&pBuild);
    if (SUCCEEDED(hr))
    {
        // Create the Filter Graph Manager.
        hr = CoCreateInstance(CLSID_FilterGraph, 0, CLSCTX_INPROC_SERVER,
            IID_IGraphBuilder, (void**)&pGraph);
        if (SUCCEEDED(hr))
        {
            // Initialize the Capture Graph Builder.
            pBuild->SetFiltergraph(pGraph);
            // Return both interface pointers to the caller.
            *ppBuild = pBuild;
            *ppGraph = pGraph; // The caller must release both interfaces.
            return S_OK;
        }
        else
        {
            pBuild->Release();
        }
    }
    return hr; // Failed
}
HRESULT CWebcam::EnumerateDevices(REFGUID category, IEnumMoniker **ppEnum)
{
    // Create the System Device Enumerator.
    ICreateDevEnum *pSystemDeviceEnumerator;
    HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL,
        CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pSystemDeviceEnumerator));
    if (SUCCEEDED(hr))
    {
        // Create an enumerator for the category.
        hr = pSystemDeviceEnumerator->CreateClassEnumerator(category, ppEnum, 0);
        if (hr == S_FALSE)
        {
            hr = VFW_E_NOT_FOUND; // The category is empty. Treat as an error.
        }
        pSystemDeviceEnumerator->Release();
    }
    return hr;
}
void CWebcam::DisplayDeviceInformation(IEnumMoniker *pEnum)
{
    IMoniker *pMoniker = NULL;
    int counter = 0;
    while (pEnum->Next(1, &pMoniker, NULL) == S_OK)
    {
        IPropertyBag *pPropBag;
        HRESULT hr = pMoniker->BindToStorage(0, 0, IID_PPV_ARGS(&pPropBag));
        if (FAILED(hr))
        {
            pMoniker->Release();
            continue;
        }
        VARIANT var;
        VariantInit(&var);
        // Get description or friendly name.
        hr = pPropBag->Read(L"Description", &var, 0);
        if (FAILED(hr))
        {
            hr = pPropBag->Read(L"FriendlyName", &var, 0);
        }
        if (SUCCEEDED(hr))
        {
            printf("%d: %S\n", counter, var.bstrVal);
            VariantClear(&var);
        }
        hr = pPropBag->Write(L"FriendlyName", &var);
        // WaveInID applies only to audio capture devices.
        hr = pPropBag->Read(L"WaveInID", &var, 0);
        if (SUCCEEDED(hr))
        {
            printf("%d: WaveIn ID: %d\n", counter, var.lVal);
            VariantClear(&var);
        }
        hr = pPropBag->Read(L"DevicePath", &var, 0);
        if (SUCCEEDED(hr))
        {
            // The device path is not intended for display.
            printf("%d: Device path: %S\n", counter, var.bstrVal);
            VariantClear(&var);
        }
        pPropBag->Release();
        pMoniker->Release();
        counter++;
    }
}
HRESULT CWebcam::InitializeVMR(
    HWND hwndApp,                 // Application window.
    IGraphBuilder* pFG,           // Pointer to the Filter Graph Manager.
    IVMRWindowlessControl** ppWc, // Receives the interface.
    DWORD dwNumStreams,           // Number of streams to use.
    BOOL fBlendAppImage           // Are we alpha-blending a bitmap?
)
{
    IBaseFilter* pVmr = NULL;
    IVMRWindowlessControl* pWc = NULL;
    *ppWc = NULL;
    // Create the VMR and add it to the filter graph.
    HRESULT hr = CoCreateInstance(CLSID_VideoMixingRenderer, NULL,
        CLSCTX_INPROC, IID_IBaseFilter, (void**)&pVmr);
    if (FAILED(hr))
    {
        return hr;
    }
    hr = pFG->AddFilter(pVmr, L"Video Mixing Renderer");
    if (FAILED(hr))
    {
        pVmr->Release();
        return hr;
    }
    // Set the rendering mode and number of streams.
    IVMRFilterConfig* pConfig;
    hr = pVmr->QueryInterface(IID_IVMRFilterConfig, (void**)&pConfig);
    if (SUCCEEDED(hr))
    {
        pConfig->SetRenderingMode(VMRMode_Windowless);
        // Set the VMR-7 to mixing mode if you want more than one video
        // stream, or you want to mix a static bitmap over the video.
        // (The VMR-9 defaults to mixing mode with four inputs.)
        if (dwNumStreams > 1 || fBlendAppImage)
        {
            pConfig->SetNumberOfStreams(dwNumStreams);
        }
        pConfig->Release();
        hr = pVmr->QueryInterface(IID_IVMRWindowlessControl, (void**)&pWc);
        if (SUCCEEDED(hr))
        {
            pWc->SetVideoClippingWindow(hwndApp);
            *ppWc = pWc; // The caller must release this interface.
        }
    }
    pVmr->Release();
    // Now the VMR can be connected to other filters.
    return hr;
}
In windowless mode the VMR does not create a separate window. Since you started initialization for windowless mode, you have to follow SetVideoClippingWindow with an IVMRWindowlessControl::SetVideoPosition call to provide the position within the window; see VMR Windowless Mode on MSDN.
Another sample code snippet for you: http://www.assembla.com/code/roatl-utilities/subversion/nodes/trunk/FullScreenWindowlessVmrSample01/MainDialog.h#ln188
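For example (a minimal sketch, assuming pWc and hwndApp are the pointers used in InitializeVMR above):
RECT rcDest;
GetClientRect(hwndApp, &rcDest); // fill the window's client area
// A NULL source rectangle means: show the whole video frame.
HRESULT hr = pWc->SetVideoPosition(NULL, &rcDest);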