I am having a big issue getting raw audio data from the SampleGrabber filter and writing it to an audio file in the proper format.
What I'm trying to achieve is:
grabbing the data from: Microphone -> SampleGrabber -> Null Renderer
writing the raw captured data to a proper audio file (MP3/AVI/WAVE)
Some questions:
Am I setting the proper MediaType for the SampleGrabber?
How can you compress the audio data to a proper audio file?
My current code below:
struct sRecordingData {
std::vector<BYTE> recordingData;
double totalRecordingTime;
};
class CFakeCallback : public ISampleGrabberCB
{
public:
sRecordingData sRecording;
STDMETHODIMP_(ULONG) AddRef() { return 2; } // fake ref counting: the object is meant to outlive the graph
STDMETHODIMP_(ULONG) Release() { return 1; } // never deletes, so keep the instance on the stack or as a member
STDMETHODIMP_(HRESULT __stdcall) QueryInterface(REFIID riid, void** ppv)
{
if (riid == IID_ISampleGrabberCB || riid == IID_IUnknown)
{
*ppv = (void*) static_cast<ISampleGrabberCB*>(this);
return NOERROR;
}
return E_NOINTERFACE;
}
STDMETHODIMP_(HRESULT __stdcall) SampleCB(double SampleTime, IMediaSample* pSample)
{
return S_OK;
}
STDMETHODIMP_(HRESULT __stdcall) BufferCB(double SampleTime, BYTE* pBuffer, long BufferLen)
{
sRecording.recordingData.insert(sRecording.recordingData.end(), pBuffer, pBuffer + BufferLen); // append the whole buffer, not just its first byte
sRecording.totalRecordingTime = SampleTime;
return S_OK;
}
};
void Microphone::RecordAudio()
{
HRESULT hr;
AM_MEDIA_TYPE mt;
long recordingEventCode;
IMediaControl* pMediaControl = NULL;
ISampleGrabber* pISampleGrabber = NULL;
IBaseFilter* pSampleGrabberFilter;
IGraphBuilder* pGraphBuilder = NULL;
IMediaEventEx* pEvent = NULL;
IBaseFilter* pMicrophoneFilter = NULL;
ISampleGrabber* pSampleGrabber = NULL;
ICreateDevEnum* pDeviceEnum = NULL;
IBaseFilter* pNullRender = NULL;
CFakeCallback CB; // callback instance passed to SetCallback below
hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
// Filter graph
hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pGraphBuilder));
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
// Device enum
hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pDeviceEnum));
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
// Null renderer
hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pNullRender));
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
// Get the event interface
hr = pGraphBuilder->QueryInterface(IID_PPV_ARGS(&pEvent));
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
// Sample Grabber
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pSampleGrabberFilter));
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
// Get Media Control
hr = pGraphBuilder->QueryInterface(IID_PPV_ARGS(&pMediaControl));
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
// Setup input device filter
pMicrophoneFilter = Microphone::SetupDeviceFilter(CLSID_AudioInputDeviceCategory, pGraphBuilder, pDeviceEnum,
Utils::s2ws("My Microphone"));
if (pMicrophoneFilter == NULL) {
HR_Failed(hr);
return;
}
// Setup sample grabber filter
pSampleGrabberFilter = Microphone::SetupDeviceFilter(CLSID_LegacyAmFilterCategory,
pGraphBuilder, pDeviceEnum, L"SampleGrabber");
if (pSampleGrabberFilter == NULL) {
HR_Failed(hr);
return;
}
// Connect both pins together
Device_Connect(pMicrophoneFilter, pSampleGrabberFilter);
// Setup null renderer filter
pNullRender = Microphone::SetupDeviceFilter(CLSID_LegacyAmFilterCategory,
pGraphBuilder, pDeviceEnum, L"Null Renderer");
if (pNullRender == NULL) {
HR_Failed(hr);
return;
}
// Connect both pins together
Device_Connect(pSampleGrabberFilter, pNullRender);
// Get the ISampleGrabber interface
hr = pSampleGrabberFilter->QueryInterface(IID_ISampleGrabber, (LPVOID*)&pISampleGrabber);
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
hr = pISampleGrabber->SetCallback(&CB, 1); // 1 = deliver samples via BufferCB (0 would use SampleCB)
ZeroMemory(&mt, sizeof(mt));
mt.majortype = MEDIATYPE_Audio;
mt.subtype = MEDIASUBTYPE_PCM;
// Set the media type
hr = pISampleGrabber->SetMediaType(&mt);
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
hr = pISampleGrabber->SetBufferSamples(FALSE); // we use the callback, not buffered samples
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
hr = pMediaControl->Run();
if (FAILED(hr))
{
HR_Failed(hr);
return;
}
DWORD recordingTime = 20000;
hr = pEvent->WaitForCompletion(recordingTime, &recordingEventCode);
std::string str(CB.sRecording.recordingData.begin(), CB.sRecording.recordingData.end());
// Do something with the data from callback and write it to audio file
}
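On the second question, for the WAVE case specifically: raw PCM becomes a playable .wav file once a RIFF header is prepended, while MP3 or a compressed AVI additionally needs an encoder filter in the graph. Below is a minimal sketch of the WAVE route, not part of the original code; the format parameters are assumptions, and in practice you should read the negotiated WAVEFORMATEX back via ISampleGrabber::GetConnectedMediaType instead of hard-coding them.
#include <cstdint>
#include <fstream>
#include <vector>
// Prepends a canonical 44-byte RIFF/WAVE header and writes the PCM payload.
// channels/sampleRate/bitsPerSample are assumed values; use the grabber's real format.
void WritePcmAsWave(const char* path, const std::vector<unsigned char>& pcm,
                    uint16_t channels = 1, uint32_t sampleRate = 44100,
                    uint16_t bitsPerSample = 16)
{
    uint16_t blockAlign = channels * bitsPerSample / 8;
    uint32_t byteRate = sampleRate * blockAlign;
    uint32_t dataSize = (uint32_t)pcm.size();
    uint32_t riffSize = 36 + dataSize; // everything after the "RIFF" size field
    uint32_t fmtSize = 16;             // PCM "fmt " chunk body is 16 bytes
    uint16_t formatTag = 1;            // WAVE_FORMAT_PCM

    std::ofstream f(path, std::ios::binary);
    f.write("RIFF", 4); f.write((const char*)&riffSize, 4);
    f.write("WAVE", 4);
    f.write("fmt ", 4); f.write((const char*)&fmtSize, 4);
    f.write((const char*)&formatTag, 2); f.write((const char*)&channels, 2);
    f.write((const char*)&sampleRate, 4); f.write((const char*)&byteRate, 4);
    f.write((const char*)&blockAlign, 2); f.write((const char*)&bitsPerSample, 2);
    f.write("data", 4); f.write((const char*)&dataSize, 4);
    f.write((const char*)pcm.data(), dataSize);
}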
I have the following implementation:
void coAudioPlayerSampleGrabber::test(SoundDataType dataType,
unsigned char const * pData,
int64_t dataLen)
{
HeapSetInformation(NULL, HeapEnableTerminationOnCorruption, NULL, 0);
IMFSourceReader *pReader = NULL;
IMFByteStream * spByteStream = NULL;
HRESULT hr = S_OK;
// Initialize the COM library.
hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
// Initialize the Media Foundation platform.
if (SUCCEEDED(hr))
{
hr = MFStartup(MF_VERSION);
}
hr = MFCreateMFByteStreamOnStreamEx((IUnknown*)pData, &spByteStream);
if (FAILED(hr))
{
printf("Error MFCreateMFByteStreamOnStreamEx");
}
IMFAttributes * Atrr = NULL;
hr = MFCreateAttributes(&Atrr, 10);
if (FAILED(hr))
{
printf("Error MFCreateAttributes");
}
hr = Atrr->SetUINT32(MF_READWRITE_ENABLE_HARDWARE_TRANSFORMS, true);
if (FAILED(hr))
{
printf("Error Atrr->SetUINT32(MF_READWRITE_ENABLE_HARDWARE_TRANSFORMS, true)");
}
hr = MFCreateSourceReaderFromByteStream(spByteStream, Atrr, &pReader);
if (FAILED(hr))
{
printf("Error MFCreateSourceReaderFromByteStream");
}
if (FAILED(hr))
{
printf("Error opening input file");
}
IMFMediaType *pAudioType = NULL; // Represents the PCM audio format.
hr = ConfigureAudioStream(dataType, pReader, &pAudioType);
if (FAILED(hr))
{
printf("Error ConfigureAudioStream");
}
IMFSample *pSample = NULL;
IMFMediaBuffer *pBuffer = NULL;
BYTE *pAudioData = NULL;
DWORD cbBuffer = 0;
std::vector<SampleData> samples_vec;
while (true)
{
DWORD dwFlags = 0;
hr = pReader->ReadSample((DWORD)MF_SOURCE_READER_FIRST_AUDIO_STREAM, 0, NULL, &dwFlags, NULL, &pSample);
if (FAILED(hr)) { break; }
if (dwFlags & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED)
{
printf("Type change - not supported by WAVE file format.\n");
break;
}
if (dwFlags & MF_SOURCE_READERF_ENDOFSTREAM)
{
printf("End of input file.\n");
break;
}
hr = pSample->ConvertToContiguousBuffer(&pBuffer);
if (FAILED(hr)) { break; }
hr = pBuffer->Lock(&pAudioData, NULL, &cbBuffer);
if (FAILED(hr)) { break; }
// Do something with pAudioData, an array of unsigned chars of length cbBuffer
SampleData tmp;
tmp.pAudioData = new BYTE[cbBuffer];
memcpy(tmp.pAudioData, pAudioData, cbBuffer);
tmp.cbBuffer = cbBuffer;
samples_vec.push_back(tmp);
// Unlock the buffer and release the per-iteration COM objects so they do not leak.
hr = pBuffer->Unlock();
pAudioData = NULL;
SafeRelease(&pBuffer);
SafeRelease(&pSample);
if (FAILED(hr)) { break; }
}
SafeRelease(&pReader);
SafeRelease(&pSample);
SafeRelease(&pBuffer);
SafeRelease(&spByteStream);
SafeRelease(&Atrr);
// Shut down Media Foundation.
MFShutdown();
CoUninitialize();
}
So, as you can see, I have a pointer to the data and its size; this is my encoded data, and I need to decode it. The problem is this line:
hr = MFCreateMFByteStreamOnStreamEx((IUnknown*)pData, &spByteStream);
I get an access violation there, and as far as I can see it is because I try to convert pData to IUnknown*. The question is: how do I convert it correctly?
You can't cut corners like this:
unsigned char const * pData;
...
hr = MFCreateMFByteStreamOnStreamEx((IUnknown*)pData, &spByteStream);
IUnknown is not yet another fancy alias for a byte. You are supposed to supply an actual interface pointer representing a stream, as documented.
Media Foundation does offer you means to read from memory bytes. You need to create a real stream object (IStream, IRandomAccessStream, or IMFByteStream, per the documentation). Also supply the IMFAttributes you created with the proper attributes to specify the data type (which otherwise, in the case of a file, is derived from the extension or MIME type). The Source Reader API will then be able to process the memory bytes as the source of media file data, and a suitable decoder will decode the audio into PCM data (similar to this).
Something you can do real quick: use CreateStreamOnHGlobal to create an IStream implementation and copy your bytes into its underlying buffer (see the docs). Then MFCreateMFByteStreamOnStream creates an IMFByteStream wrapper over it, and you can use that wrapper as the MFCreateSourceReaderFromByteStream argument.
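A minimal sketch of that quick route (error handling trimmed; pData and dataLen are the buffer and length from the question):
IStream* pStream = NULL;
IMFByteStream* spByteStream = NULL;
// Create a growable memory stream; TRUE makes it free the HGLOBAL on release.
HRESULT hr = CreateStreamOnHGlobal(NULL, TRUE, &pStream);
if (SUCCEEDED(hr))
{
    ULONG cbWritten = 0;
    hr = pStream->Write(pData, (ULONG)dataLen, &cbWritten); // copy the encoded bytes in
}
if (SUCCEEDED(hr))
{
    LARGE_INTEGER zero = {};
    hr = pStream->Seek(zero, STREAM_SEEK_SET, NULL); // rewind so the reader starts at byte 0
}
if (SUCCEEDED(hr))
{
    hr = MFCreateMFByteStreamOnStream(pStream, &spByteStream); // IMFByteStream wrapper
}
// spByteStream can now be passed to MFCreateSourceReaderFromByteStream as in the question's code.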
I used to use a VectorStream class for such stuff. Some functions are not implemented, but you should get the basic idea.
class VectorStream : public IStream
{
public:
bool ReadOnly = false;
ULONG r = 1;
std::vector<char> d;
size_t p = 0;
VectorStream()
{
}
void Clear()
{
d.clear();
p = 0;
}
// IUnknown
virtual HRESULT STDMETHODCALLTYPE QueryInterface(
/* [in] */ REFIID riid,
/* [iid_is][out] */ _COM_Outptr_ void __RPC_FAR* __RPC_FAR* ppvObject)
{
if (riid == __uuidof(IUnknown) || riid == __uuidof(IStream))
{
*ppvObject = (IStream*)this;
r++;
return S_OK;
}
return E_NOINTERFACE;
}
virtual ULONG STDMETHODCALLTYPE AddRef(void)
{
return ++r;
}
virtual ULONG STDMETHODCALLTYPE Release(void)
{
return --r; // never deletes: the object is meant to live on the stack or as a member
}
HRESULT __stdcall Clone(
IStream** ppstm
)
{
return E_NOTIMPL;
}
HRESULT __stdcall Commit(
DWORD grfCommitFlags
)
{
return S_OK;
}
HRESULT __stdcall CopyTo(
IStream* pstm,
ULARGE_INTEGER cb,
ULARGE_INTEGER* pcbRead,
ULARGE_INTEGER* pcbWritten
)
{
return E_NOTIMPL; // not implemented
}
HRESULT __stdcall LockRegion(
ULARGE_INTEGER libOffset,
ULARGE_INTEGER cb,
DWORD dwLockType
)
{
return S_OK;
}
HRESULT __stdcall UnlockRegion(
ULARGE_INTEGER libOffset,
ULARGE_INTEGER cb,
DWORD dwLockType
)
{
return S_OK;
}
HRESULT __stdcall Revert()
{
return E_NOTIMPL;
}
HRESULT __stdcall Seek(
LARGE_INTEGER dlibMove,
DWORD dwOrigin,
ULARGE_INTEGER* plibNewPosition
)
{
LARGE_INTEGER lo = { 0 };
if (dwOrigin == STREAM_SEEK_SET)
{
p = dlibMove.QuadPart;
}
if (dwOrigin == STREAM_SEEK_CUR)
{
p += dlibMove.QuadPart;
}
if (dwOrigin == STREAM_SEEK_END)
{
p = d.size() + dlibMove.QuadPart; // dlibMove is zero or negative when seeking from the end
}
if (p >= d.size())
p = d.size();
if (plibNewPosition)
plibNewPosition->QuadPart = p;
return S_OK;
}
HRESULT __stdcall SetSize(
ULARGE_INTEGER libNewSize
)
{
d.resize(libNewSize.QuadPart);
return S_OK;
}
int eb = 0;
HRESULT __stdcall Stat(
STATSTG* pstatstg,
DWORD grfStatFlag
)
{
pstatstg->type = STGTY_STREAM;
pstatstg->cbSize.QuadPart = d.size();
pstatstg->grfLocksSupported = true;
return S_OK;
}
unsigned long long readbytes = 0;
HRESULT __stdcall Read(
void* pv,
ULONG cb,
ULONG* pcbRead
)
{
auto av = d.size() - p;
if (cb < av)
av = cb;
memcpy(pv, d.data() + p, av);
p += av;
if (pcbRead)
*pcbRead = (ULONG)av;
// if (av < cb)
// return S_FALSE;
return S_OK;
}
HRESULT __stdcall Write(
const void* pv,
ULONG cb,
ULONG* pcbWritten
)
{
if (ReadOnly)
return STG_E_ACCESSDENIED;
if (d.size() < (p + cb))
{
auto exc = (p + cb) - d.size();
d.resize(d.size() + exc);
}
memcpy(d.data() + p, pv, cb);
p += cb;
if (pcbWritten)
*pcbWritten = cb;
return S_OK;
}
};
It encapsulates a std::vector<> in an IStream.
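Hypothetical usage with the Source Reader (nothing here is from the original question; the member names d and ReadOnly come from the class above):
VectorStream stream; // stack lifetime is fine: Release() never deletes
stream.d.assign(pData, pData + dataLen); // copy the encoded audio into the vector
stream.ReadOnly = true;
IMFByteStream* spByteStream = NULL;
HRESULT hr = MFCreateMFByteStreamOnStream(&stream, &spByteStream);
if (SUCCEEDED(hr))
{
    IMFSourceReader* pReader = NULL;
    hr = MFCreateSourceReaderFromByteStream(spByteStream, NULL, &pReader);
    // ... ReadSample loop as in the question's test() ...
}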
I have created a sample based on this MS link:
https://learn.microsoft.com/en-us/windows/win32/directshow/using-the-sample-grabber
Code appears below.
But the code only grabs one frame, I think the very first frame of the video. I want to grab every frame available in the video file. How do I do that?
/*
for this sample to work you have to have
"C:\\temp\\FlickAnimation.avi"
and it will output the first frame to grab1.bmp
work out how to do all frames.
qedit.h from here:
https://social.msdn.microsoft.com/Forums/windowsdesktop/en-US/2ab5c212-5824-419d-b5d9-7f5db82f57cd/qedith-missing-in-current-windows-sdk-v70?forum=windowsdirectshowdevelopment
dshowutil.h from here:
https://raw.githubusercontent.com/microsoft/Windows-classic-samples/master/Samples/Win7Samples/multimedia/directshow/common/dshowutil.h
*/
#include <windows.h>
#include <dshow.h> // DirectShow main header
#include "qedit.h" // from Microsoft
#include <strmif.h> // for IMediaSample
#include <combaseapi.h> // IID_PPV_ARGS
#include "dshowutil.h" // from Microsoft
#pragma comment(lib, "strmiids.lib")
#pragma comment(lib, "dxguid.lib")
template <class T> void SafeRelease(T **ppT)
{
if (*ppT)
{
(*ppT)->Release();
*ppT = NULL;
}
}
HRESULT WriteBitmap(PCWSTR, BITMAPINFOHEADER*, size_t, BYTE*, size_t);
HRESULT GrabVideoBitmap(PCWSTR pszVideoFile, PCWSTR pszBitmapFile)
{
IGraphBuilder *pGraph = NULL;
IMediaControl *pControl = NULL;
IMediaEventEx *pEvent = NULL;
IBaseFilter *pGrabberF = NULL;
ISampleGrabber *pGrabber = NULL;
IBaseFilter *pSourceF = NULL;
IEnumPins *pEnum = NULL;
IPin *pPin = NULL;
IBaseFilter *pNullF = NULL;
BYTE *pBuffer = NULL;
HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL,
CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&pGraph));
if (FAILED(hr))
{
goto done;
}
hr = pGraph->QueryInterface(IID_PPV_ARGS(&pControl));
if (FAILED(hr))
{
goto done;
}
hr = pGraph->QueryInterface(IID_PPV_ARGS(&pEvent));
if (FAILED(hr))
{
goto done;
}
// Create the Sample Grabber filter.
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&pGrabberF));
if (FAILED(hr))
{
goto done;
}
hr = pGraph->AddFilter(pGrabberF, L"Sample Grabber");
if (FAILED(hr))
{
goto done;
}
hr = pGrabberF->QueryInterface(IID_PPV_ARGS(&pGrabber));
if (FAILED(hr))
{
goto done;
}
AM_MEDIA_TYPE mt;
ZeroMemory(&mt, sizeof(mt));
mt.majortype = MEDIATYPE_Video;
mt.subtype = MEDIASUBTYPE_RGB24;
hr = pGrabber->SetMediaType(&mt);
if (FAILED(hr))
{
goto done;
}
hr = pGraph->AddSourceFilter(pszVideoFile, L"Source", &pSourceF);
if (FAILED(hr))
{
goto done;
}
hr = pSourceF->EnumPins(&pEnum);
if (FAILED(hr))
{
goto done;
}
while (S_OK == pEnum->Next(1, &pPin, NULL))
{
hr = ConnectFilters(pGraph, pPin, pGrabberF);
SafeRelease(&pPin);
if (SUCCEEDED(hr))
{
break;
}
}
if (FAILED(hr))
{
goto done;
}
hr = CoCreateInstance(CLSID_NullRenderer, NULL, CLSCTX_INPROC_SERVER,
IID_PPV_ARGS(&pNullF));
if (FAILED(hr))
{
goto done;
}
hr = pGraph->AddFilter(pNullF, L"Null Filter");
if (FAILED(hr))
{
goto done;
}
hr = ConnectFilters(pGraph, pGrabberF, pNullF);
if (FAILED(hr))
{
goto done;
}
hr = pGrabber->SetOneShot(FALSE); // the original MSDN sample passes TRUE (stop after the first sample)
if (FAILED(hr))
{
goto done;
}
hr = pGrabber->SetBufferSamples(TRUE);
if (FAILED(hr))
{
goto done;
}
hr = pControl->Run();
if (FAILED(hr))
{
goto done;
}
long evCode;
hr = pEvent->WaitForCompletion(INFINITE, &evCode);
// Find the required buffer size.
long cbBuffer;
hr = pGrabber->GetCurrentBuffer(&cbBuffer, NULL);
if (FAILED(hr))
{
goto done;
}
pBuffer = (BYTE*)CoTaskMemAlloc(cbBuffer);
if (!pBuffer)
{
hr = E_OUTOFMEMORY;
goto done;
}
hr = pGrabber->GetCurrentBuffer(&cbBuffer, (long*)pBuffer);
if (FAILED(hr))
{
goto done;
}
hr = pGrabber->GetConnectedMediaType(&mt);
if (FAILED(hr))
{
goto done;
}
// Examine the format block.
if ((mt.formattype == FORMAT_VideoInfo) &&
(mt.cbFormat >= sizeof(VIDEOINFOHEADER)) &&
(mt.pbFormat != NULL))
{
VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)mt.pbFormat;
hr = WriteBitmap(pszBitmapFile, &pVih->bmiHeader,
mt.cbFormat - SIZE_PREHEADER, pBuffer, cbBuffer);
}
else
{
// Invalid format.
hr = VFW_E_INVALIDMEDIATYPE;
}
FreeMediaType(mt);
done:
CoTaskMemFree(pBuffer);
SafeRelease(&pPin);
SafeRelease(&pEnum);
SafeRelease(&pNullF);
SafeRelease(&pSourceF);
SafeRelease(&pGrabber);
SafeRelease(&pGrabberF);
SafeRelease(&pControl);
SafeRelease(&pEvent);
SafeRelease(&pGraph);
return hr;
}
// Writes a bitmap file.
// pszFileName: Output file name.
// pBMI: Bitmap format information (including palette).
// cbBMI: Size of the BITMAPINFOHEADER, including palette, if present.
// pData: Pointer to the bitmap bits.
// cbData: Size of the bitmap, in bytes.
HRESULT WriteBitmap(PCWSTR pszFileName, BITMAPINFOHEADER *pBMI, size_t cbBMI,
BYTE *pData, size_t cbData)
{
HANDLE hFile = CreateFile(pszFileName, GENERIC_WRITE, 0, NULL,
CREATE_ALWAYS, 0, NULL);
if (hFile == INVALID_HANDLE_VALUE) // CreateFile returns INVALID_HANDLE_VALUE, not NULL, on failure
{
return HRESULT_FROM_WIN32(GetLastError());
}
BITMAPFILEHEADER bmf = {};
bmf.bfType = 0x4d42; // 0x42 = "B" 0x4d = "M"
bmf.bfSize = cbBMI + cbData + sizeof(bmf);
bmf.bfOffBits = sizeof(bmf) + cbBMI;
DWORD cbWritten = 0;
BOOL result = WriteFile(hFile, &bmf, sizeof(bmf), &cbWritten, NULL);
if (result)
{
result = WriteFile(hFile, pBMI, cbBMI, &cbWritten, NULL);
}
if (result)
{
result = WriteFile(hFile, pData, cbData, &cbWritten, NULL);
}
HRESULT hr = result ? S_OK : HRESULT_FROM_WIN32(GetLastError());
CloseHandle(hFile);
return hr;
}
int main() {
// Initialize the COM library.
HRESULT hr = CoInitialize(NULL);
if (FAILED(hr))
{
printf("ERROR - Could not initialize COM library");
return 1;
}
GrabVideoBitmap(L"C:\\temp\\FlickAnimation.avi", L"grab1.bmp");
CoUninitialize();
}
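For reference, the Sample Grabber can also deliver every decoded frame through a callback instead of GetCurrentBuffer, which only ever holds the most recent sample. A minimal sketch, not from the original post, mirroring the CFakeCallback pattern from the audio question above:
class FrameGrabberCB : public ISampleGrabberCB
{
public:
    long frameCount = 0;
    STDMETHODIMP_(ULONG) AddRef() { return 2; } // fake ref counting, stack lifetime
    STDMETHODIMP_(ULONG) Release() { return 1; }
    STDMETHODIMP QueryInterface(REFIID riid, void** ppv)
    {
        if (riid == IID_ISampleGrabberCB || riid == IID_IUnknown)
        {
            *ppv = static_cast<ISampleGrabberCB*>(this);
            return S_OK;
        }
        return E_NOINTERFACE;
    }
    STDMETHODIMP SampleCB(double, IMediaSample*) { return S_OK; }
    STDMETHODIMP BufferCB(double SampleTime, BYTE* pBuffer, long BufferLen)
    {
        // Called once per frame: hand pBuffer/BufferLen to WriteBitmap here,
        // e.g. with a per-frame file name built from ++frameCount.
        ++frameCount;
        return S_OK;
    }
};
// Wiring, replacing SetBufferSamples(TRUE) in GrabVideoBitmap:
// FrameGrabberCB cb;
// pGrabber->SetOneShot(FALSE);
// pGrabber->SetBufferSamples(FALSE);
// pGrabber->SetCallback(&cb, 1); // 1 = BufferCB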
Idea behind solution:
Try opening the video with OpenCV, then use its helper functions to read it frame by frame, saving each frame to an image file.
Code sample:
#include "opencv2/opencv.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main(){
// Create a VideoCapture object and open the input file
// If the input is the web camera, pass 0 instead of the video file name
VideoCapture cap("chaplin.mp4");
// Check if camera opened successfully
if(!cap.isOpened()){
cout << "Error opening video stream or file" << endl;
return -1;
}
int counter = 0;
while(1){
Mat frame;
// Capture frame-by-frame
cap >> frame;
// If the frame is empty, break immediately
if (frame.empty())
break;
// Display the resulting frame
//imshow( "Frame", frame );
//Save the resulting frame
imwrite( "FilePathAndName" + std::to_string( counter ), frame );
counter++;
// Press ESC on keyboard to exit
char c=(char)waitKey(25);
if(c==27)
break;
}
// When everything done, release the video capture object
cap.release();
// Closes all the frames
destroyAllWindows();
return 0;
}
Hope this helps! ;)
I am using DirectShow to control the camera settings and OpenCV to capture the images. My problem is that when I capture images, the settings I give change back to default values after 2 or 3 captures. I need this for my college project, and I always want images with the same camera settings. I have given my code below; any solution will be highly helpful because I am completely new to this.
#include <stdio.h>
#include <atlstr.h>
#include <dshow.h>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\core\core.hpp>
#include <opencv\cv.h>
#include <iostream>
#include <streams.h>
CFactoryTemplate g_Templates[1];
int g_cTemplates;
void setCameraMode(ICaptureGraphBuilder2 *pCaptureGraphBuilder2, IAMStreamConfig *pConfig, IBaseFilter *pDeviceFilter, HRESULT hr)
{
// Set res, frame rate, and color mode
hr = CoInitialize(0);
hr = pCaptureGraphBuilder2->FindInterface(&PIN_CATEGORY_CAPTURE, 0, pDeviceFilter, IID_IAMStreamConfig, (void**)&pConfig);
int iCount = 0, iSize = 0;
hr = pConfig->GetNumberOfCapabilities(&iCount, &iSize);
// Check the size to make sure we pass in the correct structure.
if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS))
{
// Use the video capabilities structure.
for (int iFormat = 0; iFormat < iCount; iFormat++)
{
VIDEO_STREAM_CONFIG_CAPS scc;
AM_MEDIA_TYPE *pmtConfig;
hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
if (SUCCEEDED(hr))
{
if ((pmtConfig->majortype == MEDIATYPE_Video)) //&&
//(pmtConfig->subtype == MEDIASUBTYPE_RGB24))
{
VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pmtConfig->pbFormat;
// pVih contains the detailed format information.
LONG lWidth = pVih->bmiHeader.biWidth;
LONG lHeight = pVih->bmiHeader.biHeight;
pVih->bmiHeader.biWidth = 160;
pVih->bmiHeader.biHeight = 120;
pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);
// pVih->AvgTimePerFrame = 10000000;
}
}
hr = pConfig->SetFormat(pmtConfig);
hr = pConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc);
//DeleteMediaType(pmtConfig);
}
}
}
void setCameraControl(IBaseFilter *pDeviceFilter, HRESULT hr, int exposure, int focus)
{
// Query the capture filter for the IAMCameraControl interface.
IAMCameraControl *pCameraControl = 0;
hr = pDeviceFilter->QueryInterface(IID_IAMCameraControl, (void**)&pCameraControl);
if (FAILED(hr))
{
// The device does not support IAMCameraControl
}
else
{
long Min, Max, Step, Default, Flags, Val;
// Get the range and default values
hr = pCameraControl->GetRange(CameraControl_Exposure, &Min, &Max, &Step, &Default, &Flags);
hr = pCameraControl->GetRange(CameraControl_Focus, &Min, &Max, &Step, &Default, &Flags);
if (SUCCEEDED(hr))
{
hr = pCameraControl->Set(CameraControl_Exposure, -10, CameraControl_Flags_Manual );
// Min = -11, Max = 1, Step = 1
hr = pCameraControl->Set(CameraControl_Focus, focus, CameraControl_Flags_Manual );
}
}
}
void setCameraProperties(IBaseFilter *pDeviceFilter, HRESULT hr, int brightness, int backLightCompensation, int contrast, int saturation, int sharpness, int whiteBalance)
{
// Query the capture filter for the IAMVideoProcAmp interface.
IAMVideoProcAmp *pProcAmp = 0;
hr = pDeviceFilter->QueryInterface(IID_IAMVideoProcAmp, (void**)&pProcAmp);
if (FAILED(hr))
{
// The device does not support IAMVideoProcAmp
}
else
{
long Min, Max, Step, Default, Flags, Val;
// Get the range and default values
hr = pProcAmp->GetRange(VideoProcAmp_Brightness, &Min, &Max, &Step, &Default, &Flags);
hr = pProcAmp->GetRange(VideoProcAmp_BacklightCompensation, &Min, &Max, &Step, &Default, &Flags);
hr = pProcAmp->GetRange(VideoProcAmp_Contrast, &Min, &Max, &Step, &Default, &Flags);
hr = pProcAmp->GetRange(VideoProcAmp_Saturation, &Min, &Max, &Step, &Default, &Flags);
hr = pProcAmp->GetRange(VideoProcAmp_Sharpness, &Min, &Max, &Step, &Default, &Flags);
hr = pProcAmp->GetRange(VideoProcAmp_WhiteBalance, &Min, &Max, &Step, &Default, &Flags);
if (SUCCEEDED(hr))
{
hr = pProcAmp->Set(VideoProcAmp_Brightness,100, VideoProcAmp_Flags_Manual);
hr = pProcAmp->Set(VideoProcAmp_BacklightCompensation, 0, VideoProcAmp_Flags_Manual);
hr = pProcAmp->Set(VideoProcAmp_Contrast, 20 , VideoProcAmp_Flags_Manual);
hr = pProcAmp->Set(VideoProcAmp_Saturation,50, VideoProcAmp_Flags_Manual);
hr = pProcAmp->Set(VideoProcAmp_Sharpness, 0, VideoProcAmp_Flags_Manual);
hr = pProcAmp->Set(VideoProcAmp_WhiteBalance, 0, VideoProcAmp_Flags_Manual);
}
}
}
//given in the example program
IPin *GetPin(IBaseFilter *pFilter, PIN_DIRECTION PinDir)
{
BOOL bFound = FALSE;
IEnumPins *pEnum;
IPin *pPin;
pFilter->EnumPins(&pEnum);
while(pEnum->Next(1, &pPin, 0) == S_OK)
{
PIN_DIRECTION PinDirThis;
pPin->QueryDirection(&PinDirThis);
if (bFound = (PinDir == PinDirThis))
break;
pPin->Release();
}
pEnum->Release();
return (bFound ? pPin : 0);
}
int main()
{
// for playing
IGraphBuilder *pGraphBuilder;
ICaptureGraphBuilder2 *pCaptureGraphBuilder2;
IMediaControl *pMediaControl = NULL;
IMediaEventEx *pEvent = NULL;
// multiple cameras
IBaseFilter *pDeviceFilter_0 = NULL;
IBaseFilter *m_pGrabber_0 = NULL;
ISampleGrabber *m_pGrabberSettings_0 = NULL;
// select camera
ICreateDevEnum *pCreateDevEnum = NULL;
IEnumMoniker *pEnumMoniker = NULL;
IMoniker *pMoniker = NULL;
ULONG nFetched = 0;
// initialize COM
CoInitialize(NULL);
// selecting a device
// Create CreateDevEnum to list device
std::string USB1 = "\\\\?\\usb#vid_045e&pid_076d&mi_00#7&1ba27d43&0&0000#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\\global";
CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, IID_ICreateDevEnum, (PVOID *)&pCreateDevEnum);
// Create EnumMoniker to list VideoInputDevice
pCreateDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEnumMoniker, 0);
if (pEnumMoniker == NULL) {
// this will be shown if there is no capture device
printf("no device\n");
return 0;
}
// reset EnumMoniker
pEnumMoniker->Reset();
// get each Moniker
while (pEnumMoniker->Next(1, &pMoniker, &nFetched) == S_OK)
{
IPropertyBag *pPropertyBag;
TCHAR devname[256];
TCHAR devpath[256];
// bind to IPropertyBag
pMoniker->BindToStorage(0, 0, IID_IPropertyBag, (void **)&pPropertyBag);
VARIANT var;
// get FriendlyName
var.vt = VT_BSTR;
pPropertyBag->Read(L"FriendlyName", &var, 0);
WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, devname, sizeof(devname), 0, 0);
VariantClear(&var);
// get DevicePath
// DevicePath : A unique string
var.vt = VT_BSTR;
pPropertyBag->Read(L"DevicePath", &var, 0);
WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, devpath, sizeof(devpath), 0, 0);
std::string devpathString = devpath;
pMoniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&pDeviceFilter_0 );
pMoniker->Release();
pPropertyBag->Release();
if (pDeviceFilter_0 == NULL)
{
MessageBox(NULL, "No MS HD-5000 cameras found", "No cameras", MB_OK);
return 0;
}
}
// create FilterGraph and CaptureGraphBuilder2
CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC, IID_IGraphBuilder, (LPVOID *)&pGraphBuilder);
CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC, IID_ICaptureGraphBuilder2, (LPVOID *)&pCaptureGraphBuilder2);
HRESULT hr = CoInitialize(0);
IAMStreamConfig *pConfig = NULL;
setCameraMode(pCaptureGraphBuilder2, pConfig, pDeviceFilter_0, hr); // FPS, Res, color mode
setCameraControl(pDeviceFilter_0, hr, 10 , 12); // Focus, exposure
setCameraProperties(pDeviceFilter_0, hr, 180, 0, 4, 100, 0, 2800); // Brightness, saturation, etc
// set grabber properties
AM_MEDIA_TYPE mt;
hr = CoCreateInstance(CLSID_SampleGrabber, NULL, CLSCTX_INPROC_SERVER, IID_IBaseFilter, (void**)&m_pGrabber_0); // create ISampleGrabber
pCaptureGraphBuilder2->SetFiltergraph(pGraphBuilder); // set FilterGraph
pGraphBuilder->QueryInterface(IID_IMediaControl, (LPVOID *)&pMediaControl); // get MediaControl interface
m_pGrabber_0->QueryInterface(IID_ISampleGrabber, (void**)&m_pGrabberSettings_0);
ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
mt.majortype = MEDIATYPE_Video;
mt.subtype = MEDIASUBTYPE_RGB24;
hr = m_pGrabberSettings_0->SetMediaType(&mt);
if (FAILED(hr))
{
return hr;
}
hr = m_pGrabberSettings_0->SetOneShot(FALSE);
hr = m_pGrabberSettings_0->SetBufferSamples(TRUE);
// build filter graph
pGraphBuilder->AddFilter(pDeviceFilter_0, L"Device Filter");
pGraphBuilder->AddFilter(m_pGrabber_0, L"Sample Grabber");
IPin* pSourceOut_0 = GetPin(pDeviceFilter_0, PINDIR_OUTPUT);
IPin* pGrabberIn_0 = GetPin(m_pGrabber_0, PINDIR_INPUT);
pGraphBuilder->Connect(pSourceOut_0, pGrabberIn_0);
/*
pMediaControl->Run();
long pBufferSize;
unsigned char* pBuffer_0 = 0;
hr = m_pGrabberSettings_0->GetCurrentBuffer(&pBufferSize, NULL);
if (FAILED(hr))
{
return 0;
}
pBuffer_0 = (BYTE*)CoTaskMemAlloc(pBufferSize);
if (!pBuffer_0)
{
hr = E_OUTOFMEMORY;
return 0;
}
long pBufferSize = 0;
unsigned char* pBuffer_0 = 0;
long Size=0;
hr = m_pGrabberSettings_0->GetCurrentBuffer(&Size, NULL);
if (Size != pBufferSize)
{
pBufferSize = Size;
if (pBuffer_0 != 0)
{
delete[] pBuffer_0;
}
pBuffer_0= new unsigned char[pBufferSize];
}
long pBufferSize = 425;
unsigned char* pBuffer_0 = 0;
pBuffer_0 = new unsigned char[pBufferSize];
// start playing
pMediaControl->Run();
while (1) {
if (MessageBox(NULL, "Grab frame?", "Grab?", MB_OKCANCEL) == 2)
{
break;
}
hr = m_pGrabberSettings_0->GetCurrentBuffer(&pBufferSize,(long*)pBuffer_0);
Cleanup:
// convert to OpenCV format
IplImage* img_0 = cvCreateImage(cvSize(160,120),IPL_DEPTH_8U,3);
for (int i = 0; i < pBufferSize ; i++)
{
img_0->imageData[i] = pBuffer_0[i];
}
cvFlip(img_0, NULL, 0);
// show
// cvNamedWindow("mainWin_0", CV_WINDOW_AUTOSIZE);
// cvMoveWindow("mainWin_0", 100, 100);
cvShowImage("mainWin_0", img_0 );
cvSaveImage("c:\\users\\senthil\\desktop\\img.png",img_0 );
//cvWaitKey(0);
cvReleaseImage(&img_0 );
}
*/
pMediaControl->Run();
cvNamedWindow("Camera_Output", 1); //Create window
CvCapture* capture = cvCaptureFromCAM(0); //Capture using any camera connected to your system
while(1)
{
//Create infinte loop for live streaming
if (MessageBox(NULL, "Grab frame?", "Grab?", MB_OKCANCEL) == 2)
{
break;
}
IplImage* frame = cvQueryFrame(capture); //Create image frames from capture
cvShowImage("Camera_Output", frame); //Show image frames on created window
cvSaveImage("c:\\users\\senthil\\desktop\\img1.png",frame);
// cv::Mat img(frame);
// cv::imwrite("c:\\users\\selvaraj\\desktop\\img.png",img);
}
//std::cout << "FPS: " << fps << std::endl;
//std::cout << "PROP_BRIGHTNESS: " << PROP_BRIGHTNESS << std::endl;
//WriteComPort("COM3","A");
cvReleaseCapture(&capture); //Release capture.
cvDestroyWindow("Camera_Output"); //Destroy Window */
// release
pMediaControl->Release();
pCaptureGraphBuilder2->Release();
pGraphBuilder->Release();
pEnumMoniker->Release();
pCreateDevEnum->Release();
// finalize COM
CoUninitialize();
return 0;
}
I tried using the Sample Grabber as well, but it was not useful either. Please help me fix this code.
Are you talking about these settings:
setCameraMode(pCaptureGraphBuilder2, pConfig, pDeviceFilter_0, hr); // FPS, Res, color mode
setCameraControl(pDeviceFilter_0, hr, 10 , 12); // Focus, exposure
setCameraProperties(pDeviceFilter_0, hr, 180, 0, 4, 100, 0, 2800); // Brightness, saturation, etc