I'm trying to use the Windows Desktop Duplication API to capture the screen and save the raw output to a video. I'm using AcquireNextFrame with a very high timeout value (999 ms). This way I should get every new frame from Windows as soon as it has one, which should naturally come out at 60fps anyway. I end up getting sequences where everything looks good (frames 6-11), and then sequences where things look bad (frames 12-14). If I check AccumulatedFrames
lFrameInfo.AccumulatedFrames
the value is often 2 or higher. From my understanding, the long calls to AcquireNextFrame mean Windows is saying "hey, hold up, I don't have a frame for you yet", but once Windows does finally give me a frame, it is also saying "hey, you were actually too slow and ended up missing a frame". If I could somehow get these frames, I think I would be getting 60 Hz.
This can be further clarified with logging:
I0608 10:40:16.964375 4196 window_capturer_dd.cc:438] 206 - Frame 6 start acquire
I0608 10:40:16.973867 4196 window_capturer_dd.cc:451] 216 - Frame 6 acquired
I0608 10:40:16.981364 4196 window_capturer_dd.cc:438] 223 - Frame 7 start acquire
I0608 10:40:16.990864 4196 window_capturer_dd.cc:451] 233 - Frame 7 acquired
I0608 10:40:16.998364 4196 window_capturer_dd.cc:438] 240 - Frame 8 start acquire
I0608 10:40:17.007876 4196 window_capturer_dd.cc:451] 250 - Frame 8 acquired
I0608 10:40:17.015393 4196 window_capturer_dd.cc:438] 257 - Frame 9 start acquire
I0608 10:40:17.023905 4196 window_capturer_dd.cc:451] 266 - Frame 9 acquired
I0608 10:40:17.032411 4196 window_capturer_dd.cc:438] 274 - Frame 10 start acquire
I0608 10:40:17.039912 4196 window_capturer_dd.cc:451] 282 - Frame 10 acquired
I0608 10:40:17.048925 4196 window_capturer_dd.cc:438] 291 - Frame 11 start acquire
I0608 10:40:17.058428 4196 window_capturer_dd.cc:451] 300 - Frame 11 acquired
I0608 10:40:17.065943 4196 window_capturer_dd.cc:438] 308 - Frame 12 start acquire
I0608 10:40:17.096945 4196 window_capturer_dd.cc:451] 336 - Frame 12 acquired
I0608 10:40:17.098947 4196 window_capturer_dd.cc:464] 1 FRAMES MISSED on frame: 12
I0608 10:40:17.101444 4196 window_capturer_dd.cc:438] 343 - Frame 13 start acquire
I0608 10:40:17.128958 4196 window_capturer_dd.cc:451] 368 - Frame 13 acquired
I0608 10:40:17.130957 4196 window_capturer_dd.cc:464] 1 FRAMES MISSED on frame: 13
I0608 10:40:17.135459 4196 window_capturer_dd.cc:438] 377 - Frame 14 start acquire
I0608 10:40:17.160959 4196 window_capturer_dd.cc:451] 399 - Frame 14 acquired
I0608 10:40:17.162958 4196 window_capturer_dd.cc:464] 1 FRAMES MISSED on frame: 14
Frames 6-11 look good; the acquires are roughly 17 ms apart. Frame 12 should be acquired at (300+17=317 ms). Frame 12 starts waiting at 308, but doesn't get anything until 336 ms. Windows didn't have anything for me until the frame after that (300+17+17 ~= 336 ms). Okay, sure, maybe Windows just missed a frame, but when I finally get one, I can check AccumulatedFrames and its value was 2 (meaning I missed a frame because I waited too long before calling AcquireNextFrame). In my understanding, it only makes sense for AccumulatedFrames to be larger than 1 if AcquireNextFrame returns immediately.
Furthermore, I can run PresentMon while my capture software is running. Its logs show MsBetweenDisplayChange for every frame, which is fairly steady at 16.666 ms (with a couple of outliers, but far fewer than my capture software is seeing).
These people (1, 2) seem to have been able to get 60fps, so I'm wondering what I am doing incorrectly.
My code is based on this:
int main() {
int FPS = 60;
int video_length_sec = 5;
int total_frames = FPS * video_length_sec;
for (int i = 0; i < total_frames; i++) {
if(!CaptureSingleFrame()){
i--;
}
}
}
ComPtr<ID3D11Device> lDevice;
ComPtr<ID3D11DeviceContext> lImmediateContext;
ComPtr<IDXGIOutputDuplication> lDeskDupl;
ComPtr<ID3D11Texture2D> lAcquiredDesktopImage;
ComPtr<ID3D11Texture2D> lGDIImage;
ComPtr<ID3D11Texture2D> lDestImage;
DXGI_OUTPUT_DESC lOutputDesc;
DXGI_OUTDUPL_DESC lOutputDuplDesc;
D3D11_TEXTURE2D_DESC desc;
// Driver types supported
D3D_DRIVER_TYPE gDriverTypes[] = {
D3D_DRIVER_TYPE_HARDWARE
};
UINT gNumDriverTypes = ARRAYSIZE(gDriverTypes);
// Feature levels supported
D3D_FEATURE_LEVEL gFeatureLevels[] = {
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_1
};
UINT gNumFeatureLevels = ARRAYSIZE(gFeatureLevels);
bool Init() {
int lresult(-1);
D3D_FEATURE_LEVEL lFeatureLevel;
HRESULT hr(E_FAIL);
// Create device
for (UINT DriverTypeIndex = 0; DriverTypeIndex < gNumDriverTypes; ++DriverTypeIndex)
{
hr = D3D11CreateDevice(
nullptr,
gDriverTypes[DriverTypeIndex],
nullptr,
0,
gFeatureLevels,
gNumFeatureLevels,
D3D11_SDK_VERSION,
&lDevice,
&lFeatureLevel,
&lImmediateContext);
if (SUCCEEDED(hr))
{
// Device creation success, no need to loop anymore
break;
}
lDevice.Reset();
lImmediateContext.Reset();
}
if (FAILED(hr))
return false;
if (lDevice == nullptr)
return false;
// Get DXGI device
ComPtr<IDXGIDevice> lDxgiDevice;
hr = lDevice.As(&lDxgiDevice);
if (FAILED(hr))
return false;
// Get DXGI adapter
ComPtr<IDXGIAdapter> lDxgiAdapter;
hr = lDxgiDevice->GetParent(
__uuidof(IDXGIAdapter), &lDxgiAdapter);
if (FAILED(hr))
return false;
lDxgiDevice.Reset();
UINT Output = 0;
// Get output
ComPtr<IDXGIOutput> lDxgiOutput;
hr = lDxgiAdapter->EnumOutputs(
Output,
&lDxgiOutput);
if (FAILED(hr))
return false;
lDxgiAdapter.Reset();
hr = lDxgiOutput->GetDesc(
&lOutputDesc);
if (FAILED(hr))
return false;
// QI for Output 1
ComPtr<IDXGIOutput1> lDxgiOutput1;
hr = lDxgiOutput.As(&lDxgiOutput1);
if (FAILED(hr))
return false;
lDxgiOutput.Reset();
// Create desktop duplication
hr = lDxgiOutput1->DuplicateOutput(
lDevice.Get(), // TODO: what am I doing here?
&lDeskDupl);
if (FAILED(hr))
return false;
lDxgiOutput1.Reset();
// Create GUI drawing texture
lDeskDupl->GetDesc(&lOutputDuplDesc);
desc.Width = lOutputDuplDesc.ModeDesc.Width;
desc.Height = lOutputDuplDesc.ModeDesc.Height;
desc.Format = lOutputDuplDesc.ModeDesc.Format;
desc.ArraySize = 1;
desc.BindFlags = D3D11_BIND_FLAG::D3D11_BIND_RENDER_TARGET;
desc.MiscFlags = D3D11_RESOURCE_MISC_GDI_COMPATIBLE;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.MipLevels = 1;
desc.CPUAccessFlags = 0;
desc.Usage = D3D11_USAGE_DEFAULT;
hr = lDevice->CreateTexture2D(&desc, NULL, &lGDIImage);
if (FAILED(hr))
return false;
if (lGDIImage == nullptr)
return false;
// Create CPU access texture
desc.Width = lOutputDuplDesc.ModeDesc.Width;
desc.Height = lOutputDuplDesc.ModeDesc.Height;
desc.Format = lOutputDuplDesc.ModeDesc.Format;
std::cout << desc.Width << "x" << desc.Height << "\n\n\n";
desc.ArraySize = 1;
desc.BindFlags = 0;
desc.MiscFlags = 0;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.MipLevels = 1;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
desc.Usage = D3D11_USAGE_STAGING;
return true;
}
void WriteFrameToCaptureFile(ID3D11Texture2D* texture) {
// map on the stack instead of leaking a heap allocation each frame
D3D11_MAPPED_SUBRESOURCE res;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
lImmediateContext->Map(texture, subresource, D3D11_MAP_READ_WRITE, 0, &res);
char* data = reinterpret_cast<char*>(res.pData);
// writes data to file
WriteFrameToCaptureFile(data, 0);
lImmediateContext->Unmap(texture, subresource);
}
bool CaptureSingleFrame()
{
HRESULT hr(E_FAIL);
ComPtr<IDXGIResource> lDesktopResource = nullptr;
DXGI_OUTDUPL_FRAME_INFO lFrameInfo;
ID3D11Texture2D* currTexture;
hr = lDeskDupl->AcquireNextFrame(
999,
&lFrameInfo,
&lDesktopResource);
if (FAILED(hr)) {
LOG(INFO) << "Failed to acquire new frame";
return false;
}
if (lFrameInfo.LastPresentTime.HighPart == 0) {
// not interested in just mouse updates, which can happen much faster than 60fps if you really shake the mouse
hr = lDeskDupl->ReleaseFrame();
return false;
}
int accum_frames = lFrameInfo.AccumulatedFrames;
if (accum_frames > 1 && current_frame != 1) {
// TOO MANY OF THESE is the problem
// especially after having to wait >17ms in AcquireNextFrame()
}
// QI for ID3D11Texture2D
hr = lDesktopResource.As(&lAcquiredDesktopImage);
// Copy image into a newly created CPU access texture
hr = lDevice->CreateTexture2D(&desc, NULL, &currTexture);
if (FAILED(hr))
return false;
if (currTexture == nullptr)
return false;
lImmediateContext->CopyResource(currTexture, lAcquiredDesktopImage.Get());
writer_thread->Schedule(
FROM_HERE, [this, currTexture]() {
WriteFrameToCaptureFile(currTexture);
});
pending_write_counts_++;
hr = lDeskDupl->ReleaseFrame();
return true;
}
**EDIT - According to my measurements, you must call AcquireNextFrame() roughly 10 ms before the frame actually appears, or Windows will fail to acquire it and give you the next one instead. Every time my recording program takes more than 7 ms to wrap around (from acquiring frame i to calling AcquireNextFrame() for i+1), frame i+1 is missed.
***EDIT - Here's a screenshot of GPUView showing what I'm talking about. The first 6 frames process in no time, then the 7th frame takes 119 ms. The long rectangle beside "capture_to_argb.exe" corresponds to me being stuck inside AcquireNextFrame(). If you look up at the hardware queue, you can see it cleanly rendering at 60fps, even while I'm stuck in AcquireNextFrame(). At least this is my interpretation (I have no idea what I'm doing).
"Current Display Mode: 3840 x 2160 (32 bit) (60hz)" refers to display refresh rate, that is how many frames can be passed to display per second. However the rate at which new frames are rendered is typically much lower. You can inspect this rate using PresentMon or similar utilities. When I don't move the mouse it reports me something like this:
As you can see when nothing happens Windows presents new frame only twice per second or even slower. However this is typically really good for video encoding because even if you are recording video at 60 fps and AcquireNextFrame reports that no new frame is available then it means that current frame is exactly the same as previous.
Doing a blocking wait before next call of AcquireNextFrame you are missing the actual frames. Desktop Duplication API logic suggests that you attempt to acquire next frame immediately if you expect a decent frame rate. Your sleeping call effectively relinquishes the available remainder of execution timeout without hard promise that you get a new slice in scheduled interval of time.
You have to poll at maximal frame rate. Do not sleep (even with zero sleep time) and request next frame immediately. You will have the option to drop the frames that come too early. Desktop Duplication API is designed in a way that getting extra frames might be not too expensive of you identify them early and stop their processing.
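For illustration, a minimal polling sketch along those lines (assumptions: lDeskDupl is the question's IDXGIOutputDuplication, and HandleFrame() is a hypothetical placeholder for whatever copies or encodes the frame):

// Poll with a zero timeout, never sleep, and drop frames that arrive earlier than the target interval.
const LONGLONG kTargetInterval = 10000000LL / 60;   // ~16.6 ms in 100-ns units
LARGE_INTEGER freq, last = {}, now;
QueryPerformanceFrequency(&freq);
for (;;) {
    DXGI_OUTDUPL_FRAME_INFO frameInfo = {};
    Microsoft::WRL::ComPtr<IDXGIResource> resource;
    HRESULT hr = lDeskDupl->AcquireNextFrame(0, &frameInfo, &resource);
    if (hr == DXGI_ERROR_WAIT_TIMEOUT)
        continue;                                    // nothing new yet, poll again immediately
    if (FAILED(hr))
        break;                                       // e.g. DXGI_ERROR_ACCESS_LOST: recreate the duplication
    QueryPerformanceCounter(&now);
    LONGLONG elapsed = (now.QuadPart - last.QuadPart) * 10000000LL / freq.QuadPart;
    if (frameInfo.LastPresentTime.QuadPart != 0 && (last.QuadPart == 0 || elapsed >= kTargetInterval)) {
        HandleFrame(resource.Get());                 // copy/encode the desktop image
        last = now;
    }
    lDeskDupl->ReleaseFrame();                       // release promptly so the next frame is never blocked
}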
If you still prefer to sleep between the frames, you might want to read the accuracy remark:
To increase the accuracy of the sleep interval, call the timeGetDevCaps function to determine the supported minimum timer resolution and the timeBeginPeriod function to set the timer resolution to its minimum. Use caution when calling timeBeginPeriod, as frequent calls can significantly affect the system clock, system power usage, and the scheduler. If you call timeBeginPeriod, call it one time early in the application and be sure to call the timeEndPeriod function at the very end of the application.
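A minimal sketch of that advice, assuming the capture application links against winmm.lib:

#include <windows.h>
#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")

// Raise the timer resolution to the supported minimum for the lifetime of the capture.
TIMECAPS tc;
UINT period = 1;
if (timeGetDevCaps(&tc, sizeof(tc)) == MMSYSERR_NOERROR)
    period = tc.wPeriodMin;      // typically 1 ms
timeBeginPeriod(period);         // call once, early in the application
// ... capture loop that sleeps between frames ...
timeEndPeriod(period);           // matching call at the very end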
As others have mentioned, the 60Hz refresh rate only indicates the frequency with which the display may change. It doesn't actually mean that it will change that frequently. AcquireNextFrame will only return a frame when what is being displayed on the duplicated output has changed.
My recommendation is to ...
Create a Timer Queue timer with the desired video frame interval
Create a compatible resource in which to buffer the desktop bitmap
When the timer goes off, call AcquireNextFrame with a zero timeout
If there has been a change, copy the returned resource to your buffer and release it
Send the buffered frame to the encoder or whatever further processing
This will yield a sequence of frames at the desired rate. If the display hasn't changed, you'll have a copy of the previous frame to use to maintain your frame rate.
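A rough sketch of that recommendation (g_dupl, CopyToBuffer() and SendBufferedFrame() are hypothetical placeholders, not part of the question's code):

// Timer callback fired at the desired video frame interval (here ~16 ms).
VOID CALLBACK OnVideoFrameTimer(PVOID /*param*/, BOOLEAN /*timerFired*/)
{
    DXGI_OUTDUPL_FRAME_INFO info = {};
    Microsoft::WRL::ComPtr<IDXGIResource> resource;
    HRESULT hr = g_dupl->AcquireNextFrame(0, &info, &resource);   // zero timeout
    if (SUCCEEDED(hr)) {
        if (info.LastPresentTime.QuadPart != 0)
            CopyToBuffer(resource.Get());      // the desktop changed: refresh the buffered copy
        g_dupl->ReleaseFrame();
    }
    // DXGI_ERROR_WAIT_TIMEOUT means nothing changed: keep the previously buffered frame
    SendBufferedFrame();                       // emit exactly one frame per tick either way
}

// Drive the capture from a Timer Queue timer ticking every 16 ms (roughly 60 Hz).
HANDLE hTimer = NULL;
CreateTimerQueueTimer(&hTimer, NULL, OnVideoFrameTimer, NULL, 0, 16, WT_EXECUTEDEFAULT);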
Related
I am using ffmpeg to record video input from GDI (a Windows screen recorder) so I can view it later in VLC (via the ActiveX plugin), again using ffmpeg to decode it.
Right now, seeking in the video does not work in VLC via the plugin (which is critical). The VLC player itself does provide seeking, but it is more like byte-position seeking (on I-frames, which are larger than other frames, it makes larger steps on the horizontal scroll bar, and there are also no timestamps).
The encoder is opened with the following defaults:
avformat_alloc_output_context2(&outputContext, NULL, "mpegts", "test.mpg");
outputFormat = outputContext->oformat;
encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
outputStream = avformat_new_stream(outputContext, encoder);
outputStream->id = outputContext->nb_streams - 1;
encoderContext = outputStream->codec;
encoderContext->bit_rate = bitrate; // 800000 by default
encoderContext->rc_max_rate = bitrate;
encoderContext->width = imageWidth; // 1920
encoderContext->height = imageHeight; // 1080
encoderContext->time_base.num = 1;
encoderContext->time_base.den = fps; // 25 by default
encoderContext->gop_size = fps;
encoderContext->keyint_min = fps;
encoderContext->max_b_frames = 0;
encoderContext->pix_fmt = AV_PIX_FMT_YUV420P;
outputStream->time_base = encoderContext->time_base;
avcodec_open2(encoderContext, encoder, NULL);
Recording is done this way:
// my impl of GDI recorder, returning AVFrame with only data and linesize filled.
AVFrame* tmp_frame = impl_->recorder->acquireFrame();
// converting RGB -> YUV420
sws_scale(impl_->scaleContext, tmp_frame->data, tmp_frame->linesize, 0, impl_->frame->height, impl_->frame->data, impl_->frame->linesize);
// pts variable is calculated by using QueryPerformanceCounter from WinAPI. It is strictly increasing
impl_->frame->pts = pts;
avcodec_encode_video2(impl_->encoderContext, impl_->packet, impl_->frame, &out_size);
if (out_size) {
impl_->packet->pts = pts;
impl_->packet->dts = pts;
impl_->packet->duration = 1; // here it is! It is set but has no effect
av_packet_rescale_ts(impl_->packet, impl_->encoderContext->time_base, impl_->outputStream->time_base);
// here pts = 3600*pts, dts = 3600*pts, duration = 3600, which I consider to be legit in terms of milliseconds
impl_->packet->stream_index = impl_->outputStream->index;
av_interleaved_write_frame(impl_->outputContext, impl_->packet);
av_packet_unref(impl_->packet);
out_size = 0;
}
ffprobe provides the following info on the frames:
[FRAME]
media_type=video
stream_index=0
key_frame=1
pkt_pts=3600
pkt_pts_time=0:00:00.040000
pkt_dts=3600
pkt_dts_time=0:00:00.040000
best_effort_timestamp=3600
best_effort_timestamp_time=0:00:00.040000
pkt_duration=N/A
pkt_duration_time=N/A
pkt_pos=564
pkt_size=97.018555 Kibyte
width=1920
height=1080
pix_fmt=yuv420p
sample_aspect_ratio=N/A
pict_type=I
coded_picture_number=0
display_picture_number=0
interlaced_frame=0
top_field_first=0
repeat_pict=0
[/FRAME]
I believe the problem is in the pkt_duration field, even though it was set.
What am I doing wrong in the recording so that I can't seek in the video?
P.S. On other videos (also H.264) seeking works in the ActiveX VLC plugin.
What is definitely wrong is:
impl_->packet->pts = pts;
impl_->packet->dts = pts;
PTS and DTS are not equal! They could be if you had only I-frames, which is not the case here. Also, your comment says: pts variable is calculated by using QueryPerformanceCounter from WinAPI. If your frame rate is constant, and I believe it is, then you don't need the QueryPerformanceCounter API. PTS is usually in 90 kHz units. The duration of 1 frame expressed in 90 kHz units is calculated like this:
90000 x denominator / numerator
If the fps is 25 then the numerator is 25 and the denominator is 1. For 29.97 fps the numerator is 30000 and the denominator is 1001. Each new frame's PTS should be increased by that amount (unless you have dropped frames). Regarding the DTS, the encoder should provide that value.
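As a sketch of how that could look with the poster's variables (frame_index is an added counter; the key points are a fixed PTS step in the encoder time_base and not overwriting the pts/dts that the encoder already put into the packet):

// One tick per frame in the encoder time_base (1/fps), rescaled to the 90 kHz mpegts time_base.
impl_->frame->pts = frame_index++;                          // 0, 1, 2, ...
avcodec_encode_video2(impl_->encoderContext, impl_->packet, impl_->frame, &out_size);
if (out_size) {
    // packet->pts and packet->dts are already set by the encoder; do not overwrite them
    impl_->packet->duration = 1;                            // one frame, in encoder time_base units
    av_packet_rescale_ts(impl_->packet,
                         impl_->encoderContext->time_base,  // 1/25
                         impl_->outputStream->time_base);   // 1/90000 -> a PTS step of 3600
    impl_->packet->stream_index = impl_->outputStream->index;
    av_interleaved_write_frame(impl_->outputContext, impl_->packet);
    av_packet_unref(impl_->packet);
}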
Context:
I'm developing a native C++ Unity 5 plugin that reads in DXT compressed texture data and uploads it to the GPU for further use in Unity. The aim is to create a fast image-sequence player, updating image data on the fly. The textures are compressed with an offline console application.
Unity can work with different graphics engines, I'm aiming towards DirectX11 and OpenGL 3.3+.
Problem:
The DirectX runtime texture update code, through a mapped subresource, gives different outputs on different graphics drivers. Updating a texture through such a mapped resource means mapping a pointer to the texture data and memcpy'ing the data from the RAM buffer to the mapped GPU buffer. Doing so, different drivers seem to expect different parameters for the row pitch value when copying bytes. I never had problems on the several Nvidia GPUs I tested on, but AMD and Intel GPUs seem to act differently and I get distorted output, as shown below. Furthermore, I'm working with DXT1 pixel data (0.5 bytes per pixel) and DXT5 data (1 byte per pixel). I can't seem to get the correct pitch parameter for these DXT textures.
Code:
The following initialisation code for generating the D3D11 texture and filling it with initial texture data - e.g. the first frame of an image sequence - works perfectly on all drivers. The player pointer points to a custom class that handles all file reads and contains getters for the currently loaded DXT-compressed frame, its dimensions, etc.
if (s_DeviceType == kUnityGfxRendererD3D11)
{
HRESULT hr;
DXGI_FORMAT format = (compression_type == DxtCompressionType::DXT_TYPE_DXT1_NO_ALPHA) ? DXGI_FORMAT_BC1_UNORM : DXGI_FORMAT_BC3_UNORM;
// Create texture
D3D11_TEXTURE2D_DESC desc;
desc.Width = w;
desc.Height = h;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = format;
// no anti-aliasing
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.MiscFlags = 0;
// Initial data: first frame
D3D11_SUBRESOURCE_DATA data;
data.pSysMem = player->getBufferPtr();
data.SysMemPitch = 16 * (player->getWidth() / 4);
data.SysMemSlicePitch = 0; // just a 2d texture, no depth
// Init with initial data
hr = g_D3D11Device->CreateTexture2D(&desc, &data, &dxt_d3d_tex);
if (SUCCEEDED(hr) && dxt_d3d_tex != 0)
{
DXT_VERBOSE("Succesfully created D3D Texture.");
DXT_VERBOSE("Creating D3D SRV.");
D3D11_SHADER_RESOURCE_VIEW_DESC SRVDesc;
memset(&SRVDesc, 0, sizeof(SRVDesc));
SRVDesc.Format = format;
SRVDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
SRVDesc.Texture2D.MipLevels = 1;
hr = g_D3D11Device->CreateShaderResourceView(dxt_d3d_tex, &SRVDesc, &textureView);
if (FAILED(hr))
{
dxt_d3d_tex->Release();
return hr;
}
DXT_VERBOSE("Succesfully created D3D SRV.");
}
else
{
DXT_ERROR("Error creating D3D texture.")
}
}
The following update code, which runs for each new frame, has the error somewhere. Please note the commented-out line for method 1: a simple memcpy without any row pitch specified, which works well on NVIDIA drivers.
Further on, in method 2, you can see that I log the different row pitch values. For instance, for a 1920x960 frame I get 1920 for the buffer stride and 2048 for the runtime stride. This 128-pixel difference probably has to be padded (as can be seen in the example pic below), but I can't figure out how. When I just use mappedResource.RowPitch without dividing it by 4 (done by the bit shift), Unity crashes.
ID3D11DeviceContext* ctx = NULL;
g_D3D11Device->GetImmediateContext(&ctx);
if (dxt_d3d_tex && bShouldUpload)
{
if (player->gather_stats) before_upload = ns();
D3D11_MAPPED_SUBRESOURCE mappedResource;
ctx->Map(dxt_d3d_tex, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
/* 1: THIS CODE WORKS ON ALL NVIDIA DRIVERS BUT GENERATES DISTORTED OR NO OUTPUT ON AMD/INTEL: */
//memcpy(mappedResource.pData, player->getBufferPtr(), player->getBytesPerFrame());
/* 2: THIS CODE GENERATES OUTPUT BUT SEEMS TO NEED PADDING? */
BYTE* mappedData = reinterpret_cast<BYTE*>(mappedResource.pData);
BYTE* buffer = player->getBufferPtr();
UINT height = player->getHeight();
UINT buffer_stride = player->getBytesPerFrame() / player->getHeight();
UINT runtime_stride = mappedResource.RowPitch >> 2;
DXT_VERBOSE("Buffer stride: %d", buffer_stride);
DXT_VERBOSE("Runtime stride: %d", runtime_stride);
for (UINT i = 0; i < height; ++i)
{
memcpy(mappedData, buffer, buffer_stride);
mappedData += runtime_stride;
buffer += buffer_stride;
}
ctx->Unmap(dxt_d3d_tex, 0);
}
Example pic 1 - distorted output when using memcpy to copy the whole buffer without a separate row pitch on AMD/Intel (method 1)
Example pic 2 - better but still erroneous output when using the above code with mappedResource.RowPitch on AMD/Intel (method 2). The blue bars indicate the zones of error, and they need to disappear so that all pixels align and form one image.
Thanks for any pointers!
Best,
Vincent
The mapped data row pitch is in bytes; dividing it by four is definitely an issue.
UINT runtime_stride = mappedResource.RowPitch >> 2;
...
mappedData += runtime_stride; // here you are only jumping one quarter of a row
With a BC format, it is the height (the number of rows) that is divided by 4, because each row of blocks covers 4 pixel rows.
Also, the BC1 format is 8 bytes per 4x4 block, so the line below should be 8 * and not 16 *. As long as you handle the row stride properly on your side, D3D will understand; you just waste half the memory here.
data.SysMemPitch = 16 * (player->getWidth() / 4);
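Putting those two fixes together, the copy loop could look roughly like this (a sketch reusing the question's names, assuming format is the DXGI_FORMAT chosen during texture creation):

// 8 bytes per 4x4 block for BC1/DXT1, 16 bytes per 4x4 block for BC3/DXT5.
const UINT blockBytes   = (format == DXGI_FORMAT_BC1_UNORM) ? 8 : 16;
const UINT blockRows    = (player->getHeight() + 3) / 4;                 // rows of 4x4 blocks
const UINT bufferStride = blockBytes * ((player->getWidth() + 3) / 4);   // tightly packed source pitch

BYTE* dst = reinterpret_cast<BYTE*>(mappedResource.pData);
BYTE* src = player->getBufferPtr();
for (UINT row = 0; row < blockRows; ++row) {
    memcpy(dst, src, bufferStride);        // copy one row of blocks
    dst += mappedResource.RowPitch;        // destination pitch is in bytes: no >> 2
    src += bufferStride;
}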
I am currently developing a little screenshot application which records both of my screens' desktops to a file.
I am using the GetFrontBufferData() function and it is working great.
Unfortunately, when changing the screen color depth from 32 to 16 bits (to perform some tests), I get a bad image (a purple image with a changed resolution) and the recorded screenshot has very poor quality:
Does someone know if there is a way to use GetFrontBufferData() with a 16-bit screen?
edit:
My init direct3D:
ZeroMemory(&d3dPresentationParameters,sizeof(D3DPRESENT_PARAMETERS));//Fills a block of memory with zeros.
d3dPresentationParameters.Windowed = TRUE;
d3dPresentationParameters.Flags = D3DPRESENTFLAG_LOCKABLE_BACKBUFFER;
d3dPresentationParameters.BackBufferFormat = d3dFormat;//d3dDisplayMode.Format;//D3DFMT_A8R8G8B8;
d3dPresentationParameters.BackBufferCount = 1;
d3dPresentationParameters.BackBufferHeight = gScreenRect.bottom = uiHeight;
d3dPresentationParameters.BackBufferWidth = gScreenRect.right = uiWidth;
d3dPresentationParameters.MultiSampleType = D3DMULTISAMPLE_NONE;
d3dPresentationParameters.MultiSampleQuality = 0;
d3dPresentationParameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dPresentationParameters.hDeviceWindow = hWnd;
d3dPresentationParameters.PresentationInterval = D3DPRESENT_INTERVAL_DEFAULT;
d3dPresentationParameters.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
The thread I use to capture screenshots:
CreateOffscreenPlainSurface(uiWidth, uiHeight, D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM, pBackBuffer, NULL)) != D3D_OK )
{
DBG("Error: CreateOffscreenPlainSurface failed = 0x%x", iRes);
break;
}
GetFrontBufferData(0, pCaptureSurface)) != D3D_OK)
{
DBG("Error: GetFrontBufferData failed = 0x%x", iRes);
break;
}
//D3DXSaveSurfaceToFile("Desktop.bmp", D3DXIFF_BMP, pBackBuffer,NULL, NULL); //Test purposes
ZeroMemory(lockedRect, sizeof(D3DLOCKED_RECT));
LockRect(lockedRect, NULL, D3DLOCK_READONLY)) != D3D_OK )
{
DBG("Error: LockRect failed = 0x%x", iRes);
break;
}
if( (iRes = UnlockRect()) != D3D_OK )
{
DBG("Error: UnlockRect failed = 0x%x", iRes);
break;
}
/**/
This code works perfectly with 32-bit color depth but not with 16-bit.
When creating the device, I create 2 devices, one for each screen (iScreenNber). This also works in 32-bit (not in 16).
When saving the captured screenshots into 2 bmp files for testing (in 16 bits), one shows the main display perfectly and the other screen is black.
When using memcpy to access pData, I get the above screenshot with the purple color and bad resolution.
edit2:
I noticed the following:
When saving the offscreen surface to a BMP file, I get the main display (in 1.bmp), which is refreshed each frame (so it is working just fine). For the second display, I just get the first frame and then nothing more.
Quoting MSDN for GetFrontBufferData: "The buffer pointed to by pDestSurface will be filled with a representation of the front buffer, converted to the standard 32 bits per pixel format D3DFMT_A8R8G8B8." I guess this is a problem for 16-bit color depth.
The first problem comes from the memcpy, which does not handle the 16-bit color depth properly, and I still don't know why ----> help needed for this!
The second problem is the second display, which is not working, and I don't know why either.
What am I doing wrong here? I just get a black image in my Desktop N°xx.bmp file.
Thank you very much for your help.
This is how I create a surface to capture screenshots:
IDirect3DSurface9* pCaptureSurface = NULL;
HRESULT hr = pD3DDevice->CreateOffscreenPlainSurface(
D3DPresentParams.BackBufferWidth,
D3DPresentParams.BackBufferHeight,
D3DPresentParams.BackBufferFormat,
D3DPOOL_SYSTEMMEM,
&pCaptureSurface,
NULL);
pD3DDevice->GetFrontBufferData(0, pCaptureSurface);
If you didn't store D3DPresentParams anywhere, you can use IDirect3DDevice9::GetDisplayMode to obtain the width, height and format of your swap chain. All resizing and format conversion can be done after capturing the front buffer. Also, as far as I know, the display format doesn't support an alpha channel, so it is typically D3DFMT_X8R8G8B8, not D3DFMT_A8R8G8B8.
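For example, a sketch along those lines (note that GetFrontBufferData always converts to 32-bit A8R8G8B8, so the destination surface is created in that format regardless of the display mode's own format):

// Query the current display mode instead of relying on stored present parameters.
D3DDISPLAYMODE mode = {};
pD3DDevice->GetDisplayMode(0, &mode);            // swap chain 0: width, height, format

IDirect3DSurface9* pCaptureSurface = NULL;
HRESULT hr = pD3DDevice->CreateOffscreenPlainSurface(
    mode.Width, mode.Height,
    D3DFMT_A8R8G8B8,                             // GetFrontBufferData fills a 32-bit surface
    D3DPOOL_SYSTEMMEM,
    &pCaptureSurface,
    NULL);
if (SUCCEEDED(hr))
    pD3DDevice->GetFrontBufferData(0, pCaptureSurface);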
Update:
Actually, you are trying to capture the whole screen using a D3D device without rendering anything. The purpose of D3D/OpenGL is to create or process images with GPU acceleration. Taking a screenshot is just copying some video memory; it doesn't use the GPU's processing power, so using a GPU API brings no significant gain. Moreover, as you can see, strange things happen when you capture a front buffer that you didn't render yourself. To extend your app, you could capture the image with GDI, then load it into a texture and do any GPU post-processing there.
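A rough sketch of that GDI route (one monitor, 32-bit DIB; uploading the resulting pixels into a texture is left out):

// Capture the primary screen into a top-down 32-bit DIB with plain GDI.
HDC hScreen = GetDC(NULL);
HDC hMem    = CreateCompatibleDC(hScreen);
int w = GetSystemMetrics(SM_CXSCREEN);
int h = GetSystemMetrics(SM_CYSCREEN);

BITMAPINFO bmi = {};
bmi.bmiHeader.biSize        = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth       = w;
bmi.bmiHeader.biHeight      = -h;                // negative height = top-down rows
bmi.bmiHeader.biPlanes      = 1;
bmi.bmiHeader.biBitCount    = 32;
bmi.bmiHeader.biCompression = BI_RGB;

void* pixels = NULL;
HBITMAP hBmp = CreateDIBSection(hMem, &bmi, DIB_RGB_COLORS, &pixels, NULL, 0);
HGDIOBJ old  = SelectObject(hMem, hBmp);
BitBlt(hMem, 0, 0, w, h, hScreen, 0, 0, SRCCOPY);   // 'pixels' now holds BGRA data
// ... load 'pixels' into a texture and do the post-processing on the GPU ...
SelectObject(hMem, old);
DeleteObject(hBmp);
DeleteDC(hMem);
ReleaseDC(NULL, hScreen);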
So I found some answers to my problem.
1) The second monitor wasn't working and I was unable to capture a screenshot from it in 16-bit mode
This comes from the memcpy(..) line in the code. Because I am working with a 16-bit monitor, the surface memory gets corrupted when executing the memcpy, and this leads to a black screen.
I still haven't found the solution for this, but I'm working on it.
2) The colors of the screenshot are wrong
This is, unsurprisingly, due to the 16-bit color depth. I am using GetFrontBufferData, and quoting Microsoft: "The buffer pointed to by pDestSurface will be filled with a representation of the front buffer, converted to the standard 32 bits per pixel format D3DFMT_A8R8G8B8." This means that if I want to use the pixel data from LockRect(...), I have to "re-convert" my data into 16-bit mode. Therefore, I need to convert my pData from D3DFMT_A8R8G8B8 to D3DFMT_R5G6B5, which is pretty simple.
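For reference, the repacking could look roughly like this (a sketch: src/dst and the two pitches are placeholders for the locked 32-bit data and the 16-bit destination buffer):

// Pack one X8R8G8B8 pixel into R5G6B5.
static inline WORD PackR5G6B5(DWORD xrgb)
{
    BYTE r = (xrgb >> 16) & 0xFF;
    BYTE g = (xrgb >> 8)  & 0xFF;
    BYTE b =  xrgb        & 0xFF;
    return (WORD)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}

// Convert the whole surface row by row, honouring both pitches.
for (UINT y = 0; y < height; ++y) {
    const DWORD* srcRow = (const DWORD*)((const BYTE*)src + y * srcPitch);
    WORD*        dstRow = (WORD*)((BYTE*)dst + y * dstPitch);
    for (UINT x = 0; x < width; ++x)
        dstRow[x] = PackR5G6B5(srcRow[x]);
}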
3) How to debug the application?
Thanks to your comments, I was told that I should analyze the pScreenInfo->pData content when in 16-bit mode (thanks to Niello). Therefore, I've created a simple method that takes the raw data from pScreenInfo->pData and copies it into a .bmp:
HRESULT hr;
DWORD dwBytesRead;
UINT uiSize = 1920 * 1080 * 4;
HANDLE hFile;
hFile = CreateFile(TEXT("data.raw"), GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
BOOL bOk = ReadFile(hFile, pData, uiSize, &dwBytesRead, NULL);
if(!bOk)
exit(0);
pTexture = NULL;
hr = pScreenInfo->g_pD3DDevice->CreateTexture(width, height, 1, 0, D3DFMT_A8R8G8B8, D3DPOOL_MANAGED, &pTexture, NULL);
D3DLOCKED_RECT lockedRect;
hr = pTexture->LockRect(0, &lockedRect, NULL, D3DLOCK_READONLY);
memcpy(lockedRect.pBits, pData, lockedRect.Pitch * height);
hr = pTexture->UnlockRect(0);
hr = D3DXSaveTextureToFile(test, D3DXIFF_BMP, pTexture,NULL);
bOk = CloseHandle(hFile);
SAFE_RELEASE(pTexture);
This piece of code allowed me to see that the pData content was correct and that I could get a good .bmp file at the end, which means GetFrontBufferData(...) was working correctly and the problem comes from the memcpy(...).
4) Remaining problems
I am still trying to work out how to solve the memcpy issue and see where the problem comes from. This is the last problem, since the colors are good now (thanks to the 32-bit to 16-bit conversion).
Thanks everybody for your helpful comments!
I'm implementing deferred shading in a DirectX 9 application. My method of deferred shading requires 3 render targets (color, position, and normal). It is necessary to:
set the render targets in the device at the beginning of the 'render' function
draw the data to them in the 'rt pass'
remove the render targets from the device( so as not to draw over them during subsequent passes)
set the render targets as textures for subsequent passes so that the effect can recall data 'drawn' to the rt's in the 'rt pass'...
This method works fine, however, I am experiencing performance issues. I've narrowed them down to two function calls:
IDirect3DTexture9::GetSurfaceLevel()
IDirect3DDevice9::SetRenderTarget()
Here is code to set render target:
IDirect3DDevice9 *pd3dDevice = CEffectManager::GetDevice();
IDirect3DTexture9 *pRT = CEffectManager::GetColorRT();
IDirect3DSurface9 *pSrf = NULL;
pRT->GetSurfaceLevel( 0, &pSrf );
pd3dDevice->SetRenderTarget( 0, pSrf );
PIX indicates that the duration (in cycles) of the call to GetSurfaceLevel() is very high, ~1/2 ms per call (Duration / Total Duration * 1 / FrameRate). Because it is necessary to get 3 surfaces, the combined duration is too high! It's more than 4 times greater than the combined draw calls...
I tried to eliminate the call to GetSurfaceLevel() by storing a pointer to the surface during render target creation... oddly enough, the SetRenderTarget() function then took on the same duration (when before its duration was negligible). Here is the altered code:
IDirect3DDevice9 *pd3dDevice = CEffectManager::GetDevice();
IDirect3DSurface9 *pSrf = CEffectManager::GetColorSurface();
pd3dDevice->SetRenderTarget( 0, pSrf );
Is there a way around this performance issue? Why does the second method take as long as the first? It seems as though the process within IDirect3DDevice9::SetRenderTarget() simply takes time...is there a device state that I can set to help performance?
Update:
I've implemented the following code in order to better test performance:
IDirect3DDevice9 *pd3dDevice = CEffectManager::GetDevice();
IDirect3DTexture9 *pRT = CEffectManager::GetColorRT();
IDirect3DSurface9 *pSRF = NULL;
IDirect3DQuery9 *pEvent = NULL;
LARGE_INTEGER lnStart, lnStop, lnFreq;
// create query
pd3dDevice->CreateQuery( D3DQUERYTYPE_EVENT, &pEvent );
// insert 'end' marker
pEvent->Issue( D3DISSUE_END );
// flush command buffer
while( S_FALSE == pEvent->GetData( NULL, 0, D3DGETDATA_FLUSH ) );
// get start time
QueryPerformanceCounter( &lnStart );
// api call
pRT->GetSurfaceLevel( 0, &pSRF );
// insert 'end' marker
pEvent->Issue( D3DISSUE_END );
// flush the command buffer
while( S_FALSE == pEvent->GetData( NULL, 0, D3DGETDATA_FLUSH ) );
QueryPerformanceCounter( &lnStop );
QueryPerformanceFrequency( &lnFreq );
lnStop.QuadPart -= lnStart.QuadPart;
float fElapsedTime = ( float )lnStop.QuadPart / ( float )lnFreq.QuadPart;
fElapsedTime on average measured 10 - 50 microseconds
I performed the same test on IDirect3DDevice9::SetRenderTarget() and the results on average measured 5 - 30 microseconds...
This data is much better than what I got from PIX... It suggests that there is not as much of a delay as I thought. However, the framerate is drastically reduced when using deferred shading... this seemed to be the most likely source of the performance loss... did I query the device correctly?
I am trying to extract images out of an mp4 video stream. After looking things up, it seems like the proper way of doing that is to use Media Foundation in C++ and open the frames / read the data out of them.
There's very little by way of documentation and samples, but after some digging, it seems like some people have had success by reading frames into a texture and copying the contents of that texture to a memory-readable texture (I am not even sure if I am using the correct terms here). Trying what I found, though, gives me errors, and I am probably doing a bunch of stuff wrong.
Here's a short piece of code from where I try to do that (project itself attached at the bottom).
ComPtr<ID3D11Texture2D> spTextureDst;
MEDIA::ThrowIfFailed(
m_spDX11SwapChain->GetBuffer(0, IID_PPV_ARGS(&spTextureDst))
);
auto rcNormalized = MFVideoNormalizedRect();
rcNormalized.left = 0;
rcNormalized.right = 1;
rcNormalized.top = 0;
rcNormalized.bottom = 1;
MEDIA::ThrowIfFailed(
m_spMediaEngine->TransferVideoFrame(m_spRenderTexture.Get(), &rcNormalized, &m_rcTarget, &m_bkgColor)
);
//copy the render target texture to the readable texture.
m_spDX11DeviceContext->CopySubresourceRegion(m_spCopyTexture.Get(),0,0,0,0,m_spRenderTexture.Get(),0,NULL);
m_spDX11DeviceContext->Flush();
//Map the readable texture;
D3D11_MAPPED_SUBRESOURCE mapped = {0};
m_spDX11DeviceContext->Map(m_spCopyTexture.Get(),0,D3D11_MAP_READ,0,&mapped);
void* buffer = ::CoTaskMemAlloc(600 * 400 * 3);
memcpy(buffer, mapped.pData,600 * 400 * 3);
//unmap so we can copy during next update.
m_spDX11DeviceContext->Unmap(m_spCopyTexture.Get(),0);
// and then present it to the screen
MEDIA::ThrowIfFailed(
m_spDX11SwapChain->Present(1, 0)
);
}
The error I get is:
First-chance exception at 0x76814B32 in App1.exe: Microsoft C++ exception: Platform::InvalidArgumentException ^ at memory location 0x07AFF60C. HRESULT:0x80070057
I am not really sure how to pursue it further since, like I said, there's very little documentation about it.
Here's the modified sample I am working off of. This question is specific to WinRT (Windows 8 apps).
UPDATE: success!! See the edit at the bottom.
Some partial success, but maybe enough to answer your question. Please read on.
On my system, debugging the exception showed that the OnTimer() function failed when attempting to call TransferVideoFrame(). The error it gave was InvalidArgumentException.
So, a bit of Googling led to my first discovery - there is apparently a bug in NVIDIA drivers - which means video playback seems to fail with the 11 and 10 feature levels.
So my first change was in function CreateDX11Device() as follows:
static const D3D_FEATURE_LEVEL levels[] = {
/*
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
*/
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_1
};
Now TransferVideoFrame() still fails, but gives E_FAIL (as an HRESULT) instead of an invalid argument.
More Googling led to my second discovery -
an example showing the use of TransferVideoFrame() without using CreateTexture2D() to pre-create the texture. I see you already had some similar code in OnTimer() which was not used, so I guess you'd found the same link.
Anyway, I now used this code to get the video frame:
ComPtr <ID3D11Texture2D> spTextureDst;
m_spDX11SwapChain->GetBuffer (0, IID_PPV_ARGS (&spTextureDst));
m_spMediaEngine->TransferVideoFrame (spTextureDst.Get (), nullptr, &m_rcTarget, &m_bkgColor);
After doing this, I see that TransferVideoFrame() succeeds (good!) but calling Map() on your copied texture - m_spCopyTexture - fails because that texture wasn't created with CPU read access.
So, I just used your read/write m_spRenderTexture as the target of the copy instead because that has the correct flags and, due to the previous change, I was no longer using it.
//copy the render target texture to the readable texture.
m_spDX11DeviceContext->CopySubresourceRegion(m_spRenderTexture.Get(),0,0,0,0,spTextureDst.Get(),0,NULL);
m_spDX11DeviceContext->Flush();
//Map the readable texture;
D3D11_MAPPED_SUBRESOURCE mapped = {0};
HRESULT hr = m_spDX11DeviceContext->Map(m_spRenderTexture.Get(),0,D3D11_MAP_READ,0,&mapped);
void* buffer = ::CoTaskMemAlloc(176 * 144 * 3);
memcpy(buffer, mapped.pData,176 * 144 * 3);
//unmap so we can copy during next update.
m_spDX11DeviceContext->Unmap(m_spRenderTexture.Get(),0);
Now, on my system, the OnTimer() function does not fail. Video frames are rendered to the texture and the pixel data is copied out successfully to the memory buffer.
Before looking to see if there are further problems, maybe this is a good time to see if you can make the same progress as I have so far. If you comment on this answer with more info, I will edit the answer to add any more help if possible.
EDIT
Changes made to texture description in FramePlayer::CreateBackBuffers()
//make first texture cpu readable
D3D11_TEXTURE2D_DESC texDesc = {0};
texDesc.Width = 176;
texDesc.Height = 144;
texDesc.MipLevels = 1;
texDesc.ArraySize = 1;
texDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_STAGING;
texDesc.BindFlags = 0;
texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
texDesc.MiscFlags = 0;
MEDIA::ThrowIfFailed(m_spDX11Device->CreateTexture2D(&texDesc,NULL,&m_spRenderTexture));
Note also that there's a memory leak that needs to be cleared up sometime (I'm sure you're aware) - the memory allocated in the following line is never freed:
void* buffer = ::CoTaskMemAlloc(176 * 144 * 3); // sizes changed for my test
SUCCESS
I have now succeeded in saving an individual frame, but now without the use of the copy texture.
First, I downloaded the latest version of the DirectXTex Library, which provides DX11 texture helper functions, for example to extract an image from a texture and to save to file. The instructions for adding the DirectXTex library to your solution as an existing project need to be followed carefully, taking note of the changes needed for Windows 8 Store Apps.
Once the above library is included, referenced and built, add the following #includes to FramePlayer.cpp:
#include "..\DirectXTex\DirectXTex.h" // nb - use the relative path you copied to
#include <wincodec.h>
Finally, the central section of code in FramePlayer::OnTimer() needs to be similar to the following. You will see I just save to the same filename each time, so this will need amending to add e.g. a frame number to the name.
// new frame available at the media engine so get it
ComPtr<ID3D11Texture2D> spTextureDst;
MEDIA::ThrowIfFailed(m_spDX11SwapChain->GetBuffer(0, IID_PPV_ARGS(&spTextureDst)));
auto rcNormalized = MFVideoNormalizedRect();
rcNormalized.left = 0;
rcNormalized.right = 1;
rcNormalized.top = 0;
rcNormalized.bottom = 1;
MEDIA::ThrowIfFailed(m_spMediaEngine->TransferVideoFrame(spTextureDst.Get(), &rcNormalized, &m_rcTarget, &m_bkgColor));
// capture an image from the DX11 texture
DirectX::ScratchImage pImage;
HRESULT hr = DirectX::CaptureTexture(m_spDX11Device.Get(), m_spDX11DeviceContext.Get(), spTextureDst.Get(), pImage);
if (SUCCEEDED(hr))
{
// get the image object from the wrapper
const DirectX::Image *pRealImage = pImage.GetImage(0, 0, 0);
// set some place to save the image frame
StorageFolder ^dataFolder = ApplicationData::Current->LocalFolder;
Platform::String ^szPath = dataFolder->Path + "\\frame.png";
// save the image to file
hr = DirectX::SaveToWICFile(*pRealImage, DirectX::WIC_FLAGS_NONE, GUID_ContainerFormatPng, szPath->Data());
}
// and then present it to the screen
MEDIA::ThrowIfFailed(m_spDX11SwapChain->Present(1, 0));
I don't have time right now to take this any further but I'm very pleased with what I have achieved so far :-))
Can you take a fresh look and update your results in comments?
Look at the Video Thumbnail Sample and the Source Reader documentation.
You can find sample code under SDK Root\Samples\multimedia\mediafoundation\VideoThumbnail
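A bare-bones Source Reader sketch along the lines of that sample (error handling omitted; "video.mp4" is a placeholder path):

#include <mfapi.h>
#include <mfidl.h>
#include <mfreadwrite.h>
#pragma comment(lib, "mfplat.lib")
#pragma comment(lib, "mfreadwrite.lib")
#pragma comment(lib, "mfuuid.lib")

MFStartup(MF_VERSION);

// Open the file with a Source Reader, allowing the built-in video processor for YUV -> RGB conversion.
IMFAttributes* pAttr = NULL;
MFCreateAttributes(&pAttr, 1);
pAttr->SetUINT32(MF_SOURCE_READER_ENABLE_VIDEO_PROCESSING, TRUE);
IMFSourceReader* pReader = NULL;
MFCreateSourceReaderFromURL(L"video.mp4", pAttr, &pReader);
pAttr->Release();

// Ask the reader to decode the first video stream to RGB32.
IMFMediaType* pType = NULL;
MFCreateMediaType(&pType);
pType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
pType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_RGB32);
pReader->SetCurrentMediaType(MF_SOURCE_READER_FIRST_VIDEO_STREAM, NULL, pType);
pType->Release();

// Pull decoded frames one by one.
for (;;) {
    DWORD flags = 0;
    LONGLONG timestamp = 0;
    IMFSample* pSample = NULL;
    pReader->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0, NULL, &flags, &timestamp, &pSample);
    if (flags & MF_SOURCE_READERF_ENDOFSTREAM)
        break;
    if (pSample) {
        // IMFSample -> ConvertToContiguousBuffer() -> Lock() exposes the RGB32 pixel data
        pSample->Release();
    }
}
pReader->Release();
MFShutdown();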
I think OpenCV may help you.
OpenCV offers an API to capture frames from a camera or from video files.
You can download it here: http://opencv.org/downloads.html.
The following is a demo I wrote with OpenCV 2.3.1.
#include "opencv.hpp"
using namespace cv;
int main()
{
VideoCapture cap("demo.avi"); // open a video to capture
if (!cap.isOpened()) // check if succeeded
return -1;
Mat frame;
namedWindow("Demo", CV_WINDOW_NORMAL);
// Loop to capture frame and show on the window
while (1) {
cap >> frame;
if (frame.empty())
break;
imshow("Demo", frame);
if (waitKey(33) >= 0) // pause 33ms every frame
break;
}
return 0;
}