How to convert a screenshot to a uint8_t array - C++

I'm trying to get a uint8 array of RGB values from a screenshot, but this code, for example, doesn't work:
#include <torch/torch.h>
#include <iostream>
#include <Windows.h>
#include <gdiplus.h>
#include <gdipluspixelformats.h> // PixelFormat24bppRGB
#include <vector>
#include <cstdlib>
#pragma comment(lib, "gdiplus.lib")
int main()
{
    Gdiplus::GdiplusStartupInput input;
    ULONG_PTR token;
    Gdiplus::GdiplusStartup(&token, &input, NULL);

    const int mWidth = 1920;
    const int mHeight = 1080;
    std::vector<uint8_t> pixels;
    Gdiplus::BitmapData bmpData;

    DWORD start = GetTickCount64();

    HDC hdcScreen = GetDC(NULL);

    BITMAPINFO bmpInfo;
    bmpInfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bmpInfo.bmiHeader.biWidth = mWidth;
    bmpInfo.bmiHeader.biHeight = mHeight;
    bmpInfo.bmiHeader.biPlanes = 1;
    bmpInfo.bmiHeader.biBitCount = 24;
    bmpInfo.bmiHeader.biCompression = BI_RGB;

    HDC scComDC = CreateCompatibleDC(hdcScreen);
    BitBlt(
        scComDC,
        0, 0,
        mWidth,
        mHeight,
        hdcScreen,
        0, 0,
        SRCCOPY
    );

    LPDWORD lpPixel;
    HBITMAP display = CreateDIBSection(scComDC, &bmpInfo, DIB_RGB_COLORS, (void**)&lpPixel, NULL, 0);
    ReleaseDC(NULL, hdcScreen);
    DeleteDC(scComDC);

    LOGPALETTE lpPalette;
    lpPalette.palVersion = 0x0300;
    lpPalette.palNumEntries = 1;
    lpPalette.palPalEntry[0].peRed =
    lpPalette.palPalEntry[0].peGreen =
    lpPalette.palPalEntry[0].peBlue =
    lpPalette.palPalEntry[0].peFlags = NULL;
    HPALETTE hPalette = CreatePalette(&lpPalette);

    auto image = Gdiplus::Bitmap::FromHBITMAP(display, hPalette);
    int bWidth = image->GetWidth();
    int bHeight = image->GetHeight();
    std::cout << image << std::endl;
    std::cout << bWidth << std::endl;
    std::cout << bHeight << std::endl;

    auto stride = 3 * bWidth;
    pixels.resize(stride * bHeight);

    Gdiplus::Rect rect(0, 0, bWidth, bHeight);
    image->LockBits(&rect, Gdiplus::ImageLockModeRead, PixelFormat24bppRGB, &bmpData);
    for (int y = 0; y < bHeight; ++y) {
        memcpy(pixels.data() + y * stride, (byte*)bmpData.Scan0 + y * bmpData.Stride, stride);
    }
    image->UnlockBits(&bmpData);

    DWORD end = GetTickCount64();
    std::cout << end - start << "ms\n";

    while (true) {
        int inint;
        std::cin >> inint;
        if (inint == -1) {
            break;
        }
        std::cout << ">> " << +pixels[inint * 3]
                  << "," << +pixels[inint * 3 + 1]
                  << "," << +pixels[inint * 3 + 2]
                  << "\n";
    }

    Gdiplus::GdiplusShutdown(token);
    //system("PAUSE");
    return 1;
}
Running this program, I can't get the pixels into the vector: it prints 0,0,0 for every index, as if all pixels were black.
I think the memcpy part is correct, since I was able to fill the array successfully with
auto image = Gdiplus::Bitmap::FromFile(L"C:\\sample.jpg");
but when I use Bitmap::FromHBITMAP(display, hPalette); it doesn't work at all.
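One thing that stands out: the BitBlt runs before the DIB section exists, and the DIB section is never selected into scComDC, so the copy lands in the DC's default 1x1 bitmap and the DIB section stays zero-filled, which would explain the all-black output. A minimal sketch of that part of the capture, with the DIB section created and selected first (error handling omitted, variable names illustrative):

// Sketch: create the DIB section first, select it into the memory DC,
// then BitBlt from the screen into it.
HDC hdcScreen = GetDC(NULL);
HDC hdcMem    = CreateCompatibleDC(hdcScreen);

BITMAPINFO bmpInfo = {};
bmpInfo.bmiHeader.biSize        = sizeof(BITMAPINFOHEADER);
bmpInfo.bmiHeader.biWidth       = mWidth;
bmpInfo.bmiHeader.biHeight      = -mHeight;          // negative height -> top-down rows
bmpInfo.bmiHeader.biPlanes      = 1;
bmpInfo.bmiHeader.biBitCount    = 24;
bmpInfo.bmiHeader.biCompression = BI_RGB;

void* bits = nullptr;
HBITMAP hDib = CreateDIBSection(hdcScreen, &bmpInfo, DIB_RGB_COLORS, &bits, NULL, 0);
HGDIOBJ hOld = SelectObject(hdcMem, hDib);           // without this, BitBlt has nowhere to copy to

BitBlt(hdcMem, 0, 0, mWidth, mHeight, hdcScreen, 0, 0, SRCCOPY);
GdiFlush();                                          // make sure GDI has finished writing into 'bits'

const int srcStride = ((mWidth * 3 + 3) / 4) * 4;    // DIB rows are padded to a DWORD boundary
std::vector<uint8_t> pixels(3 * mWidth * mHeight);
for (int y = 0; y < mHeight; ++y)
    memcpy(pixels.data() + y * 3 * mWidth,
           static_cast<uint8_t*>(bits) + y * srcStride,
           3 * mWidth);                              // note: GDI delivers BGR, not RGB

SelectObject(hdcMem, hOld);
DeleteObject(hDib);
DeleteDC(hdcMem);
ReleaseDC(NULL, hdcScreen);

With a top-down DIB you can index pixels[(y * mWidth + x) * 3] directly; just keep in mind the bytes come back in BGR order, so swap channels if the consumer expects RGB.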

Related

How to combine OpenCV + Tesseract?

I am trying to create a bot that recognizes text in an inactive window. To do this, I capture each frame as a Bitmap, and I want Tesseract to scan the frame for text (if there is any) and print it to the screen.
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <Windows.h>
#include <string>
#include <tesseract/capi.h>
#include <tesseract/baseapi.h>
#include <leptonica/allheaders.h>
using namespace cv;
Mat getMat(HWND hwnd) {
    HDC deviceContext = GetDC(hwnd);
    HDC memoryDeviceContext = CreateCompatibleDC(deviceContext);

    RECT windowRect;
    GetClientRect(hwnd, &windowRect);
    int height = 500; /// windowRect.bottom windowRect.right
    int width = 500;

    HBITMAP bitmap = CreateCompatibleBitmap(deviceContext, width, height);
    SelectObject(memoryDeviceContext, bitmap);

    // Copy data into bitmap
    BitBlt(memoryDeviceContext, 0, 0, width, height, deviceContext, 750, 359, SRCCOPY);

    // Specify format by using a BITMAPINFOHEADER
    BITMAPINFOHEADER bi;
    bi.biSize = sizeof(BITMAPINFOHEADER);
    bi.biWidth = width;
    bi.biHeight = -height;
    bi.biPlanes = 1;
    bi.biBitCount = 32;
    bi.biCompression = BI_RGB;
    bi.biSizeImage = 0; // No compression
    bi.biXPelsPerMeter = 1;
    bi.biYPelsPerMeter = 2;
    bi.biClrUsed = 3;
    bi.biClrImportant = 4;

    Mat mat = Mat(height, width, CV_8UC4); // 8-bit unsigned ints, 4 channels (BGRA)
    // Transform data and store into mat.data
    GetDIBits(memoryDeviceContext, bitmap, 0, height, mat.data, (BITMAPINFO*)&bi, DIB_RGB_COLORS);

    // Clean up
    DeleteObject(bitmap);
    DeleteDC(memoryDeviceContext);
    ReleaseDC(hwnd, deviceContext);
    return mat;
}

int main() {
    LPCWSTR windowTitle = L"Discord";
    HWND hwnd = FindWindow(NULL, windowTitle);
    /// String outText, imPath = argv[1];
    /// tesseract::TessBaseAPI* temp = new tesseract::TessBaseAPI();
    std::cout << "Start" << "\n";
    while (true) {
        Mat temp = getMat(hwnd);
        cv::imshow("output", temp);
        cv::waitKey(2);
    }
    std::cout << "Done\n";
    return 1;
}
My attempts were unsuccessful:
while (true) {
    Mat temp = getMat(hwnd);
    cv::imshow("output", temp);
    cv::waitKey(2);

    String outText, imPath = argv[1];
    tesseract::TessBaseAPI* temp = new tesseract::TessBaseAPI();
    cv::Mat img = cv::imread((const unsigned char*)temp);
    ocr->Init(NULL, "eng", tesseract::OEM_LSTM_ONLY);
    ocr->SetPageSegMode(tesseract::PSM_AUTO);
    ocr->SetImage(img);
    outText = String(ocr->GetUTF8Text());
}
Another attempt
while (true) {
    Mat temp = getMat(hwnd);
    cv::imshow("output", temp);
    cv::waitKey(2);

    tesseract::TessBaseAPI* ocr = new tesseract::TessBaseAPI();
    char* outText;
    ocr->SetPageSegMode(tesseract::PSM_AUTO);
    ocr->Init(NULL, "eng", tesseract::OEM_LSTM_ONLY);
    ocr->SetImage(temp, 0, 0, 0, 0);
    outText = ocr->GetUTF8Text();
    ocr->End();
    delete ocr;
    return outText;
}
I just want every frame captured from the Bitmap to be scanned for text, and any text that is found to be printed out. I will be glad of any help. Thanks.
Just google for it, e.g. a blog post on OpenCV and Tesseract, or the example in the Tesseract docs.
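For completeness, a minimal sketch of wiring the capture into Tesseract, assuming getMat() returns the CV_8UC4 (BGRA) image from the question, an English traineddata file is installed, and this replaces the loop in main:

// Sketch: run OCR on every captured frame.
tesseract::TessBaseAPI ocr;
if (ocr.Init(NULL, "eng", tesseract::OEM_LSTM_ONLY) != 0) {
    std::cerr << "Could not initialize Tesseract\n";
    return 1;
}
ocr.SetPageSegMode(tesseract::PSM_AUTO);

while (true) {
    cv::Mat frame = getMat(hwnd);
    cv::imshow("output", frame);

    // Convert to grayscale so Tesseract gets a simple 1-byte-per-pixel buffer.
    cv::Mat gray;
    cv::cvtColor(frame, gray, cv::COLOR_BGRA2GRAY);
    ocr.SetImage(gray.data, gray.cols, gray.rows, 1, static_cast<int>(gray.step));

    char* text = ocr.GetUTF8Text();   // caller owns the returned buffer
    if (text) {
        std::cout << text << "\n";
        delete[] text;
    }
    if (cv::waitKey(2) == 27) break;  // Esc to quit
}
ocr.End();

SetImage here takes the raw pixel pointer plus width, height, bytes per pixel and bytes per line, so it works directly on the Mat without writing anything to disk.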

cannot work out why SDL2 streaming texture is black

I am trying to draw individual pixels to an SDL2 streaming texture. I store my pixels in a std::vector, then lock the texture, update it, unlock it, pass it to the renderer and present it.
However, I always get a black screen. I have tried several permutations of the code below, but I am at a loss.
#include "pch.h"
#include "Window.h"
Window::Window(unsigned int width, unsigned int height) :
m_width(width), m_height(height)
{
if (SDL_Init(SDL_INIT_VIDEO || SDL_INIT_TIMER) < 0)
{
std::cout << "SDL failed to initilize correct. Error: "
<< SDL_GetError << std::endl;
}
m_window = SDL_CreateWindow("Cotton Rasterizer - Hardeep Bahia 2021", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, m_width, m_height, 0);
if (m_window == NULL)
{
std::cout << "Window failed to initilize: " << SDL_GetError << std::endl;
}
m_renderer = SDL_CreateRenderer(m_window, -1, SDL_RENDERER_TARGETTEXTURE);
if (m_renderer == NULL)
{
std::cout << "Renderer failed to initilize: " << SDL_GetError << std::endl;
}
m_texture = SDL_CreateTexture(m_renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_STREAMING, m_width, m_height);
if (m_texture == NULL)
{
std::cout << "Texture failed to initilize: " << SDL_GetError << std::endl;
}
auto p_format = SDL_GetWindowPixelFormat(m_window);
m_pixel_format = SDL_AllocFormat(p_format);
}
Window::~Window()
{
SDL_FreeFormat(m_pixel_format);
SDL_DestroyTexture(m_texture);
SDL_DestroyRenderer(m_renderer);
SDL_DestroyWindow(m_window);
}
void Window::test()
{
SDL_Rect filledRect = { m_width / 4, m_height / 4, m_width / 2, m_height / 2 };
SDL_SetRenderDrawColor(m_renderer, 255, 0, 0, 255);
SDL_RenderFillRect(m_renderer, &filledRect);
}
void Window::present()
{
SDL_RenderPresent(m_renderer);
}
void Window::clear()
{
SDL_SetRenderDrawColor(m_renderer, 0.f, 0.f, 0.f, 255.f);
SDL_RenderClear(m_renderer);
}
void Window::draw(const std::vector<Pixel>& i_pixels)
{
int pitch {(m_width * 4)};
std::vector<Uint32> pixels(m_width * m_height * 4, 0);
Uint32 color = SDL_MapRGBA(m_pixel_format, 255, 255, 255, 255);
for (auto& a : i_pixels)
{
unsigned int offset = (m_width * 4 * a.m_position.y) + a.m_position.x * 4;
pixels[offset + 0] = 255;
pixels[offset + 1] = 255;
pixels[offset + 2] = 255;
pixels[offset + 3] = 255;
}
unsigned char* locked_pixels{};
SDL_LockTexture(m_texture, NULL, reinterpret_cast<void **> (&locked_pixels) , &pitch);
std::memcpy(locked_pixels, pixels.data(), pixels.size());
SDL_UpdateTexture(m_texture, NULL, pixels.data(), pitch);
SDL_UnlockTexture(m_texture);
SDL_RenderCopy(m_renderer, m_texture, NULL, NULL);
}
My test function does draw, and is visible on the screen, but it does not use the texture.
I resolved it by changing the draw function as follows:
void Window::draw(const std::vector<Pixel>& i_pixels)
{
    std::vector<unsigned char> pixels(m_width * m_height * 4, 0);
    for (auto& i : i_pixels)
    {
        unsigned int x = i.m_position.x;
        unsigned int y = i.m_position.y;
        const unsigned int offset = (m_width * 4 * y) + (x * 4);
        pixels[offset] = 255;
        pixels[offset + 1] = 255;
        pixels[offset + 2] = 255;
        pixels[offset + 3] = 255;
    }
    unsigned char* lockedpixels = nullptr;
    int pitch = (m_width * 4);
    SDL_LockTexture(m_texture, NULL, reinterpret_cast<void**>(&lockedpixels), &pitch);
    std::memcpy(lockedpixels, pixels.data(), pixels.size());
    SDL_UpdateTexture(m_texture, NULL, pixels.data(), m_width * 4);
    SDL_UnlockTexture(m_texture);
    SDL_RenderCopy(m_renderer, m_texture, NULL, NULL);
}
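A note on the locked copy: SDL_LockTexture reports its own pitch, which is not guaranteed to equal m_width * 4, so a row-by-row copy is the safer pattern. This is only a sketch using the names from the resolved draw() above; it also drops the SDL_UpdateTexture call, which is redundant once the texture is locked:

// Sketch: copy a tightly packed RGBA buffer into a streaming texture,
// honouring the pitch SDL reports for the locked region.
void* dst = nullptr;
int dst_pitch = 0;
if (SDL_LockTexture(m_texture, NULL, &dst, &dst_pitch) == 0)
{
    const int src_pitch = static_cast<int>(m_width) * 4;   // our buffer has no row padding
    for (unsigned int y = 0; y < m_height; ++y)
    {
        std::memcpy(static_cast<unsigned char*>(dst) + y * dst_pitch,
                    pixels.data() + y * src_pitch,
                    src_pitch);
    }
    SDL_UnlockTexture(m_texture);
}
SDL_RenderCopy(m_renderer, m_texture, NULL, NULL);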

FreeImage: why does it not work with cairo?

Here is the code:
bool bLoaded = false;
FreeImage_Initialise();
FIBITMAP* img = FreeImage_Load(FIF_PNG, "imageSave.png", 0);
FreeImage_Unload(img);
FreeImage_DeInitialise();
int w, h;
//cr = cairo_create(image);
cairo_surface_t *image = cairo_image_surface_create_from_png("/root/test1/FreeImage/imageSave.png");
w = cairo_image_surface_get_width(image);
h = cairo_image_surface_get_height(image);
cout << "width " << w << " height " << h;
cairo_surface_write_to_png(image, "image2.png");
cairo_surface_destroy(image);
When I use FreeImage, the width and height printed are always 0, but when I comment out these 4 lines, cairo works:
FreeImage_Initialise();
FIBITMAP* img = FreeImage_Load(FIF_PNG, "imageSave.png", 0);
FreeImage_Unload(img);
FreeImage_DeInitialise();
How can I use both libraries together? Please help!
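If the goal is just to get both libraries into one program, one way to sidestep the clash is to let FreeImage do all the PNG decoding and hand the raw pixels to cairo yourself, instead of going through cairo_image_surface_create_from_png. This is only a sketch: it assumes a little-endian machine (where FreeImage's 32-bit BGRA layout matches CAIRO_FORMAT_ARGB32), ignores alpha premultiplication, and flips rows because FreeImage stores images bottom-up:

#include <FreeImage.h>
#include <cairo.h>
#include <cstring>

// Sketch: decode with FreeImage, upload into a cairo image surface by hand.
cairo_surface_t* surface_from_freeimage(const char* path)
{
    FIBITMAP* raw = FreeImage_Load(FIF_PNG, path, PNG_DEFAULT);
    if (!raw) return nullptr;
    FIBITMAP* img = FreeImage_ConvertTo32Bits(raw);   // BGRA on little-endian
    FreeImage_Unload(raw);
    if (!img) return nullptr;

    const int w = FreeImage_GetWidth(img);
    const int h = FreeImage_GetHeight(img);

    cairo_surface_t* surf = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, w, h);
    unsigned char* dst = cairo_image_surface_get_data(surf);
    const int dst_stride = cairo_image_surface_get_stride(surf);

    cairo_surface_flush(surf);
    for (int y = 0; y < h; ++y) {
        // FreeImage rows run bottom-up; cairo expects top-down.
        BYTE* src = FreeImage_GetScanLine(img, h - 1 - y);
        std::memcpy(dst + y * dst_stride, src, static_cast<size_t>(w) * 4);
    }
    cairo_surface_mark_dirty(surf);

    FreeImage_Unload(img);
    return surf;
}

The resulting surface can then be written out with cairo_surface_write_to_png, as in the original code.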

OpenCV c++ hwnd2mat(screenshot) -> blobFromImage : Error

I am working on a screen-capture object recognition system.
My code:
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/dnn.hpp"
#include <opencv2/core/utils/trace.hpp>
#include <Windows.h>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
Mat hwnd2mat(HWND hwnd) {
    HDC hwindowDC, hwindowCompatibleDC;
    int height, width, srcheight, srcwidth;
    HBITMAP hbwindow;
    Mat src;
    BITMAPINFOHEADER bi;

    hwindowDC = GetDC(hwnd);
    hwindowCompatibleDC = CreateCompatibleDC(hwindowDC);
    SetStretchBltMode(hwindowCompatibleDC, COLORONCOLOR);

    RECT windowsize; // get the height and width of the screen
    GetClientRect(hwnd, &windowsize);
    srcheight = windowsize.bottom;
    srcwidth = windowsize.right;
    height = windowsize.bottom / 1; // change this to whatever size you want to resize to
    width = windowsize.right / 1;
    src.create(height, width, CV_8UC4);

    // create a bitmap
    hbwindow = CreateCompatibleBitmap(hwindowDC, width, height);
    bi.biSize = sizeof(BITMAPINFOHEADER); // http://msdn.microsoft.com/en-us/library/windows/window/dd183402%28v=vs.85%29.aspx
    bi.biWidth = width;
    bi.biHeight = -height; // this is the line that makes it draw upside down or not
    bi.biPlanes = 1;
    bi.biBitCount = 32;
    bi.biCompression = BI_RGB;
    bi.biSizeImage = 0;
    bi.biXPelsPerMeter = 0;
    bi.biYPelsPerMeter = 0;
    bi.biClrUsed = 0;
    bi.biClrImportant = 0;

    // use the previously created device context with the bitmap
    SelectObject(hwindowCompatibleDC, hbwindow);
    // copy from the window device context to the bitmap device context
    StretchBlt(hwindowCompatibleDC, 0, 0, width, height, hwindowDC, 0, 0, srcwidth, srcheight, SRCCOPY); // change SRCCOPY to NOTSRCCOPY for wacky colors!
    GetDIBits(hwindowCompatibleDC, hbwindow, 0, height, src.data, (BITMAPINFO*)&bi, DIB_RGB_COLORS); // copy from hwindowCompatibleDC to hbwindow

    // avoid memory leak
    DeleteObject(hbwindow);
    DeleteDC(hwindowCompatibleDC);
    ReleaseDC(hwnd, hwindowDC);
    return src;
}
string CLASSES[] = {"background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"};
float confidenceThreshold = 0.2;
int main(int argc, char **argv) {
    CV_TRACE_FUNCTION();
    String modelTxt = "resources/Caffe/MobileNetSSD_deploy.prototxt";
    String modelBin = "resources/Caffe/MobileNetSSD_deploy.caffemodel";
    Net net = readNetFromCaffe(modelTxt, modelBin);
    if (net.empty()) {
        std::cerr << "Can't load network by using the following files: " << std::endl;
        std::cerr << "prototxt: " << modelTxt << std::endl;
        std::cerr << "caffemodel: " << modelBin << std::endl;
        exit(-1);
    }

    HWND hwndDesktop = GetDesktopWindow();
    // namedWindow("output", WINDOW_NORMAL); // so the window can be resized in real time
    int key = 0;
    // while( key != 27 ) {
    Mat frame = hwnd2mat(hwndDesktop);
    // resize(frame, frame, Size(800, 450));
    // Mat frame = imread("resources/auto.png");
    Mat img2;
    resize(frame, img2, Size(300, 300));
    Mat inputBlob = blobFromImage(img2, 0.007843, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
    net.setInput(inputBlob, "data");
    Mat detection = net.forward("detection_out");
    Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

    ostringstream ss;
    for (int i = 0; i < detectionMat.rows; i++) {
        float confidence = detectionMat.at<float>(i, 2);
        if (confidence > confidenceThreshold) {
            int idx = static_cast<int>(detectionMat.at<float>(i, 1));
            int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
            int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
            int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
            int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);
            Rect object((int)xLeftBottom, (int)yLeftBottom,
                        (int)(xRightTop - xLeftBottom),
                        (int)(yRightTop - yLeftBottom));
            rectangle(frame, object, Scalar(0, 255, 0), 2);
            cout << CLASSES[idx] << ": " << confidence << endl;
            ss.str("");
            ss << confidence;
            String conf(ss.str());
            String label = CLASSES[idx] + ": " + conf;
            int baseLine = 0;
            Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
            putText(frame, label, Point(xLeftBottom, yLeftBottom - 10),
                    FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
        }
    }
    imshow("output", frame);
    // key = waitKey(60);
    waitKey();
    // }
}
When I use an external image everything works fine, but when I use the hwnd2mat function it throws this exception:
Error: Assertion failed (inputs[0]->size[1] % blobs[0].size[1] == 0) in forward
I don't know what it means. I tried resizing the image but it doesn't work.
I am using OpenCV 3.4.1 and the MobileNetSSD model.
Thanks in advance.
Ok, I finally did it!
Code for taking screenshots:
Mat getScreenshot() {
    HWND hwnd = GetDesktopWindow();
    HDC hwindowDC, hwindowCompatibleDC;
    int height, width, srcheight, srcwidth;
    HBITMAP hbwindow;
    Mat src;
    BITMAPINFOHEADER bi;

    hwindowDC = GetDC(hwnd);
    hwindowCompatibleDC = CreateCompatibleDC(hwindowDC);
    SetStretchBltMode(hwindowCompatibleDC, COLORONCOLOR);

    RECT windowsize; // get the height and width of the screen
    GetClientRect(hwnd, &windowsize);
    srcheight = windowsize.bottom;
    srcwidth = windowsize.right;
    height = windowsize.bottom / 1; // change this to whatever size you want to resize to
    width = windowsize.right / 1;
    src.create(height, width, CV_8UC3); // This was the problem

    // create a bitmap
    int iBits = GetDeviceCaps(hwindowDC, BITSPIXEL) * GetDeviceCaps(hwindowDC, PLANES);
    WORD wBitCount;
    if (iBits <= 1)
        wBitCount = 1;
    else if (iBits <= 4)
        wBitCount = 4;
    else if (iBits <= 8)
        wBitCount = 8;
    else
        wBitCount = 24;

    hbwindow = CreateCompatibleBitmap(hwindowDC, width, height);
    bi.biSize = sizeof(BITMAPINFOHEADER); // http://msdn.microsoft.com/en-us/library/windows/window/dd183402%28v=vs.85%29.aspx
    bi.biWidth = width;
    bi.biHeight = -height; // this is the line that makes it draw upside down or not
    bi.biPlanes = 1;
    bi.biBitCount = wBitCount;
    bi.biCompression = BI_RGB;
    bi.biSizeImage = 0;
    bi.biXPelsPerMeter = 0;
    bi.biYPelsPerMeter = 0;
    bi.biClrUsed = 256;
    bi.biClrImportant = 0;

    // use the previously created device context with the bitmap
    SelectObject(hwindowCompatibleDC, hbwindow);
    // copy from the window device context to the bitmap device context
    StretchBlt(hwindowCompatibleDC, 0, 0, width, height, hwindowDC, 0, 0, srcwidth, srcheight, SRCCOPY); // change SRCCOPY to NOTSRCCOPY for wacky colors!
    GetDIBits(hwindowCompatibleDC, hbwindow, 0, height, src.data, (BITMAPINFO*)&bi, DIB_RGB_COLORS); // copy from hwindowCompatibleDC to hbwindow

    // avoid memory leak
    DeleteObject(hbwindow);
    DeleteDC(hwindowCompatibleDC);
    ReleaseDC(hwnd, hwindowDC);
    return src;
}
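An alternative that keeps the original 4-channel capture: the assertion matches blobFromImage being fed a 4-channel image while the Caffe MobileNet-SSD weights expect 3 channels, so dropping the alpha channel just before building the blob also works. A sketch using the names from the question's main:

// Sketch: capture stays 4-channel (CV_8UC4); convert to BGR before blobFromImage,
// since the MobileNet-SSD network expects a 3-channel input.
Mat frameBGRA = hwnd2mat(hwndDesktop);          // original capture from the question
Mat frameBGR;
cvtColor(frameBGRA, frameBGR, COLOR_BGRA2BGR);  // drop the alpha channel

Mat resized;
resize(frameBGR, resized, Size(300, 300));
Mat inputBlob = blobFromImage(resized, 0.007843, Size(300, 300),
                              Scalar(127.5, 127.5, 127.5), false);
net.setInput(inputBlob, "data");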

C++ Getting RGB from hBitmap

Working with bitmaps is very new to me, so I've been really struggling with the online tutorials and strategies that I've read through. Basically my goal is to scan the screen for a particular RGB value. I believe the steps to do this are to capture the screen in an hBitmap and then produce an array of RGB values from it that I can scan through.
I originally started with GetPixel, but that is very slow. The solution was to use GetDIBits, which produces the array of RGB values. The problem is that it returns weird, seemingly random RGB values instead.
I'm using the following code which I found from another tutorial:
/* Globals */
int ScreenX = GetDeviceCaps(GetDC(0), HORZRES);
int ScreenY = GetDeviceCaps(GetDC(0), VERTRES);
BYTE* ScreenData = new BYTE[3 * ScreenX * ScreenY];

void ScreenCap() {
    HDC hdc = GetDC(GetDesktopWindow());
    HDC hdcMem = CreateCompatibleDC(hdc);
    HBITMAP hBitmap = CreateCompatibleBitmap(hdc, ScreenX, ScreenY);

    BITMAPINFOHEADER bmi = {0};
    bmi.biSize = sizeof(BITMAPINFOHEADER);
    bmi.biPlanes = 1;
    bmi.biBitCount = 24;
    bmi.biWidth = ScreenX;
    bmi.biHeight = -ScreenY;
    bmi.biCompression = BI_RGB;
    bmi.biSizeImage = ScreenX * ScreenY;

    SelectObject(hdcMem, hBitmap);
    BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hdc, 0, 0, SRCCOPY);
    GetDIBits(hdc, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

    DeleteDC(hdcMem);
    ReleaseDC(NULL, hdc);
}

inline int PosR(int x, int y) {
    return ScreenData[3 * ((y * ScreenX) + x) + 2];
}

inline int PosG(int x, int y) {
    return ScreenData[3 * ((y * ScreenX) + x) + 1];
}

inline int PosB(int x, int y) {
    return ScreenData[3 * ((y * ScreenX) + x)];
}
I test this with the following code. I hit Shift to call ScreenCap and then I move my cursor to the desired location and hit Space to see what the RGB value is at that location. Am I completely nuts?
int main() {
    while (true) {
        if (GetAsyncKeyState(VK_SPACE)) {
            // Print out current cursor position
            GetCursorPos(&p);
            printf("X:%d Y:%d \n", p.x, p.y);
            // Print out RGB value at that position
            int r = PosR(p.x, p.y);
            int g = PosG(p.x, p.y);
            int b = PosB(p.x, p.y);
            printf("r:%d g:%d b:%d \n", r, g, b);
        } else if (GetAsyncKeyState(VK_ESCAPE)) {
            printf("Quit\n");
            break;
        } else if (GetAsyncKeyState(VK_SHIFT)) {
            ScreenCap();
            printf("Captured\n");
        }
    }
    system("PAUSE");
    return 0;
}
The issue is that your screen is actually 32 bits deep, not 24. The code below will give you the result you need:
/* Globals */
int ScreenX = 0;
int ScreenY = 0;
BYTE* ScreenData = 0;

void ScreenCap()
{
    HDC hScreen = GetDC(NULL);
    ScreenX = GetDeviceCaps(hScreen, HORZRES);
    ScreenY = GetDeviceCaps(hScreen, VERTRES);

    HDC hdcMem = CreateCompatibleDC(hScreen);
    HBITMAP hBitmap = CreateCompatibleBitmap(hScreen, ScreenX, ScreenY);
    HGDIOBJ hOld = SelectObject(hdcMem, hBitmap);
    BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hScreen, 0, 0, SRCCOPY);
    SelectObject(hdcMem, hOld);

    BITMAPINFOHEADER bmi = {0};
    bmi.biSize = sizeof(BITMAPINFOHEADER);
    bmi.biPlanes = 1;
    bmi.biBitCount = 32;
    bmi.biWidth = ScreenX;
    bmi.biHeight = -ScreenY;
    bmi.biCompression = BI_RGB;
    bmi.biSizeImage = 0; // 3 * ScreenX * ScreenY;

    if (ScreenData)
        free(ScreenData);
    ScreenData = (BYTE*)malloc(4 * ScreenX * ScreenY);

    GetDIBits(hdcMem, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

    ReleaseDC(GetDesktopWindow(), hScreen);
    DeleteDC(hdcMem);
    DeleteObject(hBitmap);
}

inline int PosB(int x, int y)
{
    return ScreenData[4 * ((y * ScreenX) + x)];
}

inline int PosG(int x, int y)
{
    return ScreenData[4 * ((y * ScreenX) + x) + 1];
}

inline int PosR(int x, int y)
{
    return ScreenData[4 * ((y * ScreenX) + x) + 2];
}

bool ButtonPress(int Key)
{
    bool button_pressed = false;
    while (GetAsyncKeyState(Key))
        button_pressed = true;
    return button_pressed;
}

int main()
{
    while (true)
    {
        if (ButtonPress(VK_SPACE))
        {
            // Print out current cursor position
            POINT p;
            GetCursorPos(&p);
            printf("X:%d Y:%d \n", p.x, p.y);
            // Print out RGB value at that position
            std::cout << "Bitmap: r: " << PosR(p.x, p.y) << " g: " << PosG(p.x, p.y) << " b: " << PosB(p.x, p.y) << "\n";
        } else if (ButtonPress(VK_ESCAPE))
        {
            printf("Quit\n");
            break;
        } else if (ButtonPress(VK_SHIFT))
        {
            ScreenCap();
            printf("Captured\n");
        }
    }
    system("PAUSE");
    return 0;
}
Your image size is specified in pixels, but it should be specified in bytes:
bmi.biSizeImage = ScreenX * ScreenY;
bmi.biBitCount = 24;
bmi.biWidth = ScreenX;
bmi.biHeight = -ScreenY;
bmi.biCompression = BI_RGB;
biSizeImage is defined in bytes, and you are requesting 24-bit RGB, i.e. 3 bytes per pixel.
http://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
biSizeImage
The size, in bytes, of the image. This may be set to zero for BI_RGB bitmaps.
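One more detail worth keeping in mind with 24-bit DIBs: GetDIBits pads every scan line to a DWORD boundary, so unless the width happens to be a multiple of 4, a tightly packed 3 * ScreenX * ScreenY buffer and the x/y indexing above will drift by a few bytes per row. A sketch of the padded stride, reusing the question's globals (the 32-bit route in the accepted answer avoids this entirely, since 4-byte pixels are always aligned):

// Sketch: with 24-bpp DIBs every row is padded to a DWORD boundary,
// so allocate and index with the padded stride instead of 3 * ScreenX.
const int stride = ((ScreenX * 3 + 3) / 4) * 4;      // bytes per row, padding included
BYTE* ScreenData = new BYTE[stride * ScreenY];       // buffer GetDIBits actually fills

// channel accessors using the padded stride
inline int PosR(int x, int y) { return ScreenData[y * stride + 3 * x + 2]; }
inline int PosG(int x, int y) { return ScreenData[y * stride + 3 * x + 1]; }
inline int PosB(int x, int y) { return ScreenData[y * stride + 3 * x]; }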