I need to update an array of pixels to the screen every frame. It works initially; however, when I try to resize the window, it glitches and eventually throws EXC_BAD_ACCESS (code 1). I have already checked that the buffer is allocated to the correct size before every frame, but that does not seem to affect the result.
#include <stdio.h>
#include <stdlib.h>
#include <GLUT/GLUT.h>

unsigned char *buffer = NULL;
int width = 400, height = 400;
unsigned int screenTexture;

void Display()
{
    for (int y = 0; y < height; y+=4) {
        for (int x = 0; x < width; x++) {
            buffer[(x + y * width) * 3] = 255;
        }
    }

    glClear(GL_COLOR_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);

    // This function results in EXC_BAD_ACCESS 1, although the buffer is always correctly allocated
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);

    glViewport(0, 0, width, height);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, height, 0, 0, 1);

    glMatrixMode(GL_MODELVIEW);

    glBegin(GL_QUADS);
        glTexCoord2f(0, 0); glVertex2i(0, 0);
        glTexCoord2f(1, 0); glVertex2i(width, 0);
        glTexCoord2f(1, 1); glVertex2i(width, height);
        glTexCoord2f(0, 1); glVertex2i(0, height);
    glEnd();

    glFlush();
    glutPostRedisplay();
}

void Resize(int w, int h)
{
    width = w;
    height = h;
    buffer = (unsigned char *)realloc(buffer, sizeof(unsigned char) * width * height * 3);
    if (!buffer) {
        printf("Error Reallocating buffer\n");
        exit(1);
    }
}

int main(int argc, char **argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
    glutInitWindowSize(width, height);
    glutCreateWindow("Rasterizer");
    glutDisplayFunc(Display);
    glutReshapeFunc(Resize);

    glGenTextures(1, &screenTexture);
    glBindTexture(GL_TEXTURE_2D, screenTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
    glDisable(GL_DEPTH_TEST);

    buffer = (unsigned char *)malloc(sizeof(unsigned char) * width * height * 3);

    glutMainLoop();
}
After resizing, the screen does not display properly either:
What is causing this problem? The code compiles and runs; you just have to link GLUT and OpenGL.
As #genpfault mentioned, OpenGL reads 4 bytes per pixel instead of your assumption of 3.
Instead of changing GL_UNPACK_ALIGNMENT, you can also change your code to the correct assumption of 4 bytes per pixel via a simple struct:
struct pixel {
    unsigned char r, g, b;
    unsigned char unused;
};
Then, instead of using the magic constant 3, you can use the much clearer sizeof(struct pixel). This makes it easier to read and to convey the intent of the code, and it doesn't result in any extra code (as the structure is "effectively" an array of 4 bytes).
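A minimal sketch of how the question's allocation and indexing might look with that struct (illustrative only; FillAndUpload is a hypothetical helper standing in for the question's Display, and note that with 4 bytes per pixel the client data would be uploaded with the GL_RGBA pixel format):
struct pixel *buffer = NULL;
int width = 400, height = 400;

void Resize(int w, int h)
{
    width = w;
    height = h;
    buffer = (struct pixel *)realloc(buffer, sizeof(struct pixel) * width * height);
}

void FillAndUpload(void)
{
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
            buffer[x + y * width].r = 255;          /* no magic stride constant */

    /* 4 bytes per pixel, so every row is a multiple of the default
       GL_UNPACK_ALIGNMENT of 4 regardless of the window width */
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, buffer);
}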
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);
^^^^^^
GL_UNPACK_ALIGNMENT defaults to 4, not 1. So OpenGL will read 4 bytes for every pixel, not the 3 that you're assuming.
Set GL_UNPACK_ALIGNMENT to 1 using glPixelStorei().
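For example, immediately before the upload (or once during initialization):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); /* rows are tightly packed, 3 bytes per pixel */
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);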
It sounds like you found something that works, but I don't think the problem was properly diagnosed. I believe the biggest issue is in the way you initialize your texture data here:
for (int y = 0; y < height; y+=4) {
    for (int x = 0; x < width; x++) {
        buffer[(x + y * width) * 3] = 255;
    }
}
This only sets data in every 4th row, and then only for every 3rd byte within those rows. To initialize all the data to white, you need to increment the row number (y) by 1 instead of 4, and set all 3 components inside the loop:
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        buffer[(x + y * width) * 3    ] = 255;
        buffer[(x + y * width) * 3 + 1] = 255;
        buffer[(x + y * width) * 3 + 2] = 255;
    }
}
You also need to set GL_UNPACK_ALIGNMENT to 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
This controls the row alignment (not the pixel alignment, as suggested in a couple other answers). The default value for GL_UNPACK_ALIGNMENT is 4. But with 3 bytes per pixel in the GL_RGB format you are using, the size of a row is only a multiple of 4 bytes if the number of pixels is a multiple of 4. So for tightly packed rows with 3 bytes/pixel, the value needs to be set to 1.
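Alternatively, if you wanted to keep the default alignment of 4, you would have to pad each row yourself; a rough sketch of that arithmetic (not from the original answer):
int rowBytes = (width * 3 + 3) & ~3;                              /* row stride rounded up to a multiple of 4 */
buffer = (unsigned char *)realloc(buffer, (size_t)rowBytes * height);
/* ... then index rows by rowBytes instead of width * 3: */
buffer[y * rowBytes + x * 3] = 255;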
I'm trying to draw to a renderbuffer (512x512) that's larger than the screen size (i.e., 320x480).
After doing a glReadPixels, the image looks correct, except once the dimensions of the image exceed those of the screen; in this example, past 320 horizontally and 480 vertically. What causes this anomaly? Is there something I'm missing?
When the window size is >= the size of the renderbuffer, this code works absolutely fine.
Example image that was rendered to the buffer & glReadPixel'd:
http://img593.imageshack.us/img593/3220/rendertobroke.png
unsigned int canvasFrameBuffer;
bglGenFramebuffers(1, &canvasFrameBuffer);
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
// Attach renderbuffer
unsigned int canvasRenderBuffer;
bglGenRenderbuffers(1, &canvasRenderBuffer);
bglBindRenderbuffer(BGL_RENDERBUFFER, canvasRenderBuffer);
bglRenderbufferStorage(BGL_RENDERBUFFER, BGL_RGBA4, width, height);
bglFramebufferRenderbuffer(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_RENDERBUFFER, canvasRenderBuffer);
bglViewport(0, 0, width, height);
Matrix::matrix_t identity, colorMatrix;
Matrix::LoadIdentity(&identity);
Matrix::LoadIdentity(&colorMatrix);
bglClearColor(1.0f, 1.0f, 1.0f, 1.0f);
bglClear(BGL_COLOR_BUFFER_BIT);
Vector::vector_t oldPos, oldScale;
Vector::Copy(&oldPos, &pos);
Vector::Mul(&pos, 0.0f);
Vector::Copy(&oldScale, &scale);
Vector::Load(&scale, 1, 1, 1);
int oldHAlign = halignment;
int oldVAlign = valignment;
halignment = Font::HALIGN_LEFT;
valignment = Font::VALIGN_BOTTOM;
float oldXRatio = vid.xratio;
float oldYRatio = vid.yratio;
vid.xratio = 1;
vid.yratio = 1;
Drawing::Set2D(this->size.x, this->size.y); // glOrtho and setup projection/modelview matrices
Draw(&identity, &colorMatrix);
Vector::Copy(&pos, &oldPos);
Vector::Copy(&scale, &oldScale);
halignment = oldHAlign;
valignment = oldVAlign;
vid.xratio = oldXRatio;
vid.yratio = oldYRatio;
byte *buffer = (byte*)Z_Malloc(width * height * 3, ZT_STATIC);
bglPixelStorei(BGL_PACK_ALIGNMENT, 1);
bglReadPixels(0, 0, width, height, BGL_RGB, BGL_UNSIGNED_BYTE, buffer);
byte *final = RGBtoLuminance(buffer, width, height);
SaveTGA("canvas.tga", final, width, height, 1);
Z_Free(buffer);
// unbind frame buffer
bglBindRenderbuffer(BGL_RENDERBUFFER, 0);
bglBindFramebuffer(BGL_FRAMEBUFFER, 0);
bglDeleteRenderbuffers(1, &canvasRenderBuffer);
bglDeleteFramebuffers(1, &canvasFrameBuffer);
bglViewport(0, 0, vid.width, vid.height);
Here's the answer.
Change this line:
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
to this:
bglBindFramebuffer(BGL_FRAMEBUFFER, canvasFrameBuffer);
Binding the framebuffer object with the wrong target means it was never actually bound, so everything was rendered into the default (window) framebuffer, which is clipped to the window's dimensions.
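For reference, a sketch of the intended setup order in standard OpenGL names (assuming the bgl*/BGL_* wrappers map to the standard calls one-to-one), including a completeness check that would have caught the wrong target:
GLuint fbo, rbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);              /* bind as a framebuffer, not a renderbuffer */

glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, rbo);

if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    /* the FBO is not usable; handle the error instead of rendering into it */
}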
I would like to implement color picking/selection via these three methods:
void ColorIndex(uint colorIndex)
{
    glColor4ubv((GLubyte *)&colorIndex);
}

void ColorIndices(vector<uint> &colorIndices)
{
    GLubyte *colorPtr = (GLubyte *)&colorIndices[0];
    glColorPointer(4, GL_UNSIGNED_BYTE, 0, colorPtr);
}

void ReadSelectedIndices(int x, int y, int width, int height, uint *selectedIndices)
{
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, selectedIndices);
}
These basically pack an unsigned integer (32 bits) into 4 bytes: R, G, B, A. It is simple and very fast compared to GL_SELECT.
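A sketch of the byte layout this relies on (assuming a little-endian machine, which is what casting the index's address to GLubyte* implies here):
unsigned int colorIndex = 0x00332211;
GLubyte *bytes = (GLubyte *)&colorIndex;
/* bytes[0] == 0x11 -> R, bytes[1] == 0x22 -> G, bytes[2] == 0x33 -> B, bytes[3] == 0x00 -> A */

/* Reading back with GL_RGBA / GL_UNSIGNED_BYTE reassembles the same layout, so each
   returned uint equals the original index, provided the framebuffer really stores
   8 bits per channel: */
unsigned int decoded = bytes[0] | (bytes[1] << 8) | (bytes[2] << 16) | ((unsigned int)bytes[3] << 24);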
The problem is that it sometimes does not work. It does not work for me on Windows with an NVIDIA 9800 graphics card, but it works on an ATI Radeon 4670 on both Mac and Windows.
I was thinking that the problem is in the alpha channel, so I tried this implementation:
union RgbColor
{
    GLuint colorIndex : 24;
    GLubyte components[3];
    struct
    {
        GLubyte r;
        GLubyte g;
        GLubyte b;
    };
};
void ColorIndex(uint colorIndex)
{
    RgbColor color;
    color.colorIndex = colorIndex;
    glColor3ubv(color.components);
}

vector<GLubyte> colorComponents;

void ColorIndices(vector<uint> &colorIndices)
{
    colorComponents.clear();
    for (uint i = 0; i < colorIndices.size(); i++)
    {
        RgbColor color;
        color.colorIndex = colorIndices[i];
        colorComponents.push_back(color.components[0]);
        colorComponents.push_back(color.components[1]);
        colorComponents.push_back(color.components[2]);
    }
    GLubyte *colorPtr = (GLubyte *)&colorComponents[0];
    glColorPointer(3, GL_UNSIGNED_BYTE, 0, colorPtr);
}
const uint kMaxSelectedIndicesCount = 2000 * 2000; // max width * max height resolution
GLubyte colorBuffer[kMaxSelectedIndicesCount * 3];

void ReadSelectedIndices(int x, int y, int width, int height, uint *selectedIndices)
{
    uint count = (uint)width * (uint)height;
    memset(colorBuffer, 0, count * 3);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glReadPixels(x, y, width, height, GL_RGB, GL_UNSIGNED_BYTE, colorBuffer);
    GetGLError();
    for (uint i = 0; i < count; i++)
    {
        RgbColor color;
        color.components[0] = colorBuffer[i * 3 + 0];
        color.components[1] = colorBuffer[i * 3 + 1];
        color.components[2] = colorBuffer[i * 3 + 2];
        selectedIndices[i] = color.colorIndex;
    }
}
Now selection works on both graphics cards and OSes, but sometimes bad vertices are selected (outside of the selection rectangle).
How can this happen? Is there some better way to implement color picking that is robust and always correct on various graphics cards?
How do I take a screenshot of an OpenGL window in C++ and save it to a file?
I found the glReadPixels() function,
but I don't know what to do next. Where can I set the path to a file, for example?
If it's not difficult, please include some code.
This piece of code captures the OpenGL window and exports it to a BMP file. You must have the FreeImage library to run it.
// Make the BYTE array, factor of 3 because it's RGB.
BYTE* pixels = new BYTE[3 * width * height];
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
// Convert to FreeImage format & save to file
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, width, height, 3 * width, 24, 0x0000FF, 0xFF0000, 0x00FF00, false);
FreeImage_Save(FIF_BMP, image, "C:/test.bmp", 0);
// Free resources
FreeImage_Unload(image);
delete [] pixels;
glReadPixels will copy the bits into a memory buffer that you supply. You have to manually format the data (to the image format of your choice) and write it to disk after glReadPixels returns.
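For instance, here is a minimal sketch using the single-header stb_image_write library (this library is not mentioned in the original answer; it and a GL header are assumed to be available):
#include <stdlib.h>
#include <GL/gl.h>   /* or the platform's equivalent GL header */

#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"

void save_screenshot_png(const char *path, int w, int h)
{
    unsigned char *pixels = (unsigned char *)malloc((size_t)w * h * 3);
    glPixelStorei(GL_PACK_ALIGNMENT, 1);                        /* tightly packed rows */
    glReadPixels(0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE, pixels);
    stbi_flip_vertically_on_write(1);                           /* GL rows start at the bottom */
    stbi_write_png(path, w, h, 3, pixels, w * 3);
    free(pixels);
}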
Runnable example
Each time you click with the mouse on the window, a tmpX.ppm file is created with the current screenshot.
You can view this file for example with eog on Linux, and inspect it with a text editor.
To render without showing a window, see: How to use GLUT/OpenGL to render to a file?
#include <math.h>
#include <stdlib.h>
#include <stdio.h>

#define GL_GLEXT_PROTOTYPES 1
#include <GL/gl.h>
#include <GL/glu.h>
#include <GL/glut.h>
#include <GL/glext.h>

static GLubyte *pixels = NULL;
static const GLenum FORMAT = GL_RGBA;
static const GLuint FORMAT_NBYTES = 4;
static const unsigned int HEIGHT = 500;
static const unsigned int WIDTH = 500;
static unsigned int nscreenshots = 0;
static unsigned int time;

/* Model. */
static double angle = 0;
static double angle_speed = 45;

static void init(void) {
    glReadBuffer(GL_BACK);
    glClearColor(0.0, 0.0, 0.0, 0.0);
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glViewport(0, 0, WIDTH, HEIGHT);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    pixels = malloc(FORMAT_NBYTES * WIDTH * HEIGHT);
    time = glutGet(GLUT_ELAPSED_TIME);
}

static void deinit(void) {
    free(pixels);
}

static void create_ppm(char *prefix, int frame_id, unsigned int width, unsigned int height,
        unsigned int color_max, unsigned int pixel_nbytes, GLubyte *pixels) {
    size_t i, j, cur;
    enum Constants { max_filename = 256 };
    char filename[max_filename];
    snprintf(filename, max_filename, "%s%d.ppm", prefix, frame_id);
    FILE *f = fopen(filename, "w");
    fprintf(f, "P3\n%u %u\n%u\n", width, height, color_max);
    for (i = 0; i < height; i++) {
        for (j = 0; j < width; j++) {
            cur = pixel_nbytes * ((height - i - 1) * width + j);
            fprintf(f, "%3d %3d %3d ", pixels[cur], pixels[cur + 1], pixels[cur + 2]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
}

static void draw_scene() {
    glClear(GL_COLOR_BUFFER_BIT);
    glLoadIdentity();
    glRotatef(angle, 0.0f, 0.0f, -1.0f);
    glBegin(GL_TRIANGLES);
    glColor3f(1.0f, 0.0f, 0.0f);
    glVertex3f( 0.0f,  0.5f, 0.0f);
    glColor3f(0.0f, 1.0f, 0.0f);
    glVertex3f(-0.5f, -0.5f, 0.0f);
    glColor3f(0.0f, 0.0f, 1.0f);
    glVertex3f( 0.5f, -0.5f, 0.0f);
    glEnd();
}

static void display(void) {
    draw_scene();
    glutSwapBuffers();
    glReadPixels(0, 0, WIDTH, HEIGHT, FORMAT, GL_UNSIGNED_BYTE, pixels);
}

static void idle(void) {
    int new_time = glutGet(GLUT_ELAPSED_TIME);
    angle += angle_speed * (new_time - time) / 1000.0;
    angle = fmod(angle, 360.0);
    time = new_time;
    glutPostRedisplay();
}

void mouse(int button, int state, int x, int y) {
    if (state == GLUT_DOWN) {
        puts("screenshot");
        create_ppm("tmp", nscreenshots, WIDTH, HEIGHT, 255, FORMAT_NBYTES, pixels);
        nscreenshots++;
    }
}

int main(int argc, char **argv) {
    glutInit(&argc, argv);
    glutInitWindowSize(WIDTH, HEIGHT);
    glutInitWindowPosition(100, 100);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
    glutCreateWindow(argv[0]);
    init();
    glutDisplayFunc(display);
    glutIdleFunc(idle);
    glutMouseFunc(mouse);
    atexit(deinit);
    glutMainLoop();
    return EXIT_SUCCESS;
}
Compile with:
gcc main.c -lm -lGL -lGLU -lglut
Tested on Ubuntu 15.10, OpenGL 4.5.0 NVIDIA 352.63.
Vulkan
This example just worked: https://github.com/SaschaWillems/Vulkan/blob/b9f0ac91d2adccc3055a904d3a8f6553b10ff6cd/examples/screenshot/screenshot.cpp For how to run it, see: Is it possible to do offscreen rendering without Surface in Vulkan?
Saving that data to a file is something you'll either have to do yourself or use a third-party library for - OpenGL has no such feature.
Windows .bmp is probably the easiest if you're looking to do it yourself - Wikipedia has a pretty good explanation of the file format. Otherwise you can use image saving/loading libraries: libpng, libjpeg, etc. for low-level control, or devIL (there are others, but this is my favorite, and it's an extremely versatile library that goes well with GL) for high-level "just do it" image i/o.
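If you do go the do-it-yourself BMP route, a minimal sketch might look like this (my own illustration, not from the original answer; it assumes a little-endian machine, and that the buffer was filled by glReadPixels with GL_BGR and the default GL_PACK_ALIGNMENT of 4, which happens to match BMP's bottom-up, BGR, 4-byte-padded row layout):
#include <stdio.h>
#include <stdint.h>

static void put_u16(FILE *f, uint16_t v) { fwrite(&v, 2, 1, f); }
static void put_u32(FILE *f, uint32_t v) { fwrite(&v, 4, 1, f); }

/* Writes a 24-bit uncompressed BMP. `bgr` must hold h rows of w pixels in BGR
   order, bottom-up, with each row padded to a multiple of 4 bytes. */
int save_bmp24(const char *path, int w, int h, const unsigned char *bgr)
{
    uint32_t rowBytes = (uint32_t)(w * 3 + 3) & ~3u;   /* rows padded to 4 bytes */
    uint32_t imageBytes = rowBytes * (uint32_t)h;
    FILE *f = fopen(path, "wb");
    if (!f) return 0;

    /* BITMAPFILEHEADER (14 bytes) */
    fputc('B', f); fputc('M', f);
    put_u32(f, 14 + 40 + imageBytes);                  /* total file size */
    put_u32(f, 0);                                     /* reserved */
    put_u32(f, 14 + 40);                               /* offset to pixel data */

    /* BITMAPINFOHEADER (40 bytes) */
    put_u32(f, 40);
    put_u32(f, (uint32_t)w);
    put_u32(f, (uint32_t)h);                           /* positive height = bottom-up rows */
    put_u16(f, 1); put_u16(f, 24);                     /* planes, bits per pixel */
    put_u32(f, 0); put_u32(f, imageBytes);             /* BI_RGB (no compression), image size */
    put_u32(f, 2835); put_u32(f, 2835);                /* ~72 DPI, mostly ignored */
    put_u32(f, 0); put_u32(f, 0);                      /* palette fields, unused for 24-bit */

    fwrite(bgr, 1, imageBytes, f);
    fclose(f);
    return 1;
}
Note that the read buffer must then be allocated with the padded row size (rowBytes * h), not w * 3 * h, whenever the width is not a multiple of 4.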
A simple and quick solution.
Outputs a TARGA file but can easily be converted to PNG (script provided).
No extra libraries required.
Will work with both C and C++ (with some minor changes).
Note: The output file should have the tga file extension.
Here is the code:
void saveScreenshotToFile(std::string filename, int windowWidth, int windowHeight) {
    const int numberOfPixels = windowWidth * windowHeight * 3;
    unsigned char pixels[numberOfPixels];

    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glReadBuffer(GL_FRONT);
    glReadPixels(0, 0, windowWidth, windowHeight, GL_BGR_EXT, GL_UNSIGNED_BYTE, pixels);

    FILE *outputFile = fopen(filename.c_str(), "wb"); // binary mode matters on Windows
    short header[] = {0, 2, 0, 0, 0, 0, (short) windowWidth, (short) windowHeight, 24};

    fwrite(&header, sizeof(header), 1, outputFile);
    fwrite(pixels, numberOfPixels, 1, outputFile);
    fclose(outputFile);

    printf("Finish writing to file.\n");
}
And calling the function:
saveScreenshotToFile("test.tga", 1200, 900);
A bash script to convert TARGA files to PNG:
for oldFileName in *.tga; do
    [ -f "$oldFileName" ] || break # Break out if no .tga files found.
    newFileName=${oldFileName//.tga/.png}
    convert $oldFileName $newFileName
    rm $oldFileName
    echo "Converted $oldFileName to $newFileName"
done
You can save a screenshot with #Rafael's answer and OpenCV:
void Game::saveScreenshotToFile(std::string filename, int windowWidth, int windowHeight) {
    cv::Mat img(windowHeight, windowWidth, CV_8UC3);
    glPixelStorei(GL_PACK_ALIGNMENT, (img.step & 3) ? 1 : 4);
    glPixelStorei(GL_PACK_ROW_LENGTH, img.step/img.elemSize());
    glReadPixels(0, 0, img.cols, img.rows, GL_BGR, GL_UNSIGNED_BYTE, img.data);
    cv::flip(img, img, 0);
    //cv::imshow("Image", img);
    //cv::waitKey(0);
    cv::imwrite(filename, img);
}
Thanks to this answer for the OpenCV part: https://stackoverflow.com/a/9098883/10152334
Generally, OpenGL doesn't provide functions to save images. I think the fastest and simplest way to do this is to save to the .PPM format. However, this format is uncompressed, which means its file size will be very large, and it is supported by only a few programs nowadays.
I prefer to save the image to a .png file, which is compressed but still lossless and is supported by many browsers. To save OpenGL output to .png format, I first recommend PNGwriter. It's pretty simple and easy to use. For example, to save a pixel of an image with color (R, G, B) at position (x, y), your code would be (see "quickstart" on the PNGwriter website):
pngwriter PNG(width, height, 1.0, fileName); // "1.0" stands for the white background
PNG.plot(x, y, R, G, B);
PNG.close();
Note that, since PNGwriter saves each pixel starting from the top-left corner of the image, while the array obtained from glReadPixels() starts from the bottom-left of the window, your code to save the whole image might look like this:
const size_t nPixels = width * height * 3;
GLfloat *pixels = new GLfloat[nPixels];
glReadPixels(0, 0, width, height, GL_RGB, GL_FLOAT, pixels);

pngwriter PNG(width, height, 1.0, fileName);
size_t x = 1;
size_t y = 1;
double R, G, B;
for (size_t i = 0; i < nPixels; i++) // "i" is the index for array "pixels"
{
    switch (i % 3)
    {
        case 0:
            R = static_cast<double>(pixels[i]);
            break;
        case 1:
            G = static_cast<double>(pixels[i]);
            break;
        case 2: // all three components of this pixel have been read
            B = static_cast<double>(pixels[i]);
            PNG.plot(x, y, R, G, B); // set pixel at position (x, y)
            if (x == width) // Move to the next row of the image
            {
                x = 1;
                y++;
            }
            else // To the next pixel
            {
                x++;
            }
            break;
    }
}
PNG.close();
delete[] pixels;
Despite an earlier question (asked here), our project is constrained to using glDrawPixels, so we have to do some hackery.
One of the feature requirements is to be able to have a magnified view show up on a clicked region of an image; so, looking at an image, I want to click the mouse, and have a 200% image window show up where the mouse is. As I drag my cursor, the window should follow the cursor.
The context is set up like:
The Big Red Book has code that looks like this:
Gl.glShadeModel(Gl.GL_FLAT);
Gl.glClearColor(0.1f, 0.1f, 0.1f, 0.0f);
Gl.glPixelStorei(Gl.GL_UNPACK_ALIGNMENT, 2);
Gl.glPolygonMode(Gl.GL_FRONT_AND_BACK, Gl.GL_LINE);
Gl.glDisable(Gl.GL_SCISSOR_TEST);
Gl.glDisable(Gl.GL_ALPHA_TEST);
Gl.glDisable(Gl.GL_STENCIL_TEST);
Gl.glDisable(Gl.GL_DEPTH_TEST);
Gl.glDisable(Gl.GL_BLEND);
Gl.glDisable(Gl.GL_DITHER);
Gl.glDisable(Gl.GL_LOGIC_OP);
Gl.glDisable(Gl.GL_LIGHTING);
Gl.glDisable(Gl.GL_FOG);
Gl.glDisable(Gl.GL_TEXTURE_1D);
Gl.glDisable(Gl.GL_TEXTURE_2D);
Gl.glPixelTransferi(Gl.GL_MAP_COLOR, Gl.GL_TRUE);
Gl.glPixelTransferf(Gl.GL_RED_SCALE, 1.0f);
Gl.glPixelTransferi(Gl.GL_RED_BIAS, 0);
Gl.glPixelTransferf(Gl.GL_GREEN_SCALE, 1.0f);
Gl.glPixelTransferi(Gl.GL_GREEN_BIAS, 0);
Gl.glPixelTransferf(Gl.GL_BLUE_SCALE, 1.0f);
Gl.glPixelTransferi(Gl.GL_BLUE_BIAS, 0);
Gl.glPixelTransferi(Gl.GL_ALPHA_SCALE, 1);
Gl.glPixelTransferi(Gl.GL_ALPHA_BIAS, 0);
And then the call to make the smaller-but-zoomed image looks like
int width = (int)((this.Width * 0.2)/2.0);
Gl.glReadBuffer(Gl.GL_FRONT_AND_BACK);
Gl.glRasterPos2i(0, 0);
Gl.glBitmap(0, 0, 0, 0, mStartX - (width*2), mStartY, null);
Gl.glPixelZoom(2.0f, 2.0f);
Gl.glCopyPixels(mStartX - width, mStartY, width, width, Gl.GL_COLOR);
where mStartY and mStartX are the points where the click happened.
Problem is, the window that shows up is really mangling the lookup tables, and really clamping the image down to essentially a black-and-white binary image (i.e., no shades of grey).
The data is a black-and-white unsigned short array, and is set with this code:
float step = (65535.0f / (float)(max - min));
mColorTable = new ushort[65536];
int i;
for (i = 0; i < 65536; i++)
{
    if (i < min)
        mColorTable[i] = 0;
    else if (i > max)
        mColorTable[i] = 65535;
    else
        mColorTable[i] = (ushort)((float)(i - min) * step);
}
.... //some irrelevant code
Gl.glPixelMapusv(Gl.GL_PIXEL_MAP_R_TO_R, 65536, mColorTable);
Gl.glPixelMapusv(Gl.GL_PIXEL_MAP_G_TO_G, 65536, mColorTable);
Gl.glPixelMapusv(Gl.GL_PIXEL_MAP_B_TO_B, 65536, mColorTable);
Now, according to this documentation, I should use GL_PIXEL_MAP_I_TO_I and set INDEX_SCALE and INDEX_BIAS to zero, but doing that does not change the result: the image is still severely clamped. And by 'severely clamped' I mean it's either black or white, with very few shades of grey, while the original non-magnified image looks like what's expected.
So, how do I avoid the clamping of the magnified view? Should I make a second control that follows the cursor and gets filled in with data from the first control? That approach seems like it would take the array copies outside of the graphics card and into C#, which would almost by definition be slower, and so make the control nonresponsive.
Oh, I'm using C# and the Tao framework, if that matters.
Here's the answer. The problem is that the LUT is being applied twice, so before calling the copy, call:
Gl.glPixelTransferi(Gl.GL_MAP_COLOR, Gl.GL_FALSE);
Then, once done, call:
Gl.glPixelTransferi(Gl.GL_MAP_COLOR, Gl.GL_TRUE);
That way, the 'extreme clamping' I was describing is removed.
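In plain C OpenGL calls (the Tao equivalents are the Gl./GL_-prefixed forms used above), the copy would then look roughly like this, with mStartX, mStartY and width taken from the question's code:
glPixelTransferi(GL_MAP_COLOR, GL_FALSE);                  /* the LUT was already applied when the image was drawn */
glRasterPos2i(0, 0);
glBitmap(0, 0, 0, 0, mStartX - width * 2, mStartY, NULL);  /* position the zoom window, as in the question */
glPixelZoom(2.0f, 2.0f);
glCopyPixels(mStartX - width, mStartY, width, width, GL_COLOR);
glPixelTransferi(GL_MAP_COLOR, GL_TRUE);                   /* restore the LUT for the next glDrawPixels */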
#thing2k: your solution causes the copy to happen outside the graphics card, so it slows down the drawing on mouse drag, but it doesn't fix the double clamp.
Please, pretty please with loads of sugar, molasses, sprinkles and a mist of high-fructose corn syrup on top and all over, explain why you cannot just use texture-mapping to draw this imagery.
Texture-mapping is a core, basic, everyday, run of the mill, garden-variety, standard, typical, expected, and just generally nice feature of OpenGL. It is in version 1.4. Why not use it as a starting point?
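For what it's worth, the texture-based zoom this alludes to might look roughly like the sketch below (purely illustrative; imageTex, imageWidth and imageHeight are assumed to hold the already-uploaded image, and mStartX/mStartY/width come from the question):
glBindTexture(GL_TEXTURE_2D, imageTex);
glEnable(GL_TEXTURE_2D);

/* texture coordinates of the region under the cursor, half-sized for a 2x zoom */
float s0 = (mStartX - width / 2.0f) / imageWidth, s1 = (mStartX + width / 2.0f) / imageWidth;
float t0 = (mStartY - width / 2.0f) / imageHeight, t1 = (mStartY + width / 2.0f) / imageHeight;

glBegin(GL_QUADS);
glTexCoord2f(s0, t0); glVertex2i(mStartX - width, mStartY - width);
glTexCoord2f(s1, t0); glVertex2i(mStartX + width, mStartY - width);
glTexCoord2f(s1, t1); glVertex2i(mStartX + width, mStartY + width);
glTexCoord2f(s0, t1); glVertex2i(mStartX - width, mStartY + width);
glEnd();
glDisable(GL_TEXTURE_2D);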
If I understand you correctly, then this should be close to what you're after, using glReadPixels and glDrawPixels.
Sorry it's C++ and not C#, but the OpenGL functions should still be the same.
// main.cpp
// glut Text
#ifdef __WIN32__
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif

#include <GL/glut.h>
#include <cstdio>

int WIDTH = 800;
int HEIGHT = 600;

int MouseButton, MouseY = 0, MouseX = 0;

const int size = 80;
char *image, rect[size*size*3];
int imagewidth, imageheight;

bool Init()
{
    int offset;
    FILE* file = fopen("image.bmp", "rb");
    if (file == NULL)
        return false;
    fseek(file, 10, SEEK_SET);
    fread(&offset, sizeof(int), 1, file);
    fseek(file, 18, SEEK_SET);
    fread(&imagewidth, sizeof(int), 1, file);
    fread(&imageheight, sizeof(int), 1, file);
    fseek(file, offset, SEEK_SET);
    image = new char[imagewidth*imageheight*3];
    if (image == NULL)
        return false;
    fread(image, 1, imagewidth*imageheight*3, file);
    fclose(file);
    return true;
}

void Reshape(int width, int height)
{
    WIDTH = width;
    HEIGHT = height;
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0, width, 0, height);
}

void Display()
{
    int size2 = size/2;
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    glRasterPos2i(0, 0);
    glPixelZoom(1.f, 1.f);
    glDrawPixels(imagewidth, imageheight, 0x80E0/*GL_BGR*/, GL_UNSIGNED_BYTE, image);

    glReadPixels(MouseX-size2, MouseY-size2, size, size, GL_RGB, GL_UNSIGNED_BYTE, rect);
    glPixelZoom(2.f, 2.f);
    glRasterPos2i(MouseX-size, MouseY-size);
    glDrawPixels(size, size, GL_RGB, GL_UNSIGNED_BYTE, rect);

    glFlush();
    glutSwapBuffers();
}

void Mouse(int button, int state, int x, int y)
{
    if (state == GLUT_DOWN)
        MouseButton |= (1<<button);   // set the pressed button's bit
    else
        MouseButton &= ~(1<<button);  // clear it on release
}

void MouseMove(int x, int y)
{
    MouseX = x;
    MouseY = HEIGHT - y;
}

int main(int argc, char* argv[])
{
    glutInit(&argc, argv);
    if (Init() == false)
        return 1;

    glutInitWindowSize(WIDTH, HEIGHT);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
    glutCreateWindow("glut_Text");
    glClearColor(0.25, 0.25, 0.25, 1.0);

    glutReshapeFunc(Reshape);
    glutDisplayFunc(Display);
    glutIdleFunc(Display);
    glutMouseFunc(Mouse);
    glutMotionFunc(MouseMove);
    glutPassiveMotionFunc(MouseMove);

    glutMainLoop();
    return 0;
}
Hope this helps.