I am using OpenGL to simulate objects, and stbi_write_png to save a picture of the result. But the saved picture is only a bit over 200 KB, and it is not very clear after zooming in. I would like to know if there are other ways to save high-resolution pictures from C++ code.
My code is shown below:
int SaveScreenshot(const char *filename)
{
GLint viewport[4];
glGetIntegerv(GL_VIEWPORT, viewport);
int x = viewport[0];
int y = viewport[1];
int width = viewport[2];
int height = viewport[3];
char *data = (char*)malloc((size_t)(width * height * 4)); // 4 components (R, G, B, A)
if (!data)
return 0;
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
stbi_flip_vertically_on_write(1);
int saved = stbi_write_png(filename, width, height, 4, data, 0);
free(data);
return saved;
}
A couple of options:
Render to a larger-than-default-framebuffer FBO & glReadPixels() that (sketched after this list)
Render to multiple tiles & glReadPixels() those, with a stitch-into-single-larger-image final pass
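Here is a minimal sketch of the first option, assuming an OpenGL 3.0+ context (for core FBO support) and stb_image_write as in your snippet. drawScene(), width, and height are hypothetical stand-ins for your existing rendering code and current window size:
GLuint fbo, colorTex;
int bigW = 4 * width, bigH = 4 * height; // e.g. 4x the window size

glGenTextures(1, &colorTex);
glBindTexture(GL_TEXTURE_2D, colorTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, bigW, bigH, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, NULL);

glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                       GL_TEXTURE_2D, colorTex, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    { /* handle the error */ }

glViewport(0, 0, bigW, bigH);
drawScene(); // your existing rendering code, now at 4x resolution

char *data = (char*)malloc((size_t)bigW * bigH * 4);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, bigW, bigH, GL_RGBA, GL_UNSIGNED_BYTE, data);
stbi_flip_vertically_on_write(1);
stbi_write_png("screenshot_4x.png", bigW, bigH, 4, data, 0);
free(data);

glBindFramebuffer(GL_FRAMEBUFFER, 0); // back to the default framebuffer
glDeleteFramebuffers(1, &fbo);
glDeleteTextures(1, &colorTex);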
I am working on a project where I have to project the data from a camera with a resolution of 640x480 on a 4K screen in portrait mode.
The camera is the Kinect V1 but I will switch to version 2 with a better resolution (1920x1080).
My question is how to scale the displayed texture to get a correct result.
For the moment, I have managed to fill the entire screen, but the image is flattened in width. Ideally I would keep the proportions and crop a certain width on each side of the image.
I am using SDL with OpenGL; here is the relevant part of the code:
// window creation
auto window = SDL_CreateWindow("Imagine",
x,
y,
0,
0,
SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN | SDL_WINDOW_FULLSCREEN_DESKTOP | SDL_WINDOW_ALLOW_HIGHDPI | SDL_WINDOW_BORDERLESS);
// GL initialization and texture creation
void SdlNuitrackRenderHandler::initTexture(int width, int height)
{
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glOrtho(0, _width, _height, 0, -1.0, 1.0);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glGenTextures(1, &_textureID);
width = power2(width);
height = power2(height);
if (_textureBuffer != 0)
delete[] _textureBuffer;
_textureBuffer = new uint8_t[width * height * 3];
memset(_textureBuffer, 0, sizeof(uint8_t) * width * height * 3);
glBindTexture(GL_TEXTURE_2D, _textureID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Set texture coordinates [0, 1] and vertexes position
{
_textureCoords[0] = (float) _width / width;
_textureCoords[1] = (float) _height / height;
_textureCoords[2] = (float) _width / width;
_textureCoords[3] = 0.0;
_textureCoords[4] = 0.0;
_textureCoords[5] = 0.0;
_textureCoords[6] = 0.0;
_textureCoords[7] = (float) _height / height;
_vertexes[0] = _width;
_vertexes[1] = _height;
_vertexes[2] = _width;
_vertexes[3] = 0.0;
_vertexes[4] = 0.0;
_vertexes[5] = 0.0;
_vertexes[6] = 0.0;
_vertexes[7] = _height;
}
// Texture rendering
// Render prepared background texture
void SdlNuitrackRenderHandler::renderTexture()
{
glClearColor(1, 1, 1, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glColor4f(1, 1, 1, 1);
glBindTexture(GL_TEXTURE_2D, _textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _width, _height, GL_RGB, GL_UNSIGNED_BYTE, _textureBuffer);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, _vertexes);
glTexCoordPointer(2, GL_FLOAT, 0, _textureCoords);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisable(GL_TEXTURE_2D);
}
While I agree with what was written in the comments about this being outdated OpenGL code, the issue at its heart has nothing to do with OpenGL. You want to draw one rectangle with the correct aspect ratio inside another rectangle that has a different aspect ratio. You simply need to know where to place the vertices.
Typically with the TEXTURE_2D texture target, you want your texture coordinates to be 0-1 in both directions, unless you plan to crop the input image. There was a time when textures had to have a width and height that were a power of 2. That hasn't been the case in a very long time. So remove these 2 lines:
width = power2(width);
height = power2(height);
So the first thing is to set those properly:
_textureCoords[0] = 1.0;
_textureCoords[1] = 1.0;
_textureCoords[2] = 1.0;
_textureCoords[3] = 0.0;
_textureCoords[4] = 0.0;
_textureCoords[5] = 0.0;
_textureCoords[6] = 0.0;
_textureCoords[7] = 1.0;
(As an aside, that code is really hard to read and will be a pain to maintain. You should make the texture coordinates (and vertex coordinates) a struct with an x and y value so it reads sensibly. Right now it's not obvious that it's 4 sets of 2D coordinates that are (max, max), (max, min), (min, min), (min, max). But I digress.)
Next, to figure out the vertex positions, you need to know whether the video is going to be scaled to fit the width or the height. To do this, compute both candidate scale factors:
double widthScaleRatio = displayWidth / imageWidth; // <- using this scale will guarantee the width of the new image is the same as the display's width, but might crop the height
double heightScaleRatio = displayHeight / imageHeight; // <- using this scale will guarantee the height of the new image is the same as the display's height but might crop the width
double scale = 1.0;
// If scaling by the widthScaleRatio makes the height too big, use the heightScaleRatio
// Otherwise use the widthScaleRatio
if (imageHeight * widthScaleRatio > displayHeight)
{
scale = heightScaleRatio;
}
else
{
scale = widthScaleRatio;
}
Now scale your width and height by the scale:
double newWidth = imageWidth * scale;
double newHeight = imageHeight * scale;
and set your vertices based on that:
_vertexes[0] = newWidth;
_vertexes[1] = newHeight;
_vertexes[2] = newWidth;
_vertexes[3] = 0.0;
_vertexes[4] = 0.0;
_vertexes[5] = 0.0;
_vertexes[6] = 0.0;
_vertexes[7] = newHeight;
And the same caveat applies to making this code easier to read as with the texture coordinates.
EDIT: Here's a simple program to show how it works:
#include <iostream>

int main() {
double displayWidth = 2160;
double displayHeight = 4096;
double imageWidth = 640;
double imageHeight = 480;
double widthScaleRatio = displayWidth / imageWidth; // <- using this scale will guarantee the width of the new image is the same as the display's width, but might crop the height
double heightScaleRatio = displayHeight / imageHeight; // <- using this scale will guarantee the height of the new image is the same as the display's height but might crop the width
double scale = 1.0;
// If scaling by the widthScaleRatio makes the height too big, use the heightScaleRatio
// Otherwise use the widthScaleRatio
if (imageHeight * widthScaleRatio > displayHeight)
{
scale = heightScaleRatio;
}
else
{
scale = widthScaleRatio;
}
double newWidth = imageWidth * scale;
double newHeight = imageHeight * scale;
std::cout << "New size = (" << newWidth << ", " << newHeight << ")\n";
}
When I run it, I get:
New size = (2160, 1620)
I need to update an array of pixels on the screen every frame. It works initially; however, when I try to resize the window, it glitches and eventually crashes with EXC_BAD_ACCESS. I already checked that the buffer is allocated to the correct size before every frame, but that does not seem to affect the result.
#include <stdio.h>
#include <stdlib.h>
#include <GLUT/GLUT.h>
unsigned char *buffer = NULL;
int width = 400, height = 400;
unsigned int screenTexture;
void Display()
{
for (int y = 0; y < height; y+=4) {
for (int x = 0; x < width; x++) {
buffer[(x + y * width) * 3] = 255;
}
}
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
// This function results in EXC_BAD_ACCESS 1, although the buffer is always correctly allocated
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, height, 0, 0, 1);
glMatrixMode(GL_MODELVIEW);
glBegin (GL_QUADS);
glTexCoord2f(0,0); glVertex2i(0, 0);
glTexCoord2f(1,0); glVertex2i(width,0);
glTexCoord2f(1,1); glVertex2i(width,height);
glTexCoord2f(0,1); glVertex2i(0, height);
glEnd ();
glFlush();
glutPostRedisplay();
}
void Resize(int w, int h)
{
width = w;
height = h;
buffer = (unsigned char *)realloc(buffer, sizeof(unsigned char) * width * height * 3);
if (!buffer) {
printf("Error Reallocating buffer\n");
exit(1);
}
}
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
glutInitWindowSize(width, height);
glutCreateWindow("Rasterizer");
glutDisplayFunc(Display);
glutReshapeFunc(Resize);
glGenTextures(1, &screenTexture);
glBindTexture(GL_TEXTURE_2D, screenTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glDisable(GL_DEPTH_TEST);
buffer = (unsigned char *)malloc(sizeof(unsigned char) * width * height * 3);
glutMainLoop();
}
After resizing, the screen does not display properly either:
What is causing this problem? The code compiles and runs; you just have to link GLUT and OpenGL.
As #genpfault mentioned, OpenGL reads 4 bytes per pixel instead of your assumption of 3.
Instead of changing GL_UNPACK_ALIGNMENT, you can also change your code to the correct assumption of 4 bytes per pixel via a simple struct:
struct pixel {
unsigned char r, g, b;
unsigned char unused;
};
Then, instead of using the magic constant 3, you can use the much clearer sizeof(struct pixel). This makes it easier to read and to convey the intent of the code, and it doesn't result in any extra code (as the structure is "effectively" an array of 4 bytes).
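For illustration, a sketch of how the question's allocation, fill, and upload might look with that struct. Note the client format passed to glTexImage2D becomes GL_RGBA so all 4 bytes per pixel are consumed; the internal format can stay GL_RGB:
struct pixel {
    unsigned char r, g, b;
    unsigned char unused;
};

struct pixel *buffer = (struct pixel *)malloc(sizeof(struct pixel) * width * height);

// The fill loop indexes by field name instead of magic offsets.
for (int y = 0; y < height; y++)
    for (int x = 0; x < width; x++)
        buffer[x + y * width].r = 255;

// Every row is now a multiple of 4 bytes, so the default
// GL_UNPACK_ALIGNMENT of 4 is satisfied without glPixelStorei().
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, buffer);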
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, buffer);
^^^^^^
GL_UNPACK_ALIGNMENT defaults to 4, not 1. So OpenGL will read 4 bytes for every pixel, not the 3 that you're assuming.
Set GL_UNPACK_ALIGNMENT to 1 using glPixelStorei().
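That is, somewhere before the glTexImage2D() call:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows are tightly packed, no padding to 4 bytes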
It sounds like you found something that works, but I don't think the problem was properly diagnosed. I believe the biggest issue is in the way you initialize your texture data here:
for (int y = 0; y < height; y+=4) {
for (int x = 0; x < width; x++) {
buffer[(x + y * width) * 3] = 255;
}
}
This only sets data in every 4th row, and then only for every 3rd byte within those rows. To initialize all the data to white, you need to increment the row number (y) by 1 instead of 4, and set all 3 components inside the loop:
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
buffer[(x + y * width) * 3 ] = 255;
buffer[(x + y * width) * 3 + 1] = 255;
buffer[(x + y * width) * 3 + 2] = 255;
}
}
You also need to set GL_UNPACK_ALIGNMENT to 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
This controls the row alignment (not the pixel alignment, as suggested in a couple of other answers). The default value for GL_UNPACK_ALIGNMENT is 4. But with 3 bytes per pixel in the GL_RGB format you are using, the size of a row is only a multiple of 4 bytes if the number of pixels is a multiple of 4. So for tightly packed rows with 3 bytes/pixel, the value needs to be set to 1. For example, at a window width of 401 pixels a row holds 401 * 3 = 1203 bytes, but with an alignment of 4 OpenGL assumes a stride of 1204 bytes and reads past the end of your buffer, which is exactly the EXC_BAD_ACCESS you are seeing.
How do I take a screenshot of an OpenGL window in C++ and save it to a file?
I found the glReadPixels() function, but I don't know what to do next. Where can I set the path to a file, for example?
If it's not too difficult, please include code.
This piece of code captures the OpenGL window and exports it to a BMP file. You must have the FreeImage library to run it.
// Make the BYTE array, factor of 3 because it's RGB.
BYTE* pixels = new BYTE[3 * width * height];
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
// Convert to FreeImage format & save to file
FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, width, height, 3 * width, 24, 0x0000FF, 0xFF0000, 0x00FF00, false);
FreeImage_Save(FIF_BMP, image, "C:/test.bmp", 0);
// Free resources
FreeImage_Unload(image);
delete [] pixels;
glReadPixels will copy the bits into a memory buffer that you supply. You have to manually format the data (to the image format of your choice) and write it to disk after glReadPixels returns.
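The pattern is roughly this (a sketch; writeImageFile() is a hypothetical placeholder for whatever encoder or file format you pick):
// Allocate a buffer for tightly packed RGBA pixels and read the frame back.
unsigned char *buf = (unsigned char*)malloc((size_t)width * height * 4);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buf);

// buf now holds the frame bottom-up; flip it (or tell your encoder to)
// and hand it off. writeImageFile() is hypothetical.
writeImageFile("screenshot.png", width, height, buf);
free(buf);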
Runnable example
Each time you click with the mouse on the window, a tmpX.ppm file is created with the current screenshot.
You can view this file for example with eog on Linux, and inspect it with a text editor.
To render without showing a window, see: How to use GLUT/OpenGL to render to a file?
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#define GL_GLEXT_PROTOTYPES 1
#include <GL/gl.h>
#include <GL/glu.h>
#include <GL/glut.h>
#include <GL/glext.h>
static GLubyte *pixels = NULL;
static const GLenum FORMAT = GL_RGBA;
static const GLuint FORMAT_NBYTES = 4;
static const unsigned int HEIGHT = 500;
static const unsigned int WIDTH = 500;
static unsigned int nscreenshots = 0;
static unsigned int time;
/* Model. */
static double angle = 0;
static double angle_speed = 45;
static void init(void) {
glReadBuffer(GL_BACK);
glClearColor(0.0, 0.0, 0.0, 0.0);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glViewport(0, 0, WIDTH, HEIGHT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
pixels = malloc(FORMAT_NBYTES * WIDTH * HEIGHT);
time = glutGet(GLUT_ELAPSED_TIME);
}
static void deinit(void) {
free(pixels);
}
static void create_ppm(char *prefix, int frame_id, unsigned int width, unsigned int height,
unsigned int color_max, unsigned int pixel_nbytes, GLubyte *pixels) {
size_t i, j, k, cur;
enum Constants { max_filename = 256 };
char filename[max_filename];
snprintf(filename, max_filename, "%s%d.ppm", prefix, frame_id);
FILE *f = fopen(filename, "w");
fprintf(f, "P3\n%d %d\n%d\n", width, HEIGHT, 255);
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
cur = pixel_nbytes * ((height - i - 1) * width + j);
fprintf(f, "%3d %3d %3d ", pixels[cur], pixels[cur + 1], pixels[cur + 2]);
}
fprintf(f, "\n");
}
fclose(f);
}
static void draw_scene() {
glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
glRotatef(angle, 0.0f, 0.0f, -1.0f);
glBegin(GL_TRIANGLES);
glColor3f(1.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.5f, 0.0f);
glColor3f(0.0f, 1.0f, 0.0f);
glVertex3f(-0.5f, -0.5f, 0.0f);
glColor3f(0.0f, 0.0f, 1.0f);
glVertex3f( 0.5f, -0.5f, 0.0f);
glEnd();
}
static void display(void) {
draw_scene();
glutSwapBuffers();
glReadPixels(0, 0, WIDTH, HEIGHT, FORMAT, GL_UNSIGNED_BYTE, pixels);
}
static void idle(void) {
int new_time = glutGet(GLUT_ELAPSED_TIME);
angle += angle_speed * (new_time - time) / 1000.0;
angle = fmod(angle, 360.0);
time = new_time;
glutPostRedisplay();
}
void mouse(int button, int state, int x, int y) {
if (state == GLUT_DOWN) {
puts("screenshot");
create_ppm("tmp", nscreenshots, WIDTH, HEIGHT, 255, FORMAT_NBYTES, pixels);
nscreenshots++;
}
}
int main(int argc, char **argv) {
GLint glut_display;
glutInit(&argc, argv);
glutInitWindowSize(WIDTH, HEIGHT);
glutInitWindowPosition(100, 100);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutIdleFunc(idle);
glutMouseFunc(mouse);
atexit(deinit);
glutMainLoop();
return EXIT_SUCCESS;
}
Compile with:
gcc main.c -lm -lGL -lGLU -lglut
Tested on Ubuntu 15.10, OpenGL 4.5.0 NVIDIA 352.63.
Vulkan
This example just worked: https://github.com/SaschaWillems/Vulkan/blob/b9f0ac91d2adccc3055a904d3a8f6553b10ff6cd/examples/screenshot/screenshot.cpp For how to run it, see: Is it possible to do offscreen rendering without Surface in Vulkan?
Saving that data to a file is something you'll either have to do yourself or use a third-party library for - OpenGL has no such feature.
Windows .bmp is probably the easiest if you're looking to do it yourself - Wikipedia has a pretty good explanation of the file format. Otherwise you can use image saving/loading libraries: libpng, libjpeg, etc. for low-level control, or DevIL (there are others, but this is my favorite, and it's an extremely versatile library that goes well with GL) for high-level "just do it" image I/O.
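If you do want to roll your own, here is a rough sketch of a minimal 24-bit BMP writer (my own illustration, not from any library). It assumes a little-endian machine and bottom-up, tightly packed BGR data, such as glReadPixels(..., GL_BGR, GL_UNSIGNED_BYTE, ...) produces:
#include <cstdio>
#include <cstdint>
#include <cstring>

bool writeBmp24(const char *path, int w, int h, const unsigned char *bgr)
{
    const int rowData = 3 * w;
    const int rowSize = (rowData + 3) & ~3;   // BMP rows are padded to 4 bytes
    const uint32_t dataSize = (uint32_t)rowSize * h;
    const uint32_t fileSize = 54 + dataSize;  // 14-byte file header + 40-byte info header

    unsigned char header[54] = { 'B', 'M' };  // remaining fields zero-initialized
    auto put32 = [&](int off, uint32_t v) { memcpy(header + off, &v, 4); };
    auto put16 = [&](int off, uint16_t v) { memcpy(header + off, &v, 2); };
    put32(2,  fileSize);
    put32(10, 54);           // offset of pixel data
    put32(14, 40);           // BITMAPINFOHEADER size
    put32(18, (uint32_t)w);
    put32(22, (uint32_t)h);  // positive height = bottom-up, matching glReadPixels
    put16(26, 1);            // color planes
    put16(28, 24);           // bits per pixel
    put32(34, dataSize);

    FILE *f = fopen(path, "wb");
    if (!f) return false;
    fwrite(header, sizeof(header), 1, f);
    const unsigned char pad[3] = { 0, 0, 0 };
    for (int y = 0; y < h; y++) {
        fwrite(bgr + (size_t)y * rowData, rowData, 1, f);
        if (rowSize > rowData)
            fwrite(pad, rowSize - rowData, 1, f); // pad the row to 4 bytes
    }
    fclose(f);
    return true;
}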
A simple and quick solution.
Outputs a TARGA file but can easily be converted to PNG (script provided).
No extra libraries required.
Will work with both C and C++ (with some minor changes).
Note: The output file should have the .tga file extension.
Here is the code:
void saveScreenshotToFile(std::string filename, int windowWidth, int windowHeight) {
    const int numberOfPixels = windowWidth * windowHeight * 3;
    std::vector<unsigned char> pixels(numberOfPixels); // needs <vector>; avoids a non-standard VLA

    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glReadBuffer(GL_FRONT);
    glReadPixels(0, 0, windowWidth, windowHeight, GL_BGR_EXT, GL_UNSIGNED_BYTE, pixels.data());

    // 18-byte TGA header: uncompressed true-color, 24 bits per pixel.
    FILE *outputFile = fopen(filename.c_str(), "wb"); // binary mode matters on Windows
    short header[] = {0, 2, 0, 0, 0, 0, (short) windowWidth, (short) windowHeight, 24};

    fwrite(&header, sizeof(header), 1, outputFile);
    fwrite(pixels.data(), numberOfPixels, 1, outputFile);
    fclose(outputFile);

    printf("Finish writing to file.\n");
}
And calling the function:
saveScreenshotToFile("test.tga", 1200, 900);
A bash script to convert TARGA files to PNG:
for oldFileName in *.tga; do
    [ -f "$oldFileName" ] || break # Break out if no .tga files found.
    newFileName="${oldFileName%.tga}.png"
    convert "$oldFileName" "$newFileName"
    rm "$oldFileName"
    echo "Converted $oldFileName to $newFileName"
done
You can save a screenshot by combining #Rafael's answer with OpenCV:
void Game::saveScreenshotToFile(std::string filename, int windowWidth, int windowHeight) {
cv::Mat img(windowHeight, windowWidth, CV_8UC3);
glPixelStorei(GL_PACK_ALIGNMENT, (img.step & 3) ? 1 : 4);
glPixelStorei(GL_PACK_ROW_LENGTH, img.step/img.elemSize());
glReadPixels(0, 0, img.cols, img.rows, GL_BGR, GL_UNSIGNED_BYTE, img.data);
cv::flip(img, img, 0);
//cv::imshow("Image",img);
//cv::waitKey(0);
cv::imwrite(filename, img);
}
Thanks to this answer for the OpenCV part: https://stackoverflow.com/a/9098883/10152334
Generally, OpenGL doesn't provide functions to save images. I think the fastest and simplest way to do this is to save to the .PPM format. However, this format is uncompressed, which means its files will be very large, and it is supported by only a few programs nowadays.
I prefer to save the image to a .png file, which is compressed but still lossless, and is supported by many browsers. To save OpenGL output to .png format, I first recommend PNGwriter. It's pretty simple and easy to use. For example, to save a pixel of an image with color (R, G, B) at position (x, y), your code would be (see "quickstart" on the PNGwriter website):
pngwriter PNG(width, height, 1.0, fileName); // "1.0" stands for the white background
PNG.plot(x, y, R, G, B);
PNG.close();
Note that, since PNGwriter saves each pixel starting from the top-left corner of the image, while the array you get from glReadPixels() starts from the bottom-left of the window, your code to save the whole image might look like this:
GLfloat* pixels = new GLfloat[nPixels]; // nPixels = width * height * 3
glReadPixels(0, 0, width, height, GL_RGB, GL_FLOAT, pixels);

pngwriter PNG(width, height, 1.0, fileName);
size_t x = 1; // PNGwriter coordinates start at 1, not 0
size_t y = 1;
double R, G, B;
for (size_t i = 0; i < nPixels; i++) // "i" is the index into the array "pixels"
{
    switch (i % 3)
    {
    case 0:
        R = static_cast<double>(pixels[i]); break;
    case 1:
        G = static_cast<double>(pixels[i]); break;
    case 2: // B is the last component of each pixel, so plot here
        B = static_cast<double>(pixels[i]);
        PNG.plot(x, y, R, G, B); // set pixel at position (x, y)
        if (x == width) // move to the next row of the image
        {
            x = 1;
            y++;
        }
        else // move to the next pixel
        {
            x++;
        }
        break;
    }
}
PNG.close();
delete[] pixels;
Despite an earlier question (asked here), our project is constrained to using glDrawPixels, so we have to do some hackery.
One of the feature requirements is to be able to have a magnified view show up on a clicked region of an image; so, looking at an image, I want to click the mouse, and have a 200% image window show up where the mouse is. As I drag my cursor, the window should follow the cursor.
The context is set up like:
The Big Red Book has code that looks like this:
Gl.glShadeModel(Gl.GL_FLAT);
Gl.glClearColor(0.1f, 0.1f, 0.1f, 0.0f);
Gl.glPixelStorei(Gl.GL_UNPACK_ALIGNMENT, 2);
Gl.glPolygonMode(Gl.GL_FRONT_AND_BACK, Gl.GL_LINE);
Gl.glDisable(Gl.GL_SCISSOR_TEST);
Gl.glDisable(Gl.GL_ALPHA_TEST);
Gl.glDisable(Gl.GL_STENCIL_TEST);
Gl.glDisable(Gl.GL_DEPTH_TEST);
Gl.glDisable(Gl.GL_BLEND);
Gl.glDisable(Gl.GL_DITHER);
Gl.glDisable(Gl.GL_LOGIC_OP);
Gl.glDisable(Gl.GL_LIGHTING);
Gl.glDisable(Gl.GL_FOG);
Gl.glDisable(Gl.GL_TEXTURE_1D);
Gl.glDisable(Gl.GL_TEXTURE_2D);
Gl.glPixelTransferi(Gl.GL_MAP_COLOR, Gl.GL_TRUE);
Gl.glPixelTransferf(Gl.GL_RED_SCALE, 1.0f);
Gl.glPixelTransferi(Gl.GL_RED_BIAS, 0);
Gl.glPixelTransferf(Gl.GL_GREEN_SCALE, 1.0f);
Gl.glPixelTransferi(Gl.GL_GREEN_BIAS, 0);
Gl.glPixelTransferf(Gl.GL_BLUE_SCALE, 1.0f);
Gl.glPixelTransferi(Gl.GL_BLUE_BIAS, 0);
Gl.glPixelTransferi(Gl.GL_ALPHA_SCALE, 1);
Gl.glPixelTransferi(Gl.GL_ALPHA_BIAS, 0);
And then the call to make the smaller-but-zoomed image looks like this:
int width = (int)((this.Width * 0.2)/2.0);
Gl.glReadBuffer(Gl.GL_FRONT_AND_BACK);
Gl.glRasterPos2i(0, 0);
Gl.glBitmap(0, 0, 0, 0, mStartX - (width*2), mStartY, null);
Gl.glPixelZoom(2.0f, 2.0f);
Gl.glCopyPixels(mStartX - width, mStartY, width, width, Gl.GL_COLOR);
where mStartY and mStartX are the points where the click happened.
Problem is, the window that shows up really mangles the lookup tables, clamping the image down to essentially a binary black-and-white image (i.e., no shades of grey).
The data is a black-and-white unsigned short array, and is set with this code:
float step = (65535.0f / (float)(max - min));
mColorTable = new ushort[65536];
int i;
for (i = 0; i < 65536; i++)
{
if (i < min)
mColorTable[i] = 0;
else if (i > max)
mColorTable[i] = 65535;
else
mColorTable[i] = (ushort)((float)(i - min) * step);
}
.... //some irrelevant code
Gl.glPixelMapusv(Gl.GL_PIXEL_MAP_R_TO_R, 65536, mColorTable);
Gl.glPixelMapusv(Gl.GL_PIXEL_MAP_G_TO_G, 65536, mColorTable);
Gl.glPixelMapusv(Gl.GL_PIXEL_MAP_B_TO_B, 65536, mColorTable);
Now, according to this documentation, I should use GL_PIXEL_MAP_I_TO_I and set INDEX_SCALE and INDEX_BIAS to zero, but doing that does not change the result: the image is still severely clamped. By 'severely clamped' I mean it's either black or white, with very few shades of grey, whereas the original non-magnified image looks as expected.
So, how do I avoid the clamping in the magnified view? Should I make a second control that follows the cursor and gets filled with data from the first control? That approach would move the array copies outside the graphics card and into C#, which would almost by definition be slower, and so make the control unresponsive.
Oh, I'm using C# and the Tao framework, if that matters.
Here's the answer. The problem is that the LUT is being applied twice, so before calling the copy, call:
Gl.glPixelTransferi(Gl.GL_MAP_COLOR, Gl.GL_FALSE);
Then, once done, call:
Gl.glPixelTransferi(Gl.GL_MAP_COLOR, Gl.GL_TRUE);
That way, the 'extreme clamping' I was describing is removed.
#thing2k: your solution causes the copy to happen outside the graphics card, so it slows down drawing on mouse drag, but it doesn't fix the double clamping.
Please, pretty please with loads of sugar, molasses, sprinkles and a mist of high-fructose corn syrup on top and all over, explain why you cannot just use texture-mapping to draw this imagery.
Texture-mapping is a core, basic, everyday, run of the mill, garden-variety, standard, typical, expected, and just generally nice feature of OpenGL. It is in version 1.4. Why not use it as a starting point?
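For what that suggestion would look like in practice, here is a rough sketch (my own illustration in legacy C-style GL, matching the rest of this page rather than the asker's C#/Tao). Hypothetical names: imageData/imgW/imgH are the source image, winW/winH the window size, (mx, my) the cursor position, and size the half-extent of the magnifier in pixels.
// One-time setup: upload the image as a texture.
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, imgW, imgH, 0,
             GL_RGB, GL_UNSIGNED_BYTE, imageData);

// Per frame, after drawing the full-screen image quad: draw a 2x
// magnifier quad centered on the cursor. The texture coordinates span
// half the quad's screen-space extent, which gives the 2x zoom.
float u  = (float)mx / winW,   v  = (float)my / winH;
float du = 0.5f * size / winW, dv = 0.5f * size / winH;
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2f(u - du, v - dv); glVertex2i(mx - size, my - size);
glTexCoord2f(u + du, v - dv); glVertex2i(mx + size, my - size);
glTexCoord2f(u + du, v + dv); glVertex2i(mx + size, my + size);
glTexCoord2f(u - du, v + dv); glVertex2i(mx - size, my + size);
glEnd();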
If I understand you correctly, then this should be close to what you're after, using glReadPixels and glDrawPixels.
Sorry it's C++ not C#, but the OpenGL functions should still be the same.
// main.cpp
// glut Text
#ifdef __WIN32__
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#include <GL/glut.h>
#include <cstdio>
int WIDTH = 800;
int HEIGHT = 600;
int MouseButton, MouseY = 0, MouseX = 0;
const int size = 80;
char *image, rect[size*size*3];
int imagewidth, imageheight;
bool Init()
{
int offset;
FILE* file = fopen("image.bmp", "rb");
if (file == NULL)
return false;
fseek(file, 10, SEEK_SET);
fread(&offset, sizeof(int), 1, file);
fseek(file, 18, SEEK_SET);
fread(&imagewidth, sizeof(int), 1, file);
fread(&imageheight, sizeof(int), 1, file);
fseek(file, offset, SEEK_SET);
image = new char[imagewidth*imageheight*3];
if (image == NULL)
return false;
fread(image, 1, imagewidth*imageheight*3, file);
fclose(file);
return true;
}
void Reshape(int width, int height)
{
WIDTH = width;
HEIGHT = height;
glViewport(0 , 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, width, 0, height);
}
void Display()
{
int size2 = size/2;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glRasterPos2i(0,0);
glPixelZoom(1.f, 1.f);
glDrawPixels(imagewidth, imageheight, 0x80E0/*GL_BGR, since BMP stores BGR*/, GL_UNSIGNED_BYTE, image);
glReadPixels(MouseX-size2, MouseY-size2, size, size, GL_RGB, GL_UNSIGNED_BYTE, rect);
glPixelZoom(2.f, 2.f);
glRasterPos2i(MouseX-size, MouseY-size);
glDrawPixels(size, size, GL_RGB, GL_UNSIGNED_BYTE, rect);
glFlush();
glutSwapBuffers();
}
void Mouse(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
MouseButton |= (1<<button);
else
MouseButton &= ~(1<<button);
}
void MouseMove(int x, int y)
{
MouseX = x;
MouseY = HEIGHT - y;
}
int main(int argc, char* argv[])
{
glutInit(&argc, argv);
if (Init() == false)
return 1;
glutInitWindowSize(WIDTH, HEIGHT);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutCreateWindow("glut_Text");
glClearColor(0.25, 0.25, 0.25, 1.0);
glutReshapeFunc(Reshape);
glutDisplayFunc(Display);
glutIdleFunc(Display);
glutMouseFunc(Mouse);
glutMotionFunc(MouseMove);
glutPassiveMotionFunc(MouseMove);
glutMainLoop();
return 0;
}
Hope this helps.