I would like to know how to stop OpenGL from "blurring" an upscaled texture; the blurring appears to be the default behaviour for transformations. The texture is a POT PNG file. The code used to define a texture and draw it on the screen is this:
class Texture():
    # simple texture class
    # designed for 32 bit png images (with alpha channel)
    def __init__(self, fileName):
        self.texID = 0
        self.LoadTexture(fileName)

    def LoadTexture(self, fileName):
        try:
            textureSurface = pygame.image.load(fileName).convert_alpha()
            textureData = pygame.image.tostring(textureSurface, "RGBA", True)
            self.w, self.h = textureSurface.get_size()
            self.texID = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, self.texID)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textureSurface.get_width(),
                         textureSurface.get_height(), 0, GL_RGBA, GL_UNSIGNED_BYTE,
                         textureData)
        except Exception as E:
            print(E)
            print("can't open the texture: %s" % (fileName))

    def __del__(self):
        glDeleteTextures(self.texID)

    def get_width(self):
        return self.w

    def get_height(self):
        return self.h
def blit(texture, x, y):
    """
    Function that blits a given texture on the screen
    """
    # We put the texture onto the screen
    glBindTexture(GL_TEXTURE_2D, texture.texID)
    # Now we must position the image
    glBegin(GL_QUADS)
    # We calculate each of the points relative to the center of the screen
    top = -y / (HEIGHT // 2) + 1.0
    left = x / (WIDTH // 2) - 1.0
    right = left + texture.w / (WIDTH // 2)
    down = top - texture.h / (HEIGHT // 2)
    # We position each point of the image
    glTexCoord2f(0.0, 1.0)
    glVertex2f(left, top)
    glTexCoord2f(1.0, 1.0)
    glVertex2f(right, top)
    glTexCoord2f(1.0, 0.0)
    glVertex2f(right, down)
    glTexCoord2f(0.0, 0.0)
    glVertex2f(left, down)
    glEnd()
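For context, this is roughly how the class and blit() are driven per frame (a hypothetical sketch: "sprite.png" is a placeholder, and WIDTH, HEIGHT and the OpenGL display setup are assumed from the configuration shown further down):

sprite = Texture("sprite.png")  # hypothetical file name

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)  # clear before drawing
    blit(sprite, 100, 50)  # draw with the texture's top-left at pixel (100, 50)
    pygame.display.flip()  # swap the double-buffered display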
I configured OpenGL as follows:
def ConfigureOpenGL(w, h):
    #glShadeModel(GL_SMOOTH)
    #glClearColor(0.0, 0.0, 0.0, 1.0)
    #glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glViewport(0, 0, w, h)
    glMatrixMode(GL_PROJECTION)
    #glLoadIdentity()
    #gluOrtho2D(-8.0, 8.0, -6.0, 6.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glShadeModel(GL_SMOOTH)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClearDepth(1.0)
    glDisable(GL_DEPTH_TEST)
    glDisable(GL_LIGHTING)
    glDepthFunc(GL_LEQUAL)
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
    glEnable(GL_BLEND)

Surface = pygame.display.set_mode((WIDTH, HEIGHT), OPENGL | DOUBLEBUF)  # |FULLSCREEN
ConfigureOpenGL(WIDTH, HEIGHT)
Before putting anything on the screen, I also call this method:
def OpenGLRender(self):
    """
    Used to prepare the screen to render
    """
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glDisable(GL_LIGHTING)
    glEnable(GL_TEXTURE_2D)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glClearColor(1.0, 1.0, 1.0, 1.0)
I'm using PyOpenGL 3.0.2
Use GL_NEAREST in your glTexParameteri() calls:
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST)
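These filter parameters are per-texture state: they affect whichever texture is bound at the time of the call, which is why they sit right after glBindTexture in LoadTexture. If you ever generate mipmaps for the texture, the minification filter also needs a mipmap-aware nearest variant; a small sketch, only applicable when mipmaps exist:

glBindTexture(GL_TEXTURE_2D, self.texID)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
# Without mipmaps, plain GL_NEAREST is correct; with mipmaps use:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST)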
Related
I wanted to implement shaders in my pygame program. As far as I know it is not possible to apply shaders directly to pygame, so I tried using PyOpenGL with GLSL shaders. I've already written a working program that uses shaders, but it's not very fast.
It needs to run multiple times per tick, for different purposes. I also want it to take a pygame surface as input, apply a shader to it, and output a pygame surface to work with later. It accepts variables that serve as inputs to the shader.
Running the shader once per tick I get about 100 fps; without the shader I get about 500 fps in a program that displays a single image with pygame. I've never used OpenGL before, so it can probably run faster with some tweaking.
Do you have code optimizations or ideas to make it run faster?
Are there better solutions and alternatives?
Thanks!
main.py
import pygame, sys, pygame.freetype
import shader

pygame.init()
shader.init()

font = pygame.freetype.SysFont(None, 20)
clock = pygame.time.Clock()
loadedImage = pygame.image.load("image.png")
shaderObj = shader.create("vertex.txt", "fragment.txt")
window = pygame.display.set_mode(loadedImage.get_size())

while True:
    image = shader.apply(shaderObj, loadedImage)
    window.blit(image, (0, 0))
    font.render_to(window, (5, 5), str(clock.get_fps()), (0, 0, 0), (255, 255, 255))
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            shader.quit()
            sys.exit()
    clock.tick(100)
shader.py
from OpenGL.GL import *
from OpenGL.GL.shaders import *
from pygame.locals import *
import pygame, numpy, sys, glfw

pygame.init()
if not glfw.init():
    sys.exit()

def getFileContent(file):
    with open(file, 'r') as file:
        content = file.read()
    return content

def quit():
    global glfwwindow
    glfw.destroy_window(glfwwindow)

# Create a glfw window
# Its size will be edited later
def init():
    global width, height, glfwwindow
    width, height = 1, 1
    glfw.window_hint(glfw.VISIBLE, False)
    glfwwindow = glfw.create_window(width, height, "", None, None)
    glfw.make_context_current(glfwwindow)
    glViewport(0, 0, width, height)

# Do as much as possible before using the shader
# Output an array of data which will not change over time
def create(vertexShaderPath, fragmentShaderPath):
    global timeMessage
    vertices = numpy.array((0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0), dtype=numpy.float32)
    texcoords = numpy.array((-1.0, 1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0), dtype=numpy.float32)
    texcoords = numpy.array((-1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0), dtype=numpy.float32)  # overwrites the line above
    vertexShader = compileShader(getFileContent(vertexShaderPath), GL_VERTEX_SHADER)
    fragmentShader = compileShader(getFileContent(fragmentShaderPath), GL_FRAGMENT_SHADER)
    shader = (vertices, texcoords, vertexShader, fragmentShader)
    return shader

# You need to create a shader first
# Give optional variables that can be used by the GLSL shader
def apply(shader, image, **variables):
    vertices, texcoords, vertexShader, fragmentShader = shader
    global glfwwindow, timeMessage
    width, height = image.get_size()
    glfw.set_window_size(glfwwindow, width, height)
    glViewport(0, 0, width, height)
    glClear(GL_COLOR_BUFFER_BIT)
    textureData = pygame.image.tostring(image, "RGB", 1)
    glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, textureData)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    shaderProgram = glCreateProgram()
    glAttachShader(shaderProgram, vertexShader)
    glAttachShader(shaderProgram, fragmentShader)
    glLinkProgram(shaderProgram)
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, vertices)
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texcoords)
    glEnableVertexAttribArray(0)
    glEnableVertexAttribArray(1)
    glFlush()
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glUseProgram(shaderProgram)
    # Send variables to the shader if provided
    for variable in variables:
        location = glGetUniformLocation(shaderProgram, variable)
        glUniform1f(location, variables[variable])
    glDrawArrays(GL_QUADS, 0, 4)
    # Turn the image into a pygame surface
    data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
    return pygame.transform.flip(pygame.image.frombuffer(data, (width, height), "RGBA"), False, True)
vertex.txt
#version 120
attribute vec2 vPosition;
attribute vec2 vTexcoords;
varying vec2 fTexcoords;

void main()
{
    gl_Position = vec4(vPosition.x, vPosition.y, 0.0, 1.0);
    fTexcoords = vTexcoords;
}
fragment.txt
#version 120
varying vec2 fTexcoords;
uniform sampler2D textureObj;

void main()
{
    gl_FragColor = texture2D(textureObj, fTexcoords);
}
You are doing far more work per frame than you need to; that is the reason it is not running as fast as you expect.
Your shader.apply() function compiles and links the shader program and uploads the texture to GPU memory every frame. Both should only be done once, at start-up.
Your inner loop should contain little more than the glUseProgram and glDrawArrays calls.
You should also be generating names for your objects and binding them; you can only get away without that in very simple programs that use one texture and one VAO.
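To make that split concrete, here is a rough sketch of how shader.py could be reorganized (a sketch under assumptions, not a drop-in replacement: it reuses the module's glfw window and helpers, moves the texture upload into create(), and omits the vertex-attribute setup, which would likewise run once at start-up):

def create(vertexShaderPath, fragmentShaderPath, image):
    # One-time work: compile and link the program...
    program = glCreateProgram()
    glAttachShader(program, compileShader(getFileContent(vertexShaderPath), GL_VERTEX_SHADER))
    glAttachShader(program, compileShader(getFileContent(fragmentShaderPath), GL_FRAGMENT_SHADER))
    glLinkProgram(program)
    # ...and upload the texture to GPU memory once, under a generated name.
    textureID = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, textureID)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    w, h = image.get_size()
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE,
                 pygame.image.tostring(image, "RGB", 1))
    return program, textureID, w, h

def apply(shader, **variables):
    # Per-frame work: bind, set uniforms, draw, read back.
    program, textureID, width, height = shader
    glClear(GL_COLOR_BUFFER_BIT)
    glUseProgram(program)
    glBindTexture(GL_TEXTURE_2D, textureID)
    for name, value in variables.items():
        glUniform1f(glGetUniformLocation(program, name), value)
    glDrawArrays(GL_QUADS, 0, 4)
    data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
    return pygame.transform.flip(pygame.image.frombuffer(data, (width, height), "RGBA"), False, True)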
In initializeGL of my QGLWidget, I'm loading a .exr image (128 MB, 4000x2000) with imageio.imread:
from imageio import imread
img_array = imread("c:/sample.exr")
self.textureID = glGenTextures(1)
In paintGL of my QGLWidget, I draw my single quad with glBegin(GL_QUADS) and glEnd(), then feed the texture into glTexImage2D and bind it to the quad I created earlier:
glBindTexture(GL_TEXTURE_2D, self.textureID)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, 1920, 1080, 0, GL_RGBA, GL_FLOAT, img_array)
I don't have any problem with loading the image or with the code itself; everything works, but I'm facing a performance issue: navigating the scene starts to lag, and I assume it is because of the huge size of the image.
Is there any way I can reduce its size, or increase the performance?
Note: even though the image is numpy.float32, I only display it as GL_RGBA, which means there is a 0.4545 gamma multiplication. I don't know if that affects performance, but I thought I'd point it out.
Thanks in advance.
Update:
here is the code:
import os
from PySide.QtGui import QColor
from PySide.QtOpenGL import QGLWidget
from imageio import imread
from OpenGL.GL import *
from OpenGL.GLU import *
from engine.nodes import (ImageNode, NodeType, CubeNode, Vector3)

class QImageProcessor(QGLWidget):
    def __init__(self, parent=None):
        super(QImageProcessor, self).__init__(parent)
        self.model = list()
        self.zoomVal = 1.2
        self.local_translate = (0.0, 0.0, 0.0)
        self.local_scale = (1.0, 1.0, 1.0)
        self.xRot = 0
        self.yRot = 0
        self.zRot = 0

    def initializeGL(self):
        self.qglClearColor(QColor.fromCmykF(0.0, 0.1, 0.0, 0.882))
        glViewport(0, 0, self.width(), self.height())
        glEnable(GL_TEXTURE_2D)
        glEnable(GL_CULL_FACE)
        glEnable(GL_MULTISAMPLE)
        glEnable(GL_LINE_SMOOTH, GL_LINE_WIDTH, GL_ALIASED_LINE_WIDTH_RANGE)
        glShadeModel(GL_FLAT)
        glEnable(GL_DEPTH_TEST)
        glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
        glDepthRange(0.1, 1.0)
        # adding images
        image1 = "c:/sample.exr"
        new_image1 = ImageNode(image1)
        new_image1.TextureID = glGenTextures(1)
        new_image1.Data = imread(image1, format='.exr')
        self.model.append(new_image1)
        image2 = "c:/sample2.jpg"
        new_image2 = ImageNode(image2)
        new_image2.TextureID = glGenTextures(1)
        new_image2.Data = imread(image2, format='.jpg')
        self.model.append(new_image2)

    def paintGL(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        gluOrtho2D(-self.zoomVal, +self.zoomVal, -self.zoomVal, +self.zoomVal)
        glLoadIdentity()
        glTranslated(*self.local_translate)
        glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
        glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
        glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
        glScalef(*self.local_scale)
        genList = glGenLists(1)
        glNewList(genList, GL_COMPILE)
        for node in self.model:
            vertices = node.Vertices              # list of vertices
            edges = node.Edges                    # list of edges
            face = node.Face                      # list of faces
            texcoords = node.TextureCoordinates   # list of texture coordinates
            glPushAttrib(GL_ALL_ATTRIB_BITS)
            glPolygonMode(GL_FRONT, GL_FILL)
            glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
            glBindTexture(GL_TEXTURE_2D, node.TextureID)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexImage2D(GL_TEXTURE_2D, 0, node.InternalFormat, node.Width, node.Height, 0, node.Format, node.Type, node.Data)
            # face and UV
            glBegin(GL_QUADS)
            self.qglColor(QColor(255, 255, 255))
            for vertex in face:
                glVertex3fv(vertices[vertex])
                glTexCoord2f(texcoords[vertex][0], texcoords[vertex][1])
            glEnd()
            glPopAttrib()
        glEndList()
        glCallList(genList)
I tried this:
I moved glTexImage2D to the initializeGL function, uploading the texture once and then binding 0:
image2 = "c:/sample2.jpg"
new_image2 = ImageNode(image2)
new_image2.TextureID = glGenTextures(1)
new_image2.Data = imread(image2, format='.jpg')
glTexImage2D(GL_TEXTURE_2D, 0, new_image2.InternalFormat, new_image2.Width, new_image2.Height, 0, new_image2.Format, new_image2.Type, new_image2.Data)
glBindTexture(GL_TEXTURE_2D, new_image2.TextureID)
glBindTexture(GL_TEXTURE_2D, 0)
self.model.append(new_image2)
and then, in paintGL, bound it again:
def paintGL(self):
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    gluOrtho2D(-self.zoomVal, +self.zoomVal, -self.zoomVal, +self.zoomVal)
    glLoadIdentity()
    glTranslated(*self.local_translate)
    glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
    glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
    glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
    glScalef(*self.local_scale)
    genList = glGenLists(1)
    glNewList(genList, GL_COMPILE)
    for node in self.model:
        vertices = node.Vertices              # list of vertices
        edges = node.Edges                    # list of edges
        face = node.Face                      # list of faces
        texcoords = node.TextureCoordinates   # list of texture coordinates
        glPushAttrib(GL_ALL_ATTRIB_BITS)
        glPolygonMode(GL_FRONT, GL_FILL)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
        glBindTexture(GL_TEXTURE_2D, node.TextureID)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        # face and UV
        glBegin(GL_QUADS)
        self.qglColor(QColor(255, 255, 255))
        for vertex in face:
            glVertex3fv(vertices[vertex])
            glTexCoord2f(texcoords[vertex][0], texcoords[vertex][1])
        glEnd()
        glPopAttrib()
    glEndList()
    glCallList(genList)
but it didn't show the texture; I'm not sure what I'm missing.
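One likely culprit is the ordering in the attempt above: a texture must be bound before glTexImage2D is called, otherwise the upload goes to whatever texture happens to be bound at the time. Note also that in immediate mode each glTexCoord2f must be issued before the glVertex3fv it applies to. A minimal sketch of the upload-once approach under those assumptions (same ImageNode attributes as above):

# In initializeGL: bind first, then set parameters and upload, once per image.
glBindTexture(GL_TEXTURE_2D, new_image2.TextureID)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, new_image2.InternalFormat, new_image2.Width,
             new_image2.Height, 0, new_image2.Format, new_image2.Type, new_image2.Data)
glBindTexture(GL_TEXTURE_2D, 0)

# In paintGL: just rebind and draw; the texcoord comes before its vertex.
glBindTexture(GL_TEXTURE_2D, node.TextureID)
glBegin(GL_QUADS)
for vertex in face:
    glTexCoord2f(texcoords[vertex][0], texcoords[vertex][1])
    glVertex3fv(vertices[vertex])
glEnd()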
I'm making a game using pygame + PyOpenGL, and right now I'm trying to make a video player in this context. To do so, I use ffmpeg to load different video formats, then convert each frame to an OpenGL texture, as shown below, and then play the video.
class Texture(object):
    def __init__(self, data, w=0, h=0):
        """
        Initialize the texture from 3 different types of data:
        filename = open the image, get its string and produce texture
        surface = get its string and produce texture
        string surface = gets its texture and uses the w and h provided
        """
        if type(data) == str:
            texture_data = self.load_image(data)
        elif type(data) == pygame.Surface:
            texture_data = pygame.image.tostring(data, "RGBA", True)
            self.w, self.h = data.get_size()
        elif type(data) == bytes:
            self.w, self.h = w, h
            texture_data = data
        self.texID = 0
        self.load_texture(texture_data)

    def load_image(self, data):
        texture_surface = pygame.image.load(data).convert_alpha()
        texture_data = pygame.image.tostring(texture_surface, "RGBA", True)
        self.w, self.h = texture_surface.get_size()
        return texture_data

    def load_texture(self, texture_data):
        self.texID = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, self.texID)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, self.w,
                     self.h, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     texture_data)
The problem is that when I load all the textures of a given video, my RAM usage goes through the roof, to about 800 MB. But it is possible to work around this by blitting each texture as it loads, as shown below.
def render():
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glDisable(GL_LIGHTING)
    glEnable(GL_TEXTURE_2D)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glClearColor(0, 0, 0, 1.0)

def Draw(texture, top, left, bottom, right):
    """
    Draw the image on the OpenGL screen
    """
    # Make sure we are looking at the position (0, 0, 0)
    glBindTexture(GL_TEXTURE_2D, texture.texID)
    glBegin(GL_QUADS)
    # The top left of the image must be at the indicated position
    glTexCoord2f(0.0, 1.0)
    glVertex2f(left, top)
    glTexCoord2f(1.0, 1.0)
    glVertex2f(right, top)
    glTexCoord2f(1.0, 0.0)
    glVertex2f(right, bottom)
    glTexCoord2f(0.0, 0.0)
    glVertex2f(left, bottom)
    glEnd()

def update(t):
    render()
    Draw(t, -0.5, -0.5, 0.5, 0.5)
    # Check for basic events on the pygame interface
    for event in pygame.event.get():
        BASIC_Game.QUIT_Event(event)
    pygame.display.flip()
Although this reduces RAM consumption to an acceptable level, it makes the loading time longer than the video itself.
I really don't understand why OpenGL works this way, but is there a way to create the textures efficiently without blitting each one first?
I can't tell for sure from the code in your question right now, but I'm going to guess it's because you're creating a new Texture instance for each frame, which means you're calling glGenTextures(1) for every frame of your video. This allocates a new buffer in memory for every frame and then stores a full, uncompressed version of the frame.
When you blit the image, you're not generating a new texture, just overwriting the old one. This is the solution you want, but the way you're implementing it is inefficient.
There are a number of ways to change the data in a texture without blitting on the CPU (assuming pygame blitting) to make things go faster; some are listed in this answer:
https://stackoverflow.com/a/13248668/1122135
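One standard pattern along those lines is to allocate a single frame-sized texture once and stream each decoded frame into it with glTexSubImage2D, which overwrites the existing storage instead of allocating a new texture per frame. A minimal sketch (frame_w, frame_h and the get_next_frame_bytes() decoder call are hypothetical):

# One-time setup: allocate storage for one frame-sized texture.
texID = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texID)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, frame_w, frame_h, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, None)  # None allocates uninitialized storage

# Per frame: overwrite the pixels in place, with no new allocation.
frame_bytes = get_next_frame_bytes()  # hypothetical: raw RGBA bytes from ffmpeg
glBindTexture(GL_TEXTURE_2D, texID)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, frame_w, frame_h,
                GL_RGBA, GL_UNSIGNED_BYTE, frame_bytes)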
I am new to using textures in pyglet (and OpenGL generally), and I am stumped by what is probably a dumb mistake: I am attempting to apply a texture, derived from a PNG image, to a square that is composed of two triangles. I can successfully use indexed vertex lists to define the geometry, but when I specify texture coordinates (u, v) for each vertex of each triangle, I get:
Traceback (most recent call last):
  File "test_tex.py", line 37, in <module>
    ('t2f', texture_coords))
ValueError: Can only assign sequence of same size
suggesting that my list of texture coordinates is not the correct size. Does anyone see the problem? A related post that did not quite help me: Triangle texture mapping OpenGL
Please check out my code below for details. Thanks!
import pyglet

config = pyglet.gl.Config(sample_buffers=1, samples=4,
                          depth_size=16, double_buffer=True)
window = pyglet.window.Window(resizable=True, config=config, vsync=True)

# create vertex data
num_verts = 4
side_length = 1.0
half_side = side_length / 2.0

# vertex positions of a square centered at the origin,
# ordered counter-clockwise, starting at lower right corner
vertex_positions = [ half_side, -half_side,
                     half_side,  half_side,
                    -half_side,  half_side,
                    -half_side, -half_side]

# six pairs of texture coords, one pair (u,v) for each vertex
# of each triangle
texture_coords = [1.0, 0.0,
                  1.0, 1.0,
                  0.0, 1.0,
                  0.0, 1.0,
                  0.0, 0.0,
                  1.0, 0.0]

# indices of the two triangles that make the square
# counter-clockwise orientation
triangle_indices = [0, 1, 2,
                    2, 3, 0]

# use indexed vertex list
vertex_array = pyglet.graphics.vertex_list_indexed(num_verts,
                                                   triangle_indices,
                                                   ('v2f', vertex_positions),
                                                   ('t2f', texture_coords))

# enable face culling, depth testing
pyglet.gl.glEnable(pyglet.gl.GL_CULL_FACE)
pyglet.gl.glEnable(pyglet.gl.GL_DEPTH_TEST)

# texture set up
pic = pyglet.image.load('test.png')
texture = pic.get_texture()
pyglet.gl.glEnable(texture.target)
pyglet.gl.glBindTexture(texture.target, texture.id)

# set modelview matrix
pyglet.gl.glMatrixMode(pyglet.gl.GL_MODELVIEW)
pyglet.gl.glLoadIdentity()
pyglet.gl.gluLookAt(0, 0, 5, 0, 0, 0, 0, 1, 0)

@window.event
def on_resize(width, height):
    pyglet.gl.glViewport(0, 0, width, height)
    pyglet.gl.glMatrixMode(pyglet.gl.GL_PROJECTION)
    pyglet.gl.glLoadIdentity()
    pyglet.gl.gluPerspective(45.0, width / float(height), 1.0, 100.0)
    return pyglet.event.EVENT_HANDLED

@window.event
def on_draw():
    window.clear()
    vertex_array.draw(pyglet.gl.GL_TRIANGLES)

pyglet.app.run()
It's probably complaining because you have 6 pairs of texture coordinates, but only 4 vertices. With an indexed vertex list you need one set of texture coordinates per vertex (shared vertices reuse theirs), so there should be 4 pairs of floats in your texture_coords array:
texture_coords = [1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
0.0, 0.0]
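Because the indices (0, 1, 2, 2, 3, 0) reuse vertices 0 and 2 for both triangles, each shared corner also reuses its single (u, v) pair. A quick sanity check of the sizes, using the same names as above:

# Each attribute list must hold exactly num_verts pairs for 'v2f'/'t2f'.
assert len(vertex_positions) == 2 * num_verts
assert len(texture_coords) == 2 * num_verts  # fails with the original 6-pair list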
I am attempting to make a simple drawing using OpenGL. However, the depth buffer doesn't appear to be working.
Other people with a similar problem are typically doing one of two things wrong:
Not including glEnable(GL_DEPTH_TEST)
Bad clipping values
However, my code has neither of these problems.
...
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
gluPerspective(25.0, 1.0, 10.0, 200.0);

// Set the camera location
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(20.0, 10.0, 50.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);

// Enable depth test
glEnable(GL_DEPTH_TEST);

// Cull backfacing polygons
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);

drawCoordinateAxis();
drawBox(5.0, 2.0, 5.0, 0.8, 0.0, 0.0);
glTranslated(1.0, -1.0, 1.0);  // The box is 5x2x5; it is shifted 1 unit down and 1 in the x and z directions
drawBox(5.0, 2.0, 5.0, 0.0, 1.0, 1.0);
...
When I execute my code, this is drawn: http://imgur.com/G9y41O1
Note that the blue box and the red box collide, so the red box should cover part of the blue box.
The functions drawCoordinateAxis() and drawBox() just draw a few primitives, nothing fancy inside.
I am running this on Debian squeeze.
void reshape(GLint width, GLint height)
{
    g_Width = width;
    g_Height = height;
    glViewport(0, 0, g_Width, g_Height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(65.0, (float)g_Width / g_Height, g_nearPlane, g_farPlane);
    glMatrixMode(GL_MODELVIEW);
}
So set the matrix mode to GL_PROJECTION first, then call gluPerspective, and then switch back to GL_MODELVIEW mode. That way the perspective transform ends up in the projection matrix instead of being multiplied into your modelview matrix.