I wanted to implement shaders in my pygame program. As far as I know, it is not possible to apply shaders directly to pygame, so I tried using PyOpenGL with GLSL shaders. I've already written a working program that uses shaders, but it's not very fast.
It needs to run several times per tick for different purposes. Also, I want it to take a pygame surface as input, apply a shader to it, and output a pygame surface to work with later. It accepts variables that are passed as inputs to the shader.
Running the shader once per tick I get about 100 fps; without the shader I get about 500 fps in a program that displays a single image with pygame. I've never used OpenGL before, so with some tweaking it will probably run faster.
Do you have code optimizations or ideas to make it run faster?
Are there better solutions and alternatives?
Thanks!
main.py
import pygame, sys, pygame.freetype
import shader
pygame.init()
shader.init()
font = pygame.freetype.SysFont(None, 20)
clock = pygame.time.Clock()
loadedImage = pygame.image.load("image.png")
shaderObj = shader.create("vertex.txt", "fragment.txt")
window = pygame.display.set_mode(loadedImage.get_size())
while True:
    image = shader.apply(shaderObj, loadedImage)
    window.blit(image, (0,0))
    font.render_to(window, (5, 5), str(clock.get_fps()), (0, 0, 0), (255, 255, 255))
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            shader.quit()
            sys.exit()
    clock.tick(100)
shader.py
from OpenGL.GL import *
from OpenGL.GL.shaders import *
from pygame.locals import *
import pygame, numpy, sys, glfw
pygame.init()
if not glfw.init():
    sys.exit()
def getFileContent(file):
    with open(file, 'r') as file:
        content = file.read()
    return content

def quit():
    global glfwwindow
    glfw.destroy_window(glfwwindow)
# Create a glfw window
# Its size will be edited later
def init():
    global width, height, glfwwindow
    width, height = 1, 1
    glfw.window_hint(glfw.VISIBLE, False)
    glfwwindow = glfw.create_window(width, height, "", None, None)
    glfw.make_context_current(glfwwindow)
    glViewport(0, 0, width, height)
# Do as much as possible before using the shader
# Output an array of data, which will not change over time
def create(vertexShaderPath, fragmentShaderPath):
    global timeMessage
    vertices = numpy.array((0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0), dtype=numpy.float32)
    texcoords = numpy.array((-1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0), dtype=numpy.float32)
    vertexShader = compileShader(getFileContent(vertexShaderPath), GL_VERTEX_SHADER)
    fragmentShader = compileShader(getFileContent(fragmentShaderPath), GL_FRAGMENT_SHADER)
    shader = (vertices, texcoords, vertexShader, fragmentShader)
    return shader
# You need to create a shader first
# Give optional variables that can be used by the GLSL shader
def apply(shader, image, **variables):
    vertices, texcoords, vertexShader, fragmentShader = shader
    global glfwwindow, timeMessage
    width, height = image.get_size()
    glfw.set_window_size(glfwwindow, width, height)
    glViewport(0, 0, width, height)
    glClear(GL_COLOR_BUFFER_BIT)
    textureData = pygame.image.tostring(image, "RGB", 1)
    glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, textureData)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    shaderProgram = glCreateProgram()
    glAttachShader(shaderProgram, vertexShader)
    glAttachShader(shaderProgram, fragmentShader)
    glLinkProgram(shaderProgram)
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, vertices)
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texcoords)
    glEnableVertexAttribArray(0)
    glEnableVertexAttribArray(1)
    glFlush()
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glUseProgram(shaderProgram)
    # Send variables to the shader if provided
    for variable in variables:
        location = glGetUniformLocation(shaderProgram, variable)
        glUniform1f(location, variables[variable])
    glDrawArrays(GL_QUADS, 0, 4)
    # Turn image to pygame surface
    data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
    return pygame.transform.flip(pygame.image.frombuffer(data, (width, height), "RGBA"), False, True)
vertex.txt
#version 120
attribute vec2 vPosition;
attribute vec2 vTexcoords;
varying vec2 fTexcoords;
void main()
{
gl_Position = vec4(vPosition.x, vPosition.y, 0.0, 1.0);
fTexcoords = vTexcoords;
}
fragment.txt
#version 120
varying vec2 fTexcoords;
uniform sampler2D textureObj;
void main()
{
gl_FragColor = texture2D(textureObj, fTexcoords);
}
You are doing far more work per frame than you need to; that is why it is not running as fast as you expect.
Your shader.apply() function is compiling the shader and uploading the texture to GPU memory every frame. This should only be done once, at start-up.
Your inner loop should only need the glUseProgram and glDrawArrays calls.
You should also be creating names for your objects and binding them; you can only get away without that for very simple programs that use one texture and one VAO.
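To make that concrete, here is a rough, untested sketch of how shader.py could be split so that shader compilation/linking, texture-object creation and attribute setup happen once in create(), while apply() only uploads new pixel data, sets the uniforms and draws. It reuses getFileContent() and the global glfwwindow from the code above, so treat it as an outline rather than a drop-in replacement:
import pygame, numpy, glfw
from OpenGL.GL import *
from OpenGL.GL.shaders import compileShader, compileProgram

def create(vertexShaderPath, fragmentShaderPath):
    # One-time setup: link the program, create a texture object, set its
    # parameters, and keep the client-side vertex arrays alive.
    program = compileProgram(
        compileShader(getFileContent(vertexShaderPath), GL_VERTEX_SHADER),
        compileShader(getFileContent(fragmentShaderPath), GL_FRAGMENT_SHADER))
    texture = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, texture)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    vertices = numpy.array((0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0), dtype=numpy.float32)
    texcoords = numpy.array((-1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0), dtype=numpy.float32)
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, vertices)
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texcoords)
    glEnableVertexAttribArray(0)
    glEnableVertexAttribArray(1)
    return (program, texture, vertices, texcoords)

def apply(shader, image, **variables):
    # Per-call work only: resize, upload the new pixels, draw, read back.
    # (If the input surface never changes, the glTexImage2D upload could
    # also move into create().)
    program, texture, vertices, texcoords = shader
    global glfwwindow
    width, height = image.get_size()
    glfw.set_window_size(glfwwindow, width, height)
    glViewport(0, 0, width, height)
    glBindTexture(GL_TEXTURE_2D, texture)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, pygame.image.tostring(image, "RGB", 1))
    glClear(GL_COLOR_BUFFER_BIT)
    glUseProgram(program)
    for name, value in variables.items():
        glUniform1f(glGetUniformLocation(program, name), value)
    glDrawArrays(GL_QUADS, 0, 4)
    data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
    return pygame.transform.flip(
        pygame.image.frombuffer(data, (width, height), "RGBA"), False, True)
Note that the glReadPixels round trip back into a pygame surface on every call is itself expensive; if you can draw directly into an OpenGL-enabled pygame window instead, you avoid that readback entirely.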
Here is the code:
int main(){
//init gl environment
//...
//create textures for pass 1
GLuint normal_color_output;
glCreateTextures(GL_TEXTURE_2D_MULTISAMPLE, 1, &normal_color_output);
glTextureStorage2DMultisample(normal_color_output, 8, GL_RGBA32F, 1000, 800, GL_TRUE);
GLuint high_color_output;
glCreateTextures(GL_TEXTURE_2D_MULTISAMPLE, 1, &high_color_output);
glTextureStorage2DMultisample(high_color_output,8, GL_R11F_G11F_B10F, 1000, 800,GL_TRUE);
//init framebuffer
GLuint render_buffer;
glCreateRenderbuffers(1, &render_buffer);
glNamedRenderbufferStorageMultisample(render_buffer, 8, GL_DEPTH24_STENCIL8, 1000, 800);
GLuint framebuffer;
glCreateFramebuffers(1, &framebuffer);
glNamedFramebufferTexture(framebuffer, GL_COLOR_ATTACHMENT0, normal_color_output,0);
glNamedFramebufferTexture(framebuffer, GL_COLOR_ATTACHMENT1, high_color_output, 0);
glNamedFramebufferRenderbuffer(framebuffer, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, render_buffer);
const GLenum drawbuffers[] = {GL_COLOR_ATTACHMENT0,GL_COLOR_ATTACHMENT1};
glNamedFramebufferDrawBuffers(framebuffer, 2, drawbuffers);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
//init another framebuffer
//What I want to do here is implement my own MSAA color resolve.
GLuint mix_framebuffer;
glCreateFramebuffers(1, &mix_framebuffer);
GLuint mix_renderbuffer;
glCreateRenderbuffers(1, &mix_renderbuffer);
glNamedRenderbufferStorage(mix_renderbuffer, GL_DEPTH24_STENCIL8, 1000, 800);
GLuint normal_antialiasing_texture, hdr_antialiasing_texture;
glCreateTextures(GL_TEXTURE_2D, 1, &normal_antialiasing_texture);
glTextureStorage2D(normal_antialiasing_texture, 1, GL_RGBA32F, 1000, 800);
glCreateTextures(GL_TEXTURE_2D, 1, &hdr_antialiasing_texture);
glTextureStorage2D(hdr_antialiasing_texture, 1, GL_RGBA32F, 1000, 800);
glNamedFramebufferTexture(mix_framebuffer, GL_COLOR_ATTACHMENT0, normal_antialiasing_texture, 0);
glNamedFramebufferTexture(mix_framebuffer, GL_COLOR_ATTACHMENT1, hdr_antialiasing_texture, 0);
glNamedFramebufferDrawBuffers(mix_framebuffer,2, drawbuffers);
glNamedFramebufferRenderbuffer(mix_framebuffer, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, mix_renderbuffer);
glBindFramebuffer(GL_FRAMEBUFFER, mix_framebuffer);
//....
//draw commands
while (!glfwWindowShouldClose(window)) {
// pass 1
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
glUseProgram(program);
glUniformMatrix4fv(3, 1, GL_FALSE, glm::value_ptr(camera.GetViewMat()));
model.Render(program);
glPointSize(20.f);
glUseProgram(light_shader);// I draw a point to show the light's position
glUniformMatrix4fv(0, 1, GL_FALSE, glm::value_ptr(camera.GetViewMat()));
glDrawArrays(GL_POINTS, 0, 1);
//pass 2
glBindFramebuffer(GL_FRAMEBUFFER, mix_framebuffer);
glUseProgram(mix_program);
glBindTextureUnit(0, normal_color_output);
glBindTextureUnit(1, high_color_output);
glClear(GL_COLOR_BUFFER_BIT);
glNamedFramebufferDrawBuffers(mix_framebuffer, 2, drawbuffers);
glDrawArrays(GL_POINTS, 0, 1);
//...
}
}
I use a geometry shader to model a square; here is the code:
//mix_gs.glsl
#version 450 core
layout(points) in;
layout(triangle_strip) out;
layout(max_vertices = 4) out;
void main(){
gl_Position = vec4(-1,1,-1,1);
EmitVertex();
gl_Position = vec4(-1,-1,-1,1);
EmitVertex();
gl_Position = vec4(1,1,-1,1);
EmitVertex();
gl_Position = vec4(1,-1,-1,1);
EmitVertex();
EndPrimitive();
}
Here is mix_fs.glsl:
#version 450 core
layout(location = 0)out vec4 color;
layout(location = 1)out vec4 hdr_color;
layout(binding = 0) uniform sampler2DMS color_sdms;
layout(binding = 1) uniform sampler2DMS hdr_sdms;
void main(){
/*
for(int i=0;i<8;i++){
color += texelFetch(color_sdms,ivec2(gl_FragCoord.xy),i);
hdr_color += vec4(texelFetch(hdr_sdms,ivec2(gl_FragCoord.xy),i).xyz,1);
}
*/
color = vec4(1,0,0,1);//I just output a color
hdr_color = vec4(0,1,0,1);
}
I encountered a problem: during draw pass 2, GL does not output any color to the textures bound to mix_framebuffer.
Here is the debugging info in RenderDoc:
draw pass 1 texture output:
draw pass 2's geometry output:
draw pass 2 texture input:
draw pass 2 texture output:
You can see that draw pass 1's output was passed to draw pass 2's pipeline successfully, but there is no output to draw pass 2's textures. I don't know why.
If you don't see even the plain color, the first thing I'd recommend is to check how it was discarded. There are not that many options:
glColorMask. It's highly likely that this is not your case, since pass 1 works;
Wrong face culling and polygon winding order (CW, CCW). From your geometry shader, it looks like CW;
Blending options;
Depth-stencil state. I see you use glNamedRenderbufferStorage(mix_renderbuffer, GL_DEPTH24_STENCIL8, 1000, 800); what are the depth-stencil settings?
If everything above looks good, are there any glGetError messages? If that's OK, try removing MRT for debugging purposes and output only a single color in the second pass. If that works, there is probably some error in the MRT + depth buffer setup.
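To rule out the state-related causes above quickly, you can temporarily force a state in which nothing can mask or discard the fragments just before pass 2, and poll glGetError at the same time. A minimal sketch of such a debugging helper (written with PyOpenGL here for brevity; the calls are identical in C):
from OpenGL.GL import *

def neutralize_fragment_discard_state():
    # Debug helper: make sure none of the usual suspects can drop the fragments.
    glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE)  # color writes not masked
    glDisable(GL_CULL_FACE)                          # rule out winding/culling
    glDisable(GL_BLEND)                              # rule out blending setup
    glDisable(GL_DEPTH_TEST)                         # rule out depth rejection
    glDisable(GL_STENCIL_TEST)                       # rule out stencil rejection
    err = glGetError()
    if err != GL_NO_ERROR:
        print("GL error before pass 2: 0x%04x" % err)
If the quad shows up with this state forced, re-enable the pieces one by one to find the culprit.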
I want to make a program that shows the earth with a space texture as the background.
The earth is a 3D uniform with an earth texture (.bmp).
The space with the stars is a texture (.bmp).
I have summarized what I have to do:
Create a new Model Matrix
Position it at the same place where the camera is
Disable depth test before drawing
Reverse culling
This is the Load function:
void load(){
//Load The Shader
Shader simpleShader("src/shader.vert", "src/shader.frag");
g_simpleShader = simpleShader.program;
// Create the VAO where we store all geometry (stored in g_Vao)
g_Vao = gl_createAndBindVAO();
//Create vertex buffer for positions, colors, and indices, and bind them to shader
gl_createAndBindAttribute(&(shapes[0].mesh.positions[0]), shapes[0].mesh.positions.size() * sizeof(float), g_simpleShader, "a_vertex", 3);
gl_createIndexBuffer(&(shapes[0].mesh.indices[0]), shapes[0].mesh.indices.size() * sizeof(unsigned int));
gl_createAndBindAttribute(uvs, uvs_size, g_simpleShader, "a_uv", 2);
gl_createAndBindAttribute(normal, normal_size, g_simpleShader, "a_normal", 2);
//Unbind Everything
gl_unbindVAO();
//Store Number of Triangles (use in draw())
g_NumTriangles = shapes[0].mesh.indices.size() / 3;
//Paths of the earth and space textures
Image* image = loadBMP("assets/earthmap1k.bmp");
Image* space = loadBMP("assets/milkyway.bmp");
//Generate Textures
glGenTextures(1, &texture_id);
glGenTextures(1, &texture_id2);
//Bind Textures
glBindTexture(GL_TEXTURE_2D, texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id2);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//We assign the corresponding data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image->width, image->height, 0, GL_RGB, GL_UNSIGNED_BYTE, image->pixels);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, space->width, space->height, 0, GL_RGB, GL_UNSIGNED_BYTE, space->pixels);
}
This is the Draw function:
void draw(){
//1. Enable/Disable
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDisable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
//2. Shader Activation
glUseProgram(g_simpleShader);
//3. Get All Uniform Locations
//Space:
GLuint model_loc2 = glGetUniformLocation (g_simpleShader, "u_model");
GLuint u_texture2 = glGetUniformLocation(g_simpleShader, "u_texture2");
GLuint u_light_dir2 = glGetUniformLocation(g_simpleShader,"u_light_dir2");
//Earth
GLuint model_loc = glGetUniformLocation(g_simpleShader, "u_model");
GLuint projection_loc = glGetUniformLocation(g_simpleShader, "u_projection");
GLuint view_loc = glGetUniformLocation(g_simpleShader, "u_view");
GLuint u_texture = glGetUniformLocation(g_simpleShader, "u_texture");
GLuint u_light_dir = glGetUniformLocation(g_simpleShader, "u_light_dir");
//4. Get Values From All Uniforms
mat4 model_matrix2 = translate(mat4(1.0f), vec3(1.0f,-3.0f,1.0f));
mat4 model_matrix = translate(mat4(1.0f), vec3(0.0f, -0.35f, 0.0f));
mat4 projection_matrix = perspective(60.0f, 1.0f, 0.1f, 50.0f);
mat4 view_matrix = lookAt(vec3(1.0f, -3.0f, 1.0f), vec3(0.0f, 0.0f, 0.0f), vec3(0.0f, 1.0f, 0.0f));
//5. Upload Uniforms To Shader
glUniformMatrix4fv(model_loc2, 1, GL_FALSE, glm::value_ptr(model_matrix2));
glUniformMatrix4fv(model_loc, 1, GL_FALSE, glm::value_ptr(model_matrix));
glUniformMatrix4fv(projection_loc, 1, GL_FALSE, glm::value_ptr(projection_matrix));
glUniformMatrix4fv(view_loc, 1, GL_FALSE, glm::value_ptr(view_matrix));
glUniform1i(u_texture, 0);
glUniform3f(u_light_dir, g_light_dir.x, g_light_dir.y, g_light_dir.z);
glUniform1i(u_texture2, 1);
glUniform3f(u_light_dir2, g_light_dir.x, g_light_dir.y, g_light_dir.z);
//6. Activate Texture Unit 0 and Bind our Texture Object
glActiveTexture(GL_TEXTURE0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id2);
//7. Bind VAO
gl_bindVAO(g_Vao);
//8. Draw Elements
glDrawElements(GL_TRIANGLES, 3 * g_NumTriangles, GL_UNSIGNED_INT, 0);
}
Also I have two Fragment Shaders:
The first one returns this:
fragColor = vec4(final_color, 1.0);
The second one returns this:
fragColor = vec4(texture_color.xyz, 1.0);
Also the Vertex Shader returns the position of the vertex:
gl_Position = u_projection * u_view * u_model * vec4( a_vertex , 1.0 );
When I compile, it only shows the earth, while it should show the earth with space as the background. I have reviewed the code several times but I cannot find out what is wrong.
Supposed result:
My Result
If I see it right, then among other things you are binding the textures wrongly:
glActiveTexture(GL_TEXTURE0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id2);
should be:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_id);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id2);
but I prefer that the last set active texture unit is 0 ...
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texture_id2);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_id);
That will save you a lot of trouble when you start combining this with single-texture-unit code ... I also hope you are properly unbinding the used texture units, for the same reasons...
You have an ugly seam at the 0/360 deg edge of longitude.
This might be caused by a wrongly computed normal for the lighting, by a texture that is not seamless, or simply by forgetting to duplicate the edge points with the correct texture coordinates for the last patch. See:
Applying map of the earth texture a Sphere
You can also add atmosphere,bump map, clouds to your planet:
Bump-map a sphere with a texture map
Andrea is right...
Set the matrices to the identity matrix and render a (+/-)1.0 rectangle at z = 0.0 (with aspect-ratio correction) without depth test, face culling, or depth write ... That way you will avoid jitter and flickering caused by floating-point errors.
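For illustration only, here is a sketch of such a background pass (written in Python with old fixed-function immediate-mode calls purely to show the idea; in the shader-based code above, the equivalent is to upload identity u_model/u_view/u_projection matrices and draw the quad first; space_texture stands for the texture_id2 created in load()):
from OpenGL.GL import *

def draw_background_quad(space_texture):
    # Identity matrices: the quad is given directly in normalized device coordinates.
    glMatrixMode(GL_PROJECTION); glLoadIdentity()
    glMatrixMode(GL_MODELVIEW);  glLoadIdentity()
    glDisable(GL_DEPTH_TEST)   # the background must not be depth-tested
    glDepthMask(GL_FALSE)      # ...and must not write depth either
    glDisable(GL_CULL_FACE)
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, space_texture)
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 0.0); glVertex2f(-1.0, -1.0)
    glTexCoord2f(1.0, 0.0); glVertex2f( 1.0, -1.0)
    glTexCoord2f(1.0, 1.0); glVertex2f( 1.0,  1.0)
    glTexCoord2f(0.0, 1.0); glVertex2f(-1.0,  1.0)
    glEnd()
    glDepthMask(GL_TRUE)       # restore depth state for the earth pass
    glEnable(GL_DEPTH_TEST)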
A skybox is better, but there are also other options to enhance this:
Is it possible to make realistic n-body solar system simulation in matter of size and mass?
and all the sublinks in there, especially the stars. You can combine a skybox and a stellar catalog together, and much, much more...
I don't think this is a problem with the image I'm loading. The image resolution is 256x256, so it's not the power-of-two issue. I've looked at other segfaults people got with glTexImage2D and still can't seem to get rid of the segfault. Sorry that the code isn't in C/C++ (I don't think that's the problem either), but it should still be easy to understand.
let surface = sdlimage.load("image.png") # equivalent to IMG_Load call in C
if surface.isNil:
  echo "Image couldn't be loaded: ", sdl2.getError()
  quit 1
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
var tex: cuint
glGenTextures(1, addr tex)
glBindTexture(GL_TEXTURE_2D, tex)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
glPixelStorei(GL_UNPACK_ROW_LENGTH, surface.pitch)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA.GLint, surface.w.GLsizei, surface.h.GLsizei, 0, GL_RGBA, GL_UNSIGNED_BYTE, surface.pixels) # segfault
UPDATE:
I managed to get it to not segfault thanks to user1118321's answer (I checked the pixel format and saw that SDL used an "indexed" format, which I saw another user say was the problem when they fixed this issue for themselves; creating a new surface seems to be the right solution), but now it shows nothing on the screen. It's black when I set glClearColor to black, and white when I set glClearColor to white.
Updated image loading code:
let surface = sdlimage.load("image.png")
if surface.isNil:
  echo "Image couldn't be loaded: ", sdl2.getError()
  quit 1
var w = surface.w # may need to make this the next power of two
var h = surface.h # and this
var bpp: cint
var Rmask, Gmask, Bmask, Amask: uint32
if not pixelFormatEnumToMasks(SDL_PIXELFORMAT_ABGR8888, bpp,
                              Rmask, Gmask, Bmask, Amask):
  quit "pixel format enum to masks " & $sdl2.getError()
let newSurface = createRGBSurface(0, w, h, bpp,
                                  Rmask, Gmask, Bmask, Amask)
discard surface.setSurfaceAlphaMod(0xFF)
discard surface.setSurfaceBlendMode(BlendMode_None)
blitSurface(surface, nil, newSurface, nil)
var tex: cuint
glGenTextures(1, addr tex)
glBindTexture(GL_TEXTURE_2D, tex)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, 4, w.GLsizei, h.GLsizei, 0, GL_RGBA, GL_UNSIGNED_BYTE, newSurface.pixels)
Image rendering code:
glUseProgram(shaderProgram)
glClear(GL_COLOR_BUFFER_BIT or GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(0.0, 0.0, -7.0)
glColor3f(1.0, 1.0, 1.0)
glBegin(GL_QUADS)
glTexCoord2i(0, 0)
glVertex3f(-0.5, -1.9, 0.0)
glTexCoord2i(1, 0)
glVertex3f(0.5, -1.9, 0.0)
glTexCoord2i(0, 1)
glVertex3f(-0.5, -1.4, 0.0)
glTexCoord2i(1, 1)
glVertex3f(0.5, -1.4, 0.0)
glEnd()
If shaders have something to do with it, here's my vertex shader:
#version 330 core
in vec4 data;
out vec3 ourColor;
void main() {
gl_Position = vec4(data.x, data.y, data.z, 1.0);
ourColor = vec3(data.w, data.w, data.w);
}
And the fragment shader:
#version 330 core
precision highp float;
in vec3 ourColor;
out vec4 color;
void main() {
color = vec4(ourColor, 1.0);
}
You should check glGetError() to see if there are any OpenGL errors occurring. You should also check the surface.pixelFormat to see whether you actually have 4 bytes per pixel in your image. If it's just RGB data and not RGBA, then glTexImage2D() will read past the end of the image data and likely crash with a segmentation fault.
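The same idea expressed in Python with pygame (which wraps SDL surfaces the same way): check the surface's bytes per pixel, convert to a known 32-bit RGBA layout when needed, and only then hand the pixels to glTexImage2D. This is a rough sketch and assumes a window and GL context have already been created:
import pygame
from OpenGL.GL import *

def upload_rgba_texture(path):
    surface = pygame.image.load(path)
    # The check from the answer: make sure the surface really holds 4 bytes per
    # pixel before telling glTexImage2D to read RGBA data out of it.
    if surface.get_bytesize() != 4:
        surface = surface.convert_alpha()   # needs pygame.display.set_mode() first
    data = pygame.image.tostring(surface, "RGBA", True)
    w, h = surface.get_size()
    tex = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, tex)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data)
    print("glGetError:", glGetError())      # should print 0 (GL_NO_ERROR)
    return tex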
As far as I have understood, the vertex fetch stage is encapsulated by the VAO, and the VAO is required to hold the vertex fetch stage state that pipes data between the buffer objects and the vertex attributes, as well as describing the format of the data in the buffer objects.
Both books that I have been reading on the subject (the Red Book and the Blue Book) mention explicitly that the VAO must contain the vertex fetch stage state data.
However, when I actually create two texture objects and simply set up the vertex format once WITHOUT a VAO in which to store this information about the buffer, it still runs fine without any hiccups; I then rebind the first object again, and again it works without any issues. So where is this information about the formatting of the data in the buffer object pulled from?
I even upload buffer data a second time to the same buffer object, which I would expect to reset any previous information held there, and the picture still renders fine to the window.
So what exactly is going on? The books say one thing, yet what happens in reality is completely different.
Can somebody explain what IS actually needed here and what isn't? What is actually going on?
When do we actually need a VAO, and when can we do without one?
What's the point of the extra code and processing when it is not needed?
The code below:
int main(){
int scrW=1280, scrH=720;
//create context and shader program
init(scrW, scrH);
createShaders();
//create texture objects and load data from image to server memory
char object[2][25];
strcpy(object[0], "back.bmp");
strcpy(object[1], "256x256.bmp");
//triangle 1
GLfloat vertices[] =
// X Y U V
{ -1.0, -1.0, 0.0, 0.0,
1.0, -1.0, 1.0, 0.0,
1.0, 1.0, 1.0, 1.0,
-1.0, 1.0, 0.0, 1.0};
//glPointSize(40.0f);
//create and bound vertex buffer object(memory buffers)
GLuint vbo1 = createVbo();
//The state set by glVertexAttribPointer() is stored in the currently bound vertex array object (VAO) if vertex array object bound
//associates the format of the data for the currently bound buffer object to the vertex attribute so opengl knows how much and how to read it
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), 0);
glEnableVertexAttribArray(0);
//shader vertex attribute for texture coordinates
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), (const GLvoid*)(2 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
//upload vertices to buffer memory
//will upload data to currently bound/active buffer object
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
//load and create texture object from image data
GLuint tex1 = createTexture(object[0]);
glDrawArrays(GL_QUADS, 0, 4);
glXSwapBuffers ( dpy, glxWin );
sleep(3);
GLuint tex2 = createTexture(object[1]);
glDrawArrays(GL_QUADS, 0, 4);
glXSwapBuffers ( dpy, glxWin );
sleep(3);
glBindTexture(GL_TEXTURE_2D, tex1);
glDrawArrays(GL_QUADS, 0, 4);
glXSwapBuffers ( dpy, glxWin );
sleep(3);
//////////////de-initialize
glXMakeContextCurrent( dpy, 0, 0, NULL );
glXDestroyContext( dpy, context );
glXDestroyWindow(dpy, glxWin);
XDestroyWindow( dpy, win );
XCloseDisplay( dpy );
return 0;
}
and the shaders
const char* vertex_shader =
"#version 400\n"
"layout(location = 0) in vec2 vp;"
"layout(location = 1) in vec2 tex;"
"out vec2 texCoord;"
"void main () {"
" gl_Position = vec4 (vp, 0.0f, 1.0f);"
" texCoord = tex; "
"}";
const char* fragment_shader =
"#version 400\n"
"uniform sampler2D s;"
"in vec2 texCoord;"
"out vec4 color;"
"void main () {"
"color = texture(s, texCoord);"
"}";
In order to avoid any confusion, here is the init() procedure:
static int att[] =
{
GLX_X_RENDERABLE , True,
GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
GLX_RENDER_TYPE , GLX_RGBA_BIT,
GLX_X_VISUAL_TYPE , GLX_TRUE_COLOR,
GLX_RED_SIZE , 8,
GLX_GREEN_SIZE , 8,
GLX_BLUE_SIZE , 8,
GLX_ALPHA_SIZE , 8,
GLX_DEPTH_SIZE , 24,
GLX_STENCIL_SIZE , 8,
GLX_DOUBLEBUFFER , True,
//GLX_SAMPLE_BUFFERS , 1,
//GLX_SAMPLES , 4,
None
};
Display *dpy;
Window root;
XVisualInfo *vi;
Colormap cmap;
XSetWindowAttributes swa;
Window win;
GLXContext context;
GLXFBConfig *fbc;
GLXWindow glxWin;
int fbcount;
void init(int width, int height){
//set and choose displays for creating window
dpy = XOpenDisplay(NULL);
if (!dpy){
printf("Failed to open X display\n");
exit(1);
}
root = DefaultRootWindow(dpy);
//request a framebuffer configuration
fbc = glXChooseFBConfig(dpy, DefaultScreen(dpy), att, &fbcount);
if (!fbc){
printf( "Failed to retrieve a framebuffer config\n" );
exit(1);
}
vi = glXGetVisualFromFBConfig( dpy, fbc[0] );
if(vi==NULL){
printf("Error getting visual info\n");
exit(1);
}
swa.colormap = XCreateColormap( dpy, RootWindow( dpy, vi->screen ), vi->visual, AllocNone );
swa.background_pixmap = None ;
swa.border_pixel = 0;
swa.event_mask = StructureNotifyMask;
//Window XCreateWindow(display, parent, x, y, width, height, border_width, depth, class, visual, valuemask, attributes)
win = XCreateWindow( dpy, RootWindow( dpy, vi->screen ), 0, 0, width, height, 0, vi->depth, InputOutput, vi->visual, CWBorderPixel|CWColormap|CWEventMask, &swa );
if ( !win ){
printf( "Failed to create window.\n" );
exit(1);
}
context = glXCreateNewContext( dpy, fbc[0], GLX_RGBA_TYPE, NULL, True );
glxWin = glXCreateWindow(dpy, fbc[0], win, NULL);
XMapWindow(dpy, win);
glXMakeContextCurrent(dpy, glxWin, glxWin, context);
// start GLEW extension handler
glewExperimental = GL_TRUE;
GLuint err = glewInit();
if(err!=GLEW_OK){
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
exit(1);
}
XSelectInput(dpy, win, ButtonPressMask|KeyPressMask);
// tell GL to only draw onto a pixel if the shape is closer to the viewer
//glEnable (GL_DEPTH_TEST); // enable depth-testing
//glDepthFunc (GL_LESS); // depth-testing interprets a smaller value as "closer"
}
If you use a compatibility OpenGL context, you don't need a VAO. In a sense, there is a "default" VAO which is always bound. This is how it works in OpenGL 2.x, and this is part of what the "compatibility" means in "compatibility profile".
If you use a core OpenGL context, you do need a VAO. If you don't have one, your code simply won't work. If you want to continue pretending you don't need a VAO, you can create a single VAO and keep it bound for the entire duration of your program.
The issue of choosing a core vs. compatibility profile has its nuances, but in general it is recommended to request a core profile if you are developing a new program. Not all systems have great support for compatibility profiles anyway: Mesa limits compatibility profiles to 3.0 and OS X limits them to 2.1. If you want a core profile, you have to explicitly request it when you create the context.
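For example, the "create a single VAO and keep it bound" fallback mentioned above amounts to just two calls (sketched with PyOpenGL; the equivalent calls exist in C):
from OpenGL.GL import *

# One-time setup, right after the core-profile context is made current:
vao = glGenVertexArrays(1)
glBindVertexArray(vao)  # stays bound for the program's lifetime, so later
                        # glVertexAttribPointer / glEnableVertexAttribArray
                        # calls have a VAO to record their state into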
I would like to know how to make OpenGL not "blur" an upscaled texture, as it seems that this blurring is the default for transformations. The texture is a POT PNG file. The code used to define a texture and put it on the screen is this:
class Texture():
    # simple texture class
    # designed for 32 bit png images (with alpha channel)
    def __init__(self, fileName):
        self.texID = 0
        self.LoadTexture(fileName)

    def LoadTexture(self, fileName):
        try:
            textureSurface = pygame.image.load(fileName).convert_alpha()
            textureData = pygame.image.tostring(textureSurface, "RGBA", True)
            self.w, self.h = textureSurface.get_size()
            self.texID = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, self.texID)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, textureSurface.get_width(),
                         textureSurface.get_height(), 0, GL_RGBA, GL_UNSIGNED_BYTE,
                         textureData)
        except Exception as E:
            print(E)
            print("can't open the texture: %s" % (fileName))

    def __del__(self):
        glDeleteTextures(self.texID)

    def get_width(self):
        return self.w

    def get_height(self):
        return self.h
def blit(texture, x, y):
    """
    Function that blits a given texture on the screen
    """
    #We put the texture onto the screen
    glBindTexture(GL_TEXTURE_2D, texture.texID)
    #Now we must position the image
    glBegin(GL_QUADS)
    #We calculate each of the points relative to the center of the screen
    top = -y/(HEIGHT//2) + 1.0
    left = x/(WIDTH//2) - 1.0
    right = left + texture.w/(WIDTH//2)
    down = top - texture.h/(HEIGHT//2)
    #We position each point of the image
    glTexCoord2f(0.0, 1.0)
    glVertex2f(left, top)
    glTexCoord2f(1.0, 1.0)
    glVertex2f(right, top)
    glTexCoord2f(1.0, 0.0)
    glVertex2f(right, down)
    glTexCoord2f(0.0, 0.0)
    glVertex2f(left, down)
    glEnd()
I configured OpenGL as follows:
def ConfigureOpenGL(w, h):
    #glShadeModel(GL_SMOOTH)
    #glClearColor(0.0, 0.0, 0.0, 1.0)
    #glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glViewport(0, 0, w, h)
    glMatrixMode(GL_PROJECTION)
    #glLoadIdentity()
    #gluOrtho2D(-8.0, 8.0, -6.0, 6.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glShadeModel(GL_SMOOTH)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClearDepth(1.0)
    glDisable(GL_DEPTH_TEST)
    glDisable(GL_LIGHTING)
    glDepthFunc(GL_LEQUAL)
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
    glEnable(GL_BLEND)
Surface = pygame.display.set_mode((WIDTH, HEIGHT), OPENGL|DOUBLEBUF)#|FULLSCREEN)
ConfigureOpenGL(WIDTH, HEIGHT)
Before putting anything on the screen, I also call this method:
def OpenGLRender(self):
    """
    Used to prepare the screen to render
    """
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glDisable(GL_LIGHTING)
    glEnable(GL_TEXTURE_2D)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    glClearColor(1.0, 1.0, 1.0, 1.0)
I'm using PyOpenGL 3.0.2
Use GL_NEAREST in your glTexParameteri() calls:
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST)