I'm having problems with my OpenGL rendering. RAM usage grows absurdly, to the point where the entire system freezes. I've found that if I comment out the entire render function, no memory grows at all, so my OpenGL render function must be allocating memory for something that I'm not releasing.
Can you identify what the problem is?
PS: the block inside the if actually runs only once, so the memory it allocates is only allocated a single time.
This is my OpenGL render function:
void OpenGlVideoQtQuickRenderer::render()
{
    if (this->firstRun) {
        std::cout << "Creating QOpenGLShaderProgram " << std::endl;
        this->firstRun = false;
        program = new QOpenGLShaderProgram();
        initializeOpenGLFunctions();
        //this->m_F = QOpenGLContext::currentContext()->functions();
        datas[0] = new unsigned char[width*height];   //Y
        datas[1] = new unsigned char[width*height/4]; //U
        datas[2] = new unsigned char[width*height/4]; //V
        std::cout << program->addShaderFromSourceCode(QOpenGLShader::Fragment, tString2) << std::endl;
        std::cout << program->addShaderFromSourceCode(QOpenGLShader::Vertex, vString2) << std::endl;
        program->bindAttributeLocation("vertexIn", A_VER);
        program->bindAttributeLocation("textureIn", T_VER);
        std::cout << "program->link() = " << program->link() << std::endl;
    }
    program->bind();

    glVertexAttribPointer(A_VER, 2, GL_FLOAT, 0, 0, ver);
    glEnableVertexAttribArray(A_VER);
    glVertexAttribPointer(T_VER, 2, GL_FLOAT, 0, 0, tex);
    glEnableVertexAttribArray(T_VER);

    unis[0] = program->uniformLocation("tex_y");
    unis[1] = program->uniformLocation("tex_u");
    unis[2] = program->uniformLocation("tex_v");

    glGenTextures(3, texs);
    //Y
    glBindTexture(GL_TEXTURE_2D, texs[0]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, 0);
    //U
    glBindTexture(GL_TEXTURE_2D, texs[1]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, width/2, height/2, 0, GL_RED, GL_UNSIGNED_BYTE, 0);
    //V
    glBindTexture(GL_TEXTURE_2D, texs[2]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, width/2, height/2, 0, GL_RED, GL_UNSIGNED_BYTE, 0);

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texs[0]);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, datas[0]);
    glUniform1i(unis[0], 0);

    glActiveTexture(GL_TEXTURE0 + 1);
    glBindTexture(GL_TEXTURE_2D, texs[1]);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width/2, height/2, GL_RED, GL_UNSIGNED_BYTE, datas[1]);
    glUniform1i(unis[1], 1);

    glActiveTexture(GL_TEXTURE0 + 2);
    glBindTexture(GL_TEXTURE_2D, texs[2]);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width/2, height/2, GL_RED, GL_UNSIGNED_BYTE, datas[2]);
    glUniform1i(unis[2], 2);

    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

    program->disableAttributeArray(A_VER);
    program->disableAttributeArray(T_VER);
    program->release();
}
The code creates three new textures every frame (glGenTextures) without ever releasing them (glDeleteTextures). Either delete them at the end of the render method, or, even better, create the textures only once in the first-run block and then just upload new data to them each frame.
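A minimal sketch of that second approach, reusing the names from the code above (texs, datas, width, height); the texture objects and their storage are created once inside the first-run block, and the per-frame path only re-uploads the plane data:

if (this->firstRun) {
    // ... shader setup as above ...

    // create the texture objects and allocate their storage exactly once
    glGenTextures(3, texs);
    for (int i = 0; i < 3; ++i) {
        int w = (i == 0) ? width  : width / 2;
        int h = (i == 0) ? height : height / 2;
        glBindTexture(GL_TEXTURE_2D, texs[i]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, nullptr);
    }
}

program->bind();
// ... attribute and uniform setup as above ...

// per frame: only upload new data into the existing textures
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texs[0]);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, datas[0]);
glUniform1i(unis[0], 0);
// ... likewise for texs[1] and texs[2] with width/2, height/2 ...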
Just for the record: drawing from CPU memory by passing a client-side address to glVertexAttribPointer is only supported in OpenGL before the core profile (client-side vertex arrays were removed from core). I highly suggest using Vertex Buffer Objects instead.
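A minimal sketch of the VBO route, assuming ver and tex are fixed-size arrays of four 2D float vertices (in a core profile you would additionally need a vertex array object bound):

// one-time setup: copy the vertex and texture-coordinate data into GPU buffers
GLuint vbo[2];
glGenBuffers(2, vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(ver), ver, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, sizeof(tex), tex, GL_STATIC_DRAW);

// when drawing: source the attributes from the buffers instead of client memory
glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glVertexAttribPointer(A_VER, 2, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(A_VER);
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glVertexAttribPointer(T_VER, 2, GL_FLOAT, GL_FALSE, 0, nullptr);
glEnableVertexAttribArray(T_VER);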
I set up a framebuffer texture to draw a scene onto, as follows:
glGenFramebuffers(1, &renderFBO);
glGenTextures(1, &renderTexture);
glBindTexture(GL_TEXTURE_2D, renderTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, W_WIDTH, W_HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
int width, height;
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
cout << "width: " << width << " height: " << height << endl;
glBindFramebuffer(GL_FRAMEBUFFER, renderFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Querying width and height confirms that the texture has the correct dimensions, 1024 x 768.
Now I am trying to set up a second texture, to be used as a counter: I want to count the occurrences of every color value by incrementing the appropriate texel in the texture. Since I am counting colors, I use a 2D texture of size 256 x 3. Each cell should hold a single unsigned integer, the number of times that specific color value has appeared up to that point. For instance, if the color (255, 5, 3) was encountered, I would increment the values in tex[255][0], tex[5][1], tex[3][2].
num_bins = 256;
GLuint* hist_data = new GLuint[num_bins * color_components]();
glGenTextures(1, &tex_output);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, tex_output);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, 256, 3, 0, GL_RED, GL_UNSIGNED_INT, hist_data);
glBindImageTexture(0, tex_output, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32UI);
int width, height;
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
cout << "width: " << width << " height: " << height << endl;
I used the sized and base internal format correspondence shown in the OpenGL specification. However, when this code executes, the output values are width=0, height=0. The input data doesn't seem to be the issue, because I tried passing NULL as the last argument and it still didn't work. What am I missing here?
EDIT:
Incrementation of the values in the texture is supposed to happen inside the following compute shader:
#version 450
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform image2D img_input;
layout(r32ui, binding = 1) uniform uimage2D img_output;
void main() {
    // grabbing pixel value from input image
    vec4 pixel_color = imageLoad(img_input, ivec2(gl_GlobalInvocationID.xy));
    vec3 rgb = round(pixel_color.rgb * 255);
    ivec2 r = ivec2(rgb.r, 0);
    ivec2 g = ivec2(rgb.g, 1);
    ivec2 b = ivec2(rgb.b, 2);
    uint inc = 1;
    imageAtomicAdd(img_output, r, inc);
    imageAtomicAdd(img_output, g, inc);
    imageAtomicAdd(img_output, b, inc);
}
You get a GL_INVALID_OPERATION error because the format argument doesn't correspond to the internal format of the texture. For an unsigned integer format you have to use GL_RED_INTEGER instead of GL_RED (see glTexImage2D):
// generates GL_INVALID_OPERATION: GL_RED does not match the integer internal format GL_R32UI
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, 256, 3, 0, GL_RED, GL_UNSIGNED_INT, hist_data);

// correct: GL_RED_INTEGER for an unsigned integer texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, 256, 3, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, hist_data);
I recommend using Debug Output; it saves a lot of time and lets you find such errors quickly.
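A minimal sketch of enabling it, assuming an OpenGL 4.3+ (or KHR_debug-capable) context; the calling-convention macro on the callback may be APIENTRY or GLAPIENTRY depending on your loader/headers:

// needs <iostream>; the callback receives every debug message the driver emits
void APIENTRY debugCallback(GLenum source, GLenum type, GLuint id, GLenum severity,
                            GLsizei length, const GLchar* message, const void* userParam)
{
    std::cerr << "GL debug: " << message << std::endl;
}

// during initialization, after the context is current
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); // report messages on the call that caused them
glDebugMessageCallback(debugCallback, nullptr);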
I started learning OpenGL recently and I've been messing with textures. I was updating my texture with glTexImage2D, but I've learned that it's better to use glTexSubImage2D, so I tried to change my code, but it doesn't work.
Working code
void GLWidget::updateTextures(){
    QImage t = img.mirrored();
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, 3, t.width(), t.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, t.bits());
    glBindTexture(GL_TEXTURE_2D, 0);
}
Not working code
void GLWidget::updateTextures(){
    QImage t = img.mirrored();
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, t.width(), t.height(), GL_RGBA, GL_UNSIGNED_BYTE, t.bits());
    glBindTexture(GL_TEXTURE_2D, 0);
}
All I have is a black screen.
Thanks.
EDIT:
Here is the initialization of my texture:
void GLWidget::initializeGL(){
    ...
    LoadGLTextures();
    ...
}

void GLWidget::LoadGLTextures(){
    QImage t = img.mirrored();
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, 3, t.width(), t.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, t.bits());
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glBindTexture(GL_TEXTURE_2D, 0);
}
img is a QImage variable containing the pixel data.
glGetError() returns code 1281.
glTexSubImage2D updates the content of a previously allocated texture. glTexImage2D has to be called at least once to trigger the allocation:
void GLWidget::initializeGL(){
    //...
    QImage t = img.mirrored();
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        3,
        t.width(),
        t.height(),
        0,
        GL_RGBA,
        GL_UNSIGNED_BYTE,
        t.bits()
    );
    glBindTexture(GL_TEXTURE_2D, 0);
    // ...
}
Update with glTexSubImage2D:
void GLWidget::updateTextures(){
    QImage t = img.mirrored();
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexSubImage2D(
        GL_TEXTURE_2D,
        0,
        0,
        0,
        t.width(),
        t.height(),
        GL_RGBA,
        GL_UNSIGNED_BYTE,
        t.bits()
    );
    glBindTexture(GL_TEXTURE_2D, 0);
}
EDIT: the problem was that glTexImage2D and glTexSubImage2D were called with different image sizes, generating the error GL_INVALID_VALUE (1281, 0x501) on the glTexSubImage2D call.
I have a series of rectangles of different colours and I'm trying to add a texture to one of them. However, when I apply the texture to the given rectangle, it just turns black. Below is the function I use to load the texture.
GLuint GLWidget::LoadTexture(const char * pic, int width, int height){
    GLuint Texture;
    BYTE * data;
    FILE * picfile;

    picfile = fopen(pic, "rb");
    if (picfile == NULL)
        return 0;

    data = (BYTE *)malloc(width * height * 3);
    fread(data, width * height, 3, picfile);
    fclose(picfile);

    glGenTextures(1, &Texture);
    glBindTexture(GL_TEXTURE_2D, Texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB8, GL_UNSIGNED_BYTE, data);
    return Texture;
}
In another function where the GL_QUADS are drawn, I then have...
GLuint myTex = LoadTexture("texture.bmp", 500, 500);
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glBindTexture(GL_TEXTURE_2D, myTex);

glBegin(GL_QUADS);
    glTexCoord2f(1, 1); glVertex3f( 42, 10,  42);
    glTexCoord2f(1, 0); glVertex3f( 42, 10, -42);
    glTexCoord2f(0, 0); glVertex3f(-42, 10, -42);
    glTexCoord2f(0, 1); glVertex3f(-42, 10,  42);
glEnd();
If anyone could let me know where I am going wrong that would be great, thanks!
This call
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB8, GL_UNSIGNED_BYTE, data);
is invalid. GL_RGB8 is a valid internalFormat, but it is not a valid enum for format. Use GL_RGB as the format and GL_UNSIGNED_BYTE as the type if your client-side data is 3 channels with 8-bit unsigned data per channel.
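The corrected call keeps GL_RGB8 as the sized internal format and describes the client memory with GL_RGB / GL_UNSIGNED_BYTE:

glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);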
Another thing is
LoadTexture("texture.bmp", 500, 500);
This suggests that you are dealing with BMP files, but your loader only deals with completely raw image data.
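If the files really are BMPs, the header has to be skipped and the storage order accounted for. A rough sketch, assuming an uncompressed 24-bit BMP (the pixel data offset is stored as a little-endian 32-bit value at byte 10 of the header, rows are stored bottom-up in BGR order, and width * 3 is divisible by 4 so there is no row padding):

FILE* picfile = fopen(pic, "rb");
if (picfile == NULL)
    return 0;

unsigned char header[54];
if (fread(header, 1, sizeof(header), picfile) != sizeof(header)) {
    fclose(picfile);
    return 0;
}

// offset from the start of the file to the pixel data
unsigned int dataOffset = (unsigned int)header[10] |
                          ((unsigned int)header[11] << 8) |
                          ((unsigned int)header[12] << 16) |
                          ((unsigned int)header[13] << 24);
fseek(picfile, dataOffset, SEEK_SET);

BYTE* data = (BYTE*)malloc(width * height * 3);
fread(data, 1, width * height * 3, picfile);
fclose(picfile);

// BMP pixel data is BGR; the image will also appear vertically flipped
// unless the rows are reordered or the texture coordinates compensate
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);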
When I upload my image using glTexImage2D, it renders fine.
First, the code in the fragment shader:
uniform sampler2D tex;
in vec2 tex_coord;
// main:
fragment_out = mix(
texture(tex, tex_coord),
vec4(tex_coord.x, tex_coord.y, 0.0, 1.0),
0.5
);
Using this code, it draws the image:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height,
0, GL_RGBA, GL_UNSIGNED_BYTE, image);
But if I upload it via glTexStorage2D and glTexSubImage2D, the texture is drawn just black.
glTexStorage2D(GL_TEXTURE_2D,
1,
GL_RGBA,
width, height);
glTexSubImage2D(GL_TEXTURE_2D,
0,
0, 0,
width, height,
GL_RGBA,
GL_UNSIGNED_BYTE,
image);
When I replace GL_RGBA with GL_RGBA8 or GL_RGBA16 in the second snippet, the rendering is distorted.
This is the whole texture loading and binding code:
GLuint texture;
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
int width, height;
unsigned char *image = SOIL_load_image(Resource::getPath("Images/2hehwdv.jpg").c_str(), &width, &height, 0, SOIL_LOAD_RGBA);
std::cout << SOIL_last_result() << std::endl;
std::cout << width << "x" << height << std::endl;
glTexStorage2D(GL_TEXTURE_2D,
1,
GL_RGBA,
width, height);
glTexSubImage2D(GL_TEXTURE_2D,
0,
0, 0,
width, height,
GL_RGBA,
GL_UNSIGNED_BYTE,
image);
//glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
Can someone explain this behavior to me?
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA, width, height); // GL_RGBA: not a sized format
GL_RGBA is an unsized format.
glTexStorage2D()'s internalFormat parameter must be passed a sized internal format.
A glGetError() call after glTexStorage2D() would have returned GL_INVALID_ENUM:
GL_INVALID_ENUM is generated if internalformat is not a valid sized internal format.
Try a sized format from Table 1 like GL_RGBA8.
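With a sized internal format the allocation succeeds and the subsequent upload works:

glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8, width, height);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                GL_RGBA, GL_UNSIGNED_BYTE, image);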
I'm working on a game engine that uses BMP texture files on MD3 models. Unfortunately for me, it isn't working at all. I've tried everything, to no avail. Since I wrote the loaders myself, I looked at them and fixed a few bugs, but I still just get a gray, lit model.
Here's my rendering code:
void drawmd3(md3& model, GLuint tex)
{
    int i = 0;
    glLoadIdentity();
    glTranslatef(-1.5, 0, -24);
    glRotatef(270, 0, 0, 1);
    glRotatef(-90, 0, 1, 0);
    glRotatef(a, 0, 0, 1);
    while (i < model.surfnum)
    {
        glBindTexture(GL_TEXTURE_2D, tex);
        int j = 0;
        while (j < model.surfs.at(i).trinum)
        {
            int tmp  = model.surfs.at(i).tris.at(j).indexes[0];
            int tmp1 = model.surfs.at(i).tris.at(j).indexes[1];
            int tmp2 = model.surfs.at(i).tris.at(j).indexes[2];
            float norms[3];
            norms[0] = convert_normals0(model.surfs.at(i).verts.at(tmp).normal);
            norms[1] = convert_normals1(model.surfs.at(i).verts.at(tmp).normal);
            norms[2] = convert_normals2(model.surfs.at(i).verts.at(tmp).normal);
            float norms1[3];
            norms1[0] = convert_normals0(model.surfs.at(i).verts.at(tmp1).normal);
            norms1[1] = convert_normals1(model.surfs.at(i).verts.at(tmp1).normal);
            norms1[2] = convert_normals2(model.surfs.at(i).verts.at(tmp1).normal);
            float norms2[3];
            norms2[0] = convert_normals0(model.surfs.at(i).verts.at(tmp2).normal);
            norms2[1] = convert_normals1(model.surfs.at(i).verts.at(tmp2).normal);
            norms2[2] = convert_normals2(model.surfs.at(i).verts.at(tmp2).normal);
            glBegin(GL_TRIANGLES);
                glNormal3f(norms[0], norms[1], norms[2]);
                glTexCoord2f(model.surfs.at(i).st.at(tmp).st[0], 1 - model.surfs.at(i).st.at(tmp).st[1]);
                glVertex3f(model.surfs.at(i).verts.at(tmp).coord[0]/64, model.surfs.at(i).verts.at(tmp).coord[1]/64, model.surfs.at(i).verts.at(tmp).coord[2]/64);
                glNormal3f(norms1[0], norms1[1], norms1[2]);
                glTexCoord2f(model.surfs.at(i).st.at(tmp1).st[0], 1 - model.surfs.at(i).st.at(tmp1).st[1]);
                glVertex3f(model.surfs.at(i).verts.at(tmp1).coord[0]/64, model.surfs.at(i).verts.at(tmp1).coord[1]/64, model.surfs.at(i).verts.at(tmp1).coord[2]/64);
                glNormal3f(norms2[0], norms2[1], norms2[2]);
                glTexCoord2f(model.surfs.at(i).st.at(tmp2).st[0], 1 - model.surfs.at(i).st.at(tmp2).st[1]);
                glVertex3f(model.surfs.at(i).verts.at(tmp2).coord[0]/64, model.surfs.at(i).verts.at(tmp2).coord[1]/64, model.surfs.at(i).verts.at(tmp2).coord[2]/64);
            glEnd();
            j++;
        }
        glBindTexture(GL_TEXTURE_2D, 0);
        i++;
    }
}
Here's my init code:
glEnable(GL_TEXTURE_2D);
glGenTextures(1,&tex[0]);
glBindTexture(GL_TEXTURE_2D, tex[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, img.img.imgwidth, img.img.imgheight, 0, GL_RGBA8, GL_UNSIGNED_BYTE, img.data);
That's your problem right there:
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGBA8,
img.img.imgwidth,
img.img.imgheight,
0,
GL_RGBA8, <<<<<<<<<<----------
GL_UNSIGNED_BYTE,
img.data);
The token you're using for the format parameter is not allowed. From the reference:
format
Specifies the format of the pixel data. The following symbolic values are accepted: GL_COLOR_INDEX, GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA, GL_RGB, GL_BGR, GL_RGBA, GL_BGRA, GL_LUMINANCE, and GL_LUMINANCE_ALPHA.
Thus no texture data is loaded and you end up with the default white of a texture that was never set up.
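A corrected upload keeps GL_RGBA8 as the internal format and describes the client-side data with GL_RGBA (assuming the loader really does produce 4-channel, 8-bit-per-channel data):

glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, img.img.imgwidth, img.img.imgheight,
             0, GL_RGBA, GL_UNSIGNED_BYTE, img.data);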