Can't draw triangles in OpenGL but other primitives render fine - c++

Good afternoon,
I'm trying to learn to use a graphics library that uses OpenGL. I can draw 2D primitives (text and lines) but 3D triangles don't render. I've tried everything I can think of but as an OpenGL newcomer I've probably missed something obvious.
This code is not intended to be efficient. I'm trying to get it to work first.
Here's the setup at startup:
// 800 by 600 windows 32 bit depth
Driver->setDisplay( UDriver::CMode( ScreenWidth, ScreenHeight, 32 ) );
// Viewport covering the whole window.
NL3D::CViewport viewport;
viewport.initFullScreen();
Driver->setViewport( viewport );
// Identity view and model matrices: world space == camera space.
NLMISC::CMatrix mtx;
mtx.identity();
Driver->setViewMatrix( mtx );
Driver->setModelMatrix( mtx );
// screen size is same as pixel resolution
// CFrustum(float left, float right, float bottom, float top, float znear, float zfar, bool perspective= true)
// Orthographic frustum (perspective = false) mapping one unit to one pixel;
// znear = -2 keeps geometry at depth 0 inside the clip volume.
// NOTE(review): all of this state lives in the *current* GL context — the
// thread's resolution was that a different context was active at draw time.
Driver->setMatrixMode2D( CFrustum( 0.0f, ScreenWidth, 0.0f, ScreenHeight, -2.0f, 10000.0f, false ) );
Here's the code in my rendering loop:
// Per-frame quad drawing (deliberately simple, not optimized).
// Unlit material, semi-transparent yellow (alpha 128).
static NL3D::CMaterial mat;
mat.initUnlit();
mat.setColor( CRGBA( 255, 255, 0, 128 ) );
// Quad position and size in pixels (the 2D frustum maps units 1:1 to pixels).
float x = 200.0f;
float y = 200.0f;
float width = 200.0f; // (float)ScreenWidth * 0.125f;
float height = 200.0f; // (float)ScreenHeight * 0.125f;
// Four-corner vertex buffer: position plus one set of 2D texture coordinates.
static NL3D::CVertexBuffer vb;
if ( vb.getName().empty() )
vb.setName("drawBitmap");
vb.setVertexFormat( NL3D::CVertexBuffer::PositionFlag | NL3D::CVertexBuffer::TexCoord0Flag );
vb.setNumVertices( 4 );
{
// Scope block: the lock is released when vba is destroyed at the brace.
NL3D::CVertexBufferReadWrite vba;
vb.lock( vba );
// Corners laid out in the XZ plane (second component is 0).
vba.setVertexCoord( 0, NLMISC::CVector( x, 0, y ) );
vba.setVertexCoord( 1, NLMISC::CVector( x + width, 0, y ) );
vba.setVertexCoord( 2, NLMISC::CVector( x + width, 0, y + height ) );
vba.setVertexCoord( 3, NLMISC::CVector( x, 0, y + height ) );
vba.setTexCoord( 0, 0, 0.f, 1.f );
vba.setTexCoord( 1, 0, 1.f, 1.f );
vba.setTexCoord( 2, 0, 1.f, 0.f );
vba.setTexCoord( 3, 0, 0.f, 0.f );
}
dynamic_cast<NL3D::CDriverUser*>(Driver)->getDriver()->activeVertexBuffer( vb );
// Index buffer: two triangles, (0,1,2) and (2,3,0), forming the quad.
static NL3D::CIndexBuffer pb;
if ( pb.getName().empty() )
pb.setName("drawBitmap");
pb.setFormat( NL_DEFAULT_INDEX_BUFFER_FORMAT );
pb.setNumIndexes( 6 );
{
CIndexBufferReadWrite iba;
pb.lock( iba );
iba.setTri( 0, 0, 1, 2 );
iba.setTri( 3, 2, 3, 0 );
}
dynamic_cast<NL3D::CDriverUser*>(Driver)->getDriver()->activeIndexBuffer( pb );
// Draw 2 triangles starting at index 0. NOTE(review): this drops to the
// low-level driver, bypassing the UDriver wrapper — per the thread's
// resolution, the failure was a wrong OpenGL context current at this point.
dynamic_cast<NL3D::CDriverUser*>(Driver)->getDriver()->renderTriangles( mat, 0, 2 );
Any suggestions?
Thanks

It turned out to be an issue with multiple OpenGL contexts: the code wasn't switching back to the correct context before trying to draw.

Related

OpenGL flood fill not recognizing boundary

I'm trying to implement the flood fill algorithm in OpenGL, but I'm encountering an error doing so. The error is that the algorithm does not stop at the boundary, and simply keeps going until the edge of the window, and eventually crashes with a bad memory access error. I'm working on MacOS Mojave 10.14.4.
I think my implementation's logic is correct, however, I've printed out the color of each pixel from getPixel, and it is always white (the background color), even when getting the color of boundary pixels.
The code below draws a circle using Bresenham's Line algorithm (midpoint algorithm), and then flood fills it (unsuccessfully).
#include <GLUT/GLUT.h>
#include <iostream>
#include <queue>
#include <utility>
// One framebuffer pixel as read back with glReadPixels(GL_RGB, GL_UNSIGNED_BYTE).
// Must remain exactly three contiguous bytes (r, g, b) with no extra members,
// because glReadPixels writes 3 raw bytes straight into &color.
struct Color {
GLubyte r;
GLubyte g;
GLubyte b;
};
// Read back the RGB color of the single framebuffer pixel at (x, y).
// Coordinates are window pixels with the origin at the bottom-left.
Color getPixelColor(GLint x, GLint y) {
    Color result;
    // Reads a 1x1 region: exactly three bytes (r, g, b) into the struct.
    glReadPixels(x, y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE, &result);
    return result;
}
// Plot one point at (x, y) using the current GL color.
void setPixel (GLint x, GLint y) {
    glBegin(GL_POINTS);
    glVertex2i(x, y);
    glEnd();
    // Fix: removed the unused `Color color = getPixelColor(x, y);` local.
    // Its result was never read, and the implied glReadPixels forced a
    // full pipeline synchronization for every single pixel plotted.
}
// Plot one point at (x, y) in the given color.
void setPixelColor(GLint x, GLint y, Color color) {
    glColor3ub(color.r, color.g, color.b);
    setPixel(x, y);
    // Fix: removed a stray glEnd() that had no matching glBegin()
    // (setPixel already closes its own glBegin/glEnd pair); the unmatched
    // call raised GL_INVALID_OPERATION on every invocation.
    // Flush so the write is visible to subsequent glReadPixels calls
    // (the flood fill reads pixels back as it goes).
    glFlush();
}
// Flood-fill the 4-connected region of interiorColor pixels containing
// (x, y) with fillColor.
//
// Fix: rewritten with an explicit work queue. The original recursive version
// consumed one stack frame per pixel and overflowed the call stack (crashing
// with a bad-access error) on any realistically sized region.
void floodFill4 (GLint x, GLint y, Color fillColor, Color interiorColor) {
    std::queue< std::pair<GLint, GLint> > pending;
    pending.push( std::make_pair( x, y ) );
    while ( !pending.empty() ) {
        const GLint px = pending.front().first;
        const GLint py = pending.front().second;
        pending.pop();
        // Only fill pixels that still show the interior color; boundary or
        // already-filled pixels terminate that branch of the fill.
        Color color = getPixelColor(px, py);
        if (color.r == interiorColor.r && color.g == interiorColor.g &&
            color.b == interiorColor.b) {
            setPixelColor(px, py, fillColor);
            pending.push( std::make_pair( px + 1, py ) );
            pending.push( std::make_pair( px - 1, py ) );
            pending.push( std::make_pair( px, py + 1 ) );
            pending.push( std::make_pair( px, py - 1 ) );
        }
    }
}
// Plot the eight symmetric points of circle octant coordinate (x, y)
// around the center (cx, cy).
void drawCirclePoint(GLint x, GLint y, GLint cx, GLint cy) {
    // Eight-way symmetry: same plotting order as writing each call out.
    const GLint points[8][2] = {
        { cx + x, cy + y }, { cx + y, cy + x },
        { cx - y, cy + x }, { cx - x, cy + y },
        { cx - x, cy - y }, { cx - y, cy - x },
        { cx + y, cy - x }, { cx + x, cy - y },
    };
    for (const auto& p : points)
        setPixel(p[0], p[1]);
}
// Draw a circle of the given radius centered at (cx, cy) using the
// midpoint (Bresenham) algorithm over one octant plus 8-way symmetry.
void drawCircle(GLint cx, GLint cy, GLint radius) {
    GLint y = radius;
    int decision = 1 - radius; // midpoint decision parameter
    for (GLint x = 0; x < y; ) {
        drawCirclePoint(x, y, cx, cy);
        ++x;
        if (decision < 0) {
            // Midpoint inside the circle: keep y, step east.
            decision += 2 * x + 1;
        } else {
            // Midpoint outside: step south-east.
            --y;
            decision += 2 * x + 1 - 2 * y;
        }
    }
}
// GLUT display callback: clear to the background color, draw the circle
// outline, and (eventually) flood-fill it.
void displayMe(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    GLint cx = 0;
    GLint cy = 0;
    GLint radius = 200;
    // Draw head in black.
    // Fix: removed a duplicated glColor3ub(0, 0, 0) call and a stray
    // glEnd() with no matching glBegin() (setPixel manages its own
    // begin/end pairs); the unmatched glEnd raised GL_INVALID_OPERATION.
    glColor3ub(0, 0, 0);
    drawCircle(cx, cy, radius);
    glFlush();
    // Fill colors for the (currently disabled) flood fill.
    Color interiorColor = {255, 255, 255};
    Color fillColor = {0, 0, 255};
    // floodFill4(100, 100, fillColor, interiorColor);
}
// One-time GL setup: background color and a fixed 2D projection.
void init (void) {
glClearColor(1.0, 1.0, 1.0, 0.0); // Set display-window color to white.
glMatrixMode(GL_PROJECTION); // Set projection parameters.
glLoadIdentity();
// World coordinates run -1000..1000 on both axes (origin at the center).
// NOTE(review): the window is 1000x1000 pixels, so this maps 2 world units
// per pixel — glVertex coordinates will NOT line up with glReadPixels
// window coordinates; verify the intended coordinate system.
gluOrtho2D(-1000.0, 1000.0, -1000.0, 1000.0);
glMatrixMode(GL_MODELVIEW);
}
// Create a 1000x1000 single-buffered RGB window and enter the GLUT loop.
int main(int argc, char** argv) {
glutInit(&argc, argv);
// Single-buffered, so drawing goes straight to the visible buffer and the
// flood fill's glReadPixels sees what was just drawn.
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(1000, 1000);
glutInitWindowPosition(100, 100);
glutCreateWindow("My Drawing");
init();
glutDisplayFunc(displayMe);
glutMainLoop();
return 0;
}
I've looked at this post, which seems similar, but wasn't able to find a solution.
If you're going to persist with glVertex() for point plotting make sure you set up your matrix stack transforms (GL_PROJECTION/GL_MODELVIEW) so they match the glReadPixels() coordinate system:
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
gluOrtho2D( 0, glutGet(GLUT_WINDOW_WIDTH), 0, glutGet(GLUT_WINDOW_HEIGHT) );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
Or switch to glRasterPos() + glDrawPixels() for setPixel*().
Better yet, do the flood-fill logic host-side & upload the results to a GL texture for display.
Either way you're going to run into a stack overflow with that recursive solution on even fairly reasonably sized inputs so you'll probably want to switch to an explicit stack/queue:
// Iterative 4-connected flood fill: replaces interiorColor pixels reachable
// from (aX, aY) with fillColor, using an explicit worklist instead of
// recursion so large regions cannot overflow the call stack.
void floodFill4( GLint aX, GLint aY, Color fillColor, Color interiorColor )
{
    // Pixels still waiting to be examined.
    std::queue< std::pair< GLint, GLint > > pending;
    pending.push( std::make_pair( aX, aY ) );
    while( !pending.empty() )
    {
        const GLint x = pending.front().first;
        const GLint y = pending.front().second;
        pending.pop();
        const Color c = getPixelColor( x, y );
        const bool isInterior =
            c.r == interiorColor.r &&
            c.g == interiorColor.g &&
            c.b == interiorColor.b;
        if( isInterior )
        {
            setPixelColor( x, y, fillColor );
            // Enqueue the four neighbors (same order as the original).
            pending.push( std::make_pair( x, y - 1 ) );
            pending.push( std::make_pair( x, y + 1 ) );
            pending.push( std::make_pair( x - 1, y ) );
            pending.push( std::make_pair( x + 1, y ) );
        }
    }
}

C++ Directx 11 Trying to XMMatrixTranslation a cube

Trying to modify the cube position with the XMMatrixTranslation function. Attached is the code...
// Per-frame update and render for the rotating cube sample.
// Rotate and modify position of cube around the origin
g_World = XMMatrixRotationY( t );
// World = rotation first, then translation along Y driven by key input.
g_World = XMMatrixMultiply(g_World, XMMatrixTranslation(0, yDirection, 0));
// Modify the color
// Animate the color channels with out-of-phase waves mapped into [0, 1].
g_vMeshColor.x = ( sinf( t * 1.0f ) + 1.0f ) * 0.5f;
g_vMeshColor.y = ( cosf( t * 3.0f ) + 1.0f ) * 0.5f;
g_vMeshColor.z = ( sinf( t * 5.0f ) + 1.0f ) * 0.5f;
//
// Clear the back buffer
//
float ClearColor[4] = { 0.0f, 0.125f, 0.3f, 1.0f }; // red, green, blue, alpha
g_pImmediateContext->ClearRenderTargetView( g_pRenderTargetView, ClearColor );
//
// Clear the depth buffer to 1.0 (max depth)
//
g_pImmediateContext->ClearDepthStencilView( g_pDepthStencilView, D3D11_CLEAR_DEPTH, 1.0f, 0 );
//
// Update variables that change once per frame
//
CBChangesEveryFrame cb;
// Transposed because the HLSL side consumes column-major matrices.
cb.mWorld = XMMatrixTranspose( g_World );
cb.vMeshColor = g_vMeshColor;
g_pImmediateContext->UpdateSubresource( g_pCBChangesEveryFrame, 0, NULL, &cb, 0, 0 );
//
// Render the cube
//
g_pImmediateContext->VSSetShader( g_pVertexShader, NULL, 0 );
// Bind the three constant buffers to VS slots 0-2.
g_pImmediateContext->VSSetConstantBuffers( 0, 1, &g_pCBNeverChanges );
g_pImmediateContext->VSSetConstantBuffers( 1, 1, &g_pCBChangeOnResize );
g_pImmediateContext->VSSetConstantBuffers( 2, 1, &g_pCBChangesEveryFrame );
g_pImmediateContext->PSSetShader( g_pPixelShader, NULL, 0 );
g_pImmediateContext->PSSetConstantBuffers( 2, 1, &g_pCBChangesEveryFrame );
g_pImmediateContext->PSSetShaderResources( 0, 1, &g_pTextureRV );
g_pImmediateContext->PSSetSamplers( 0, 1, &g_pSamplerLinear );
// 36 indices = 12 triangles = 6 cube faces.
g_pImmediateContext->DrawIndexed( 36, 0, 0 );
//
// Present our back buffer to our front buffer
//
g_pSwapChain->Present( 0, 0 );
// NOTE(review): if the cube vanishes as yDirection changes, check that the
// translated position stays inside the view frustum — TODO confirm the
// magnitude and sign of yDirection against the camera setup.
When I press the down button the position of the cube is supposed to change in the negative y direction. The cube disappears when I press the key.
Thanks,
Scott

Creating SpaceBattle: bottlenecks (~100 polygons, 200 FPS), optimization

I just started learning DirectX and decided to write a simple classic game: SpaceBattle. But now I have some performance problems, mainly related to Render (I guess not to Update). As a result I have 150-200 FPS with a maximum of 80-100 polygons. If I comment out the Render method, the FPS grows to 2500-3000. So here is my Render method:
// Issue all draw calls for the current frame and present the back buffer.
void DX11Game::Render()
{
// No device context (e.g. during startup/teardown): nothing to render.
if( d3dContext_ == 0 )
return;
// Pipeline state shared by every draw call below; bound once per frame.
stride = sizeof( VertexPT );
offset = 0;
d3dContext_->IASetInputLayout(inputLayout);
d3dContext_->IASetVertexBuffers(0, 1, &vertexBuffer, &stride, &offset);
d3dContext_->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3dContext_->VSSetShader(VS, 0, 0);
d3dContext_->PSSetShader(PS, 0, 0);
d3dContext_->PSSetSamplers(0, 1, &samplerState);
d3dContext_->VSSetConstantBuffers(0, 1, &cbMatricesBuffer);
d3dContext_->PSSetConstantBuffers(1, 1, &cbParametersBuffer);
// Draw a different object set depending on the current game state.
switch (DX11Game::gameState)
{
case GAME_STATE::GAME_STATE_PLAY:
d3dContext_->ClearRenderTargetView(backBufferTarget_, backColor);
DrawAsteroids();
DrawShip();
DrawUserBullets();
DrawEnemies();
DrawGUI();
break;
case GAME_STATE::GAME_STATE_GAMEOVER:
d3dContext_->ClearRenderTargetView(backBufferTarget_, backColor);
DrawAsteroids();
DrawShip();
DrawUserBullets();
DrawGameOver();
break;
case GAME_STATE::GAME_STATE_SCORES:
// Scores screen draws nothing yet (note: back buffer is not cleared here).
break;
}
// Present immediately, without waiting for vertical sync.
swapChain_->Present(0, 0);
}
And here is one of the methods that draws game objects. The other Draw methods are similar to this one:
void DX11Game::DrawAsteroids()
{
std::vector<std::shared_ptr<Asteroid>>* asteroids = gameLogic->GetAsteroids();
std::vector<std::shared_ptr<Asteroid>>::iterator it = asteroids->begin();
while (it != asteroids->end())
{
float SIZE = *((*it)->GetSize());
float* temp = (*it)->GetTexCoord();
XMFLOAT4 texBounds = XMFLOAT4( temp[0], temp[1], temp[2], temp[3] );
vertices[0].pos = XMFLOAT3( SIZE, SIZE, 1.0f );
vertices[0].tex0 = XMFLOAT2( texBounds.x + texBounds.z, texBounds.y );
vertices[1].pos = XMFLOAT3( SIZE, -SIZE, 1.0f );
vertices[1].tex0 = XMFLOAT2( texBounds.x + texBounds.z, texBounds.y + texBounds.w );
vertices[2].pos = XMFLOAT3( -SIZE, -SIZE, 1.0f );
vertices[2].tex0 =XMFLOAT2( texBounds.x,texBounds.y + texBounds.w );
vertices[3].pos = XMFLOAT3( -SIZE, -SIZE, 1.0f );
vertices[3].tex0 =XMFLOAT2( texBounds.x, texBounds.y + texBounds.w );
vertices[4].pos = XMFLOAT3( -SIZE, SIZE, 1.0f );
vertices[4].tex0 = XMFLOAT2( texBounds.x, texBounds.y );
vertices[5].pos = XMFLOAT3( SIZE, SIZE, 1.0f );
vertices[5].tex0 =XMFLOAT2( texBounds.x +texBounds.z, texBounds.y );
XMMATRIX world = XMMatrixTranslation(
( *it )->GetPosition()[0],
( *it )->GetPosition()[1],
0.0f );
world = XMMatrixTranspose(world);
d3dContext_->PSSetShaderResources(0, 1, &texGameSprites);
d3dContext_->UpdateSubresource(vertexBuffer, 0, 0, vertices, 0, 0);
CB_Matrices matrices;
matrices.World = world;
matrices.Projection = proj;
d3dContext_->UpdateSubresource(cbMatricesBuffer, 0, 0, &matrices, 0, 0);
CB_Parameters params;
params.isTextured = true;
params.color[0] = 1.0f; params.color[1] = 1.0f;
params.color[2] = 0.0f; params.color[3] = 1.0f;
d3dContext_->UpdateSubresource(cbParametersBuffer, 0, 0, &params, 0, 0);
d3dContext_->Draw(6, 0);
it++;
}
}
So what do I have to do to optimize my game?
Thanks!

Turning picture upside down when using glDrawPixels

I'm getting really strange results when using glDrawPixels() in combination with glRasterPos2*() and glPixelZoom(). I've got a picture and I'd like to show it like old framebuffers used to, i.e. (0,0) being in the topleft corner. Here's the code:
// Qt resize hook: match the GL viewport to the widget and install a
// pixel-aligned orthographic projection (origin at the bottom-left).
void GLWidget::resizeGL( int w, int h )
{
glViewport( 0, 0, w, h );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho( 0, w, 0, h, -1, 1 );
// NOTE(review): leaves GL_PROJECTION as the current matrix mode; code that
// assumes GL_MODELVIEW afterwards should restore it — TODO confirm.
}
void GLWidget::paintGL()
{
for( int i = 0; i < SCREEN_WIDTH; ++i )
{
displayBitmap[i] = 0xf81f;
}
for( int i = 239 * SCREEN_WIDTH; i < 239 * SCREEN_WIDTH + SCREEN_WIDTH; ++i )
{
displayBitmap[i] = 0xf800;
}
//glRasterPos2f( 0, SCREEN_HEIGHT - 0.1 );
glRasterPos2i( 0, SCREEN_HEIGHT - 1 );
glPixelZoom( 1, -1 );
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
glDrawPixels( SCREEN_WIDTH, SCREEN_HEIGHT, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, displayBitmap );
}
If executed as it is now, I'm getting the violet line at (0,1), i.e. one black line, then the violet line and then, in the invisible bottom area, is the red line.
OK, so I change
glRasterPos2i( 0, SCREEN_HEIGHT - 1 );
to
glRasterPos2i( 0, SCREEN_HEIGHT );
Nope, output is corrupted. Strangely, if I change it to:
glRasterPos2f( 0, SCREEN_HEIGHT - 0.1 );
it works, both lines are drawn, in the correct order (SCREEN_HEIGHT and SCREEN_HEIGHT - 1.0 lead to the same result as in the integer version).
What am I doing wrong here? SCREEN_WIDTH = 320, SCREEN_HEIGHT = 240.
This works for me:
void display() {
unsigned char *pixels=captureScreenRegion(0,0,window_width,window_height);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glRasterPos2f(-1,1);
glPixelZoom( 1, -1 );
glDrawPixels(window_width,window_height,GL_RGB,GL_BYTE,pixels);
free(pixels);
glutSwapBuffers();
}
glDrawPixels is only poorly supported on consumer grade hardware. And in newer OpenGL versions it has been removed entirely. Instead of spending time trying to get this to work, just load your image into a texture and draw a textured quad with a trivial shader with it.
Trust me, messing around with glDrawPixels is not worth the effort.
I meet the same problem as you do. I solved it this way:
// Resize hook: viewport + pixel-aligned ortho projection, plus the pixel
// transfer state (zoom and raster position) used by paintGL's glDrawPixels.
void VideoRenderWidget::resizeGL(int w, int h)
{
glViewport( 0, 0, w, h );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
// Near plane at 0.1: eye-space z must lie in [-1, -0.1] to avoid clipping.
glOrtho( 0, w, 0, h, 0.1, 1 );
// Negative Y zoom draws rows top-down so image row 0 appears at the top.
glPixelZoom( 1, -1 );
// Anchor near the top edge; z = -0.3 sits safely inside the depth range.
// NOTE(review): this raster position is only re-set on resize — it is
// presumed to persist across paintGL calls; verify nothing else moves it.
glRasterPos3f(0, h - 1, -0.3);
}
// Paint hook: blit the current video frame at the raster position and zoom
// configured in resizeGL.
void VideoRenderWidget::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// 704 x 576 is the fixed video frame size (width x height) — TODO: derive
// these from the actual frame data instead of hard-coding.
glDrawPixels(704, 576, GL_RGB, GL_UNSIGNED_BYTE, buffer_.data_);
}
This worked for me. I am still developing this; here 704 is the video width and 576 is the height.

glReadPixels GL_DEPTH_COMPONENT and color

How to get the depth and the color information out of any OpenGL drawing? I would like to save a depth image and a color image to the disk. What i tried is the following:
// Render the mesh as colored points, then read depth and one color channel
// back from the framebuffer.
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glBegin(GL_POINTS);
glColor3f(1.0f,1.0f,1.0f); // default to white when the mesh has no colors
for(int i=0; i<mesh->vertices.size();i++) {
if(! mesh->colors.empty()) {
glColor3f(mesh->colors[i][0],mesh->colors[i][1],mesh->colors[i][2]);
}
float x= mesh->vertices[i][0];
float y= mesh->vertices[i][1];
float z = mesh->vertices[i][2];
glVertex3f(x, y, z);
}
glEnd();
// Ensure rendering has fully completed before reading the framebuffer back.
glFlush();
glFinish();
int width = 1280;
int height = 960;
// One float per pixel for each readback: full depth buffer, and the blue
// channel only (GL_BLUE is a single-component format, so width*height
// floats is the correct size for `color` as well).
GLfloat* depths;
depths = new GLfloat[ width * height ];
GLfloat * color;
color = new GLfloat[width * height];
// NOTE(review): neither array is ever delete[]d — memory leak; TODO free
// them (or switch to std::vector) once the readback works.
glReadPixels (0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT, depths);
// NOTE(review): if `color` comes back empty, check which buffer
// glReadBuffer() targets and that 1280x960 matches the real window size —
// cannot be confirmed from this snippet.
glReadPixels (0, 0, width, height, GL_BLUE, GL_FLOAT, color);
But it looks like only the depths array is filled?
For saving the render result in image, you must save colorbuffer information (not directly from depth buffer).
You can render separate passes for color and for depth to the same colorbuffer, and simply use glReadPixels twice: first after rendering color to the colorbuffer, and second after rendering depth to it.
For write color and depth simultaneously in two separate color buffers by one pass you can use MRT ( Multiple Render Targets ), tutorial - http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-14-render-to-texture/ .
I would choose MRT. :) After that you can save your results by using glReadPixels as in the two-pass technique.
But first you must setup from which colorbuffer you want read pixels by using glReadBuffer, default colorbuffer is GL_BACK, which mean default OpenGL context backbuffer. By using MRT you must use one of GL_COLOR_ATTACHMENTi for write in to colorbuffers and it also can be one of glReadBuffer value.
So, just simple setup glReadBuffer with one of GL_COLOR_ATTACHMENTi and use glReadPixels.
Try this:
#include <GL/freeglut.h>
#include <vector>
#include <sstream>
int mx = 0, my = 0;
// GLUT passive-motion callback: remember the cursor position and request
// a redraw so the HUD readout refreshes.
void passiveMotion( int x, int y )
{
    // GLUT reports y with the origin at the top-left; flip it so the stored
    // coordinate matches OpenGL's bottom-left convention (for glReadPixels).
    my = glutGet( GLUT_WINDOW_HEIGHT ) - y;
    mx = x;
    glutPostRedisplay();
}
// Draw a unit quad spanning [-1,1]^2 in the XY plane with the current color
// and modelview transform. Extracted because the two test quads below used
// identical, duplicated glBegin/glVertex/glEnd code.
static void drawUnitQuad()
{
    glBegin(GL_QUADS);
    glVertex2f(-1,-1);
    glVertex2f(1,-1);
    glVertex2f(1,1);
    glVertex2f(-1,1);
    glEnd();
}

// Render two overlapping quads at different depths, then read back and
// print the depth and RGBA color under the mouse cursor.
void display()
{
    glEnable(GL_DEPTH_TEST);
    glClearColor( 0, 0, 0, 1 );
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Aspect-corrected orthographic projection for the scene.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    const int w = glutGet( GLUT_WINDOW_WIDTH );
    const int h = glutGet( GLUT_WINDOW_HEIGHT );
    const double ar = (double)w / (double)h;
    glOrtho( -10 * ar, 10 * ar, -10, 10, -10, 10 );
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    // Green quad pushed back to z = -5.
    glColor3ub(0,255,0);
    glPushMatrix();
    glTranslated(2,2,-5);
    glScalef(5,5,5);
    drawUnitQuad();
    glPopMatrix();

    // Red quad at z = 0 (in front of the green one where they overlap).
    glColor3ub(255,0,0);
    glPushMatrix();
    glTranslated(0,0,0);
    glScalef(5,5,5);
    drawUnitQuad();
    glPopMatrix();

    // Switch to pixel-aligned coordinates for the HUD text, so glRasterPos
    // agrees with the (mx, my) window coordinates used by glReadPixels.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho( 0, w, 0, h, -1, 1 );
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    // Read and print the depth value under the cursor.
    {
        GLfloat depth = 0.0f;
        glReadPixels( mx, my, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &depth );
        std::ostringstream oss;
        oss << "Depth: " << depth;
        glColor3ub( 255, 255, 255 );
        glRasterPos2i( 10, 10 );
        glutBitmapString( GLUT_BITMAP_9_BY_15, (const unsigned char*)oss.str().c_str() );
    }
    // Read and print the RGBA color under the cursor.
    {
        GLubyte color[4];
        glReadPixels( mx, my, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, color );
        std::ostringstream oss;
        oss << "Color:"
            << " " << (unsigned int)color[0]
            << " " << (unsigned int)color[1]
            << " " << (unsigned int)color[2]
            << " " << (unsigned int)color[3];
        glColor3ub( 255, 255, 255 );
        glRasterPos2i( 10, 25 );
        glutBitmapString( GLUT_BITMAP_9_BY_15, (const unsigned char*)oss.str().c_str() );
    }
    glutSwapBuffers();
}
// Create a 400x400 double-buffered RGBA window with a depth buffer
// (required for the GL_DEPTH_COMPONENT readback) and run the GLUT loop.
int main( int argc, char** argv )
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(400,400);
glutCreateWindow("GLUT");
glutDisplayFunc( display );
glutPassiveMotionFunc( passiveMotion );
glutMainLoop();
return 0;
}