I am trying to write a simple blur fragment shader that supports float NaN values, but it is giving me unexpected results on OpenGL. I use floating point textures and set the green channel to 1 if the pixel is NaN, otherwise set the red channel to the float value.
On DirectX, it works fine, but on OpenGL it returns black instead of the green I am expecting for NaN values. I tested on Ubuntu with a GTX 1060 and Windows 10 with a GTX 750
DirectX output, expected result:
OpenGL output, incorrect result:
CG code:
Shader "FX/DepthBlur_R"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
// No culling or depth
Cull Off ZWrite Off ZTest Always
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
sampler2D _MainTex;
float4 _MainTex_TexelSize;
float4 frag (v2f i) : SV_Target
{
float2 col = tex2D(_MainTex, i.uv).rg;
float2 up = tex2D(_MainTex, i.uv + fixed2(0, _MainTex_TexelSize.y)).rg;
float2 down = tex2D(_MainTex, i.uv - fixed2(0, _MainTex_TexelSize.y)).rg;
float2 left = tex2D(_MainTex, i.uv - fixed2(_MainTex_TexelSize.x, 0)).rg;
float2 right = tex2D(_MainTex, i.uv + fixed2(_MainTex_TexelSize.x, 0)).rg;
int count = 0;
float sides = 0;
if (up.g < 0.1) { sides += up.r; count++; }
if (down.g < 0.1) { sides += down.r; count++; }
if (left.g < 0.1) { sides += left.r; count++; }
if (right.g < 0.1) { sides += right.r; count++; }
sides = sides / count;
float4 ret;
if (count == 0 && col.g > 0.1) { // Nothing valid
ret = fixed4(0, 1, 0, 1);
}
else if (count == 0) { // Only col is valid
ret = float4(col.r, 0, 0, 1);
}
else if (col.g > 0.1) { // Only sides are valid
ret = float4(sides, 0, 0, 1);
}
else {
ret = float4(((col.r + sides) / 2), 0, 0, 1);
}
return ret;
}
ENDCG
}
}
}
The result is also strange when I switched to debug colors, see the comments in the else section:
if (count == 0 && col.g > 0.1) { // Nothing valid
return fixed4(1, 0, 0, 1);
}
else if (count == 0) { // Only col is valid
return fixed4(0, 1, 0, 1);
}
else if (col.g > 0.1) { // Only sides are valid
return fixed4(0, 0, 1, 1);
}
else {
//Uncommenting this line gives the expected result
return fixed4(0, 0, 0, 1);
//Uncommenting this line results in blue for the "Nothing valid" section
//return float4(0, 0, 0, ((col.r + sides) / 2));
}
Somehow switching count from an int to a float fixed the issue. Does anyone know what might be happening to cause it?
Related
I am working on an assignment where I need to ray trace a sphere with a plane (floor). I have the sphere but I am having trouble with the plane. I use the ray-plane intersection formula:
t = -(o-p).n / d.n. I have this in Plane.h, however when I run my code I get errors from Ray.h. Could someone explain what I'm doing wrong? Any help would be appreciated.
Plane.h
`#include "..\..\raytrace\Ray.h"
class Plane
{
using Colour = cv::Vec3b; // RGB Value
private:
Vec3 normal_;
Vec3 distance_;
Colour color_;
public:
Plane();
Plane(Vec3 norm, Vec3 dis, Colour color) : normal_(norm), distance_(dis), color_(color) {}
Vec3 norm() const {
return normal_;
}
Vec3 dis() const {
return distance_;
}
Colour color() const {
return color_;
}
float findIntersection(Ray ray) {
Vec3 rayDirection = ray.mPosition();
float denominator = rayDirection.dot(normal_);
if (denominator == 0) {
return false;
}
else {
//mPosition() is origin in Ray.h
float t = -(((ray.mPosition() - distance_)).dot(normal_)) / denominator;
}
}
};
`
Ray.h
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <cmath>
#include "Image.h"
// Data types
typedef float Scalar; //**custom datatype: Scalar is float
typedef Eigen::Matrix<Scalar, 3, 1> Vec3; //***Vec3 is a custom datatype (specific kind)
typedef Eigen::Matrix<Scalar, 2, 1> Vec2;
typedef unsigned char uchar;
class Ray
{
private:
Vec3 mPosition_; //point
public:
Ray() {}
//constuctor, when we construct ray we get mPosition_
Ray(Vec3 mPosition) : mPosition_(mPosition) {
//
}
float t;
Vec3 mPosition() const {
return mPosition_;
}
public:
inline Vec3 generateRay(Vec3 const& pt) {
Vec3 origin = mPosition_;
Vec3 direction = pt - mPosition_; // d = s -e, pt is pixel Position
direction.normalize();
return pt + t * direction;
}
};
main.cpp
#include <cmath>
#include "Image.h"
#include "Ray.h"
#include "../build/raytrace/Plane.h"
//Color functions
using Colour = cv::Vec3b; // RGB Value
//Color is a Vec3b datatype, use Color instead of Vec3b, it has 3 vectors, hold 3 values b/w 0-255
Colour red() { return Colour(255, 0, 0); }
Colour green() { return Colour(0, 255,0); }
Colour blue() { return Colour(0, 0, 255); }
Colour white() { return Colour(255, 255, 255); }
Colour black() { return Colour(0, 0, 0); }
//bounding the channel wise pixel color between 0 to 255
//bounding the color value, if a value is beyond 255 clamp it to 255, and any value below 0 clamp to 0.
uchar Clamp(int color)
{
if (color < 0) return 0;
if (color >= 255) return 255;
return color;
}
int main(int, char**){
//Create an image object with 500 x 500 resolution.
Image image = Image(500, 500);
//Coordinates of image rectangle
Vec3 llc = Vec3(-1, -1, -1); //**llc - lower left corner
Vec3 urc = Vec3(1, 1, -1); //**urc - upper right corner
int width = urc(0) - llc(0);
int height = urc(1) - llc(1);
Vec2 pixelUV = Vec2((float)width / image.cols, (float)height / image.rows);
/// TODO: define camera position (view point), sphere center, sphere radius (Weightage: 5%)
Vec3 CameraPoint = Vec3(0, 0, 0); //**it is the origin
Vec3 SphereCenter = Vec3(0, 0, -5); //**it is the Sphere Position
float SphereRadius = 2.0;
Vec3 LightSource = Vec3(2.0, 0.0, 3.0); //**
Vec3 ambient = Vec3(0, 0, 0.5); //**
Vec3 diffuse = Vec3(224, 180, 255); //** 0, 255, 100 - green
Vec3 Origin = CameraPoint;
//end
for (int row = 0; row < image.rows; ++row) {
for (int col = 0; col < image.cols; ++col) {
//TODO: Build primary rays
//Find the pixel position (PixelPos) for each row and col and then construct the vector PixelPos-Origin
Vec3 pixelPos = Vec3(llc(0) + pixelUV(0) * (col + 0.5), llc(1) + pixelUV(1) * (row + 0.5), -1);
//create a ray object
Ray r; //**
//Vec3 rayDir = pixelPos - Origin; //**direction of the ray
Vec3 rayDir = r.generateRay(pixelPos); //**pixelPos-Origin
rayDir.normalize(); //**normalize the ray direction vector
//Ray-sphere intersection...(refer to the lecture slides and Section 4.4.1 of the textbook)
float a = rayDir.dot(rayDir);
Vec3 s0_r0 = Origin - SphereCenter; //***s0_r0 - sphere center - ray origin
float b = 2.0 * rayDir.dot(s0_r0);
float c = s0_r0.dot(s0_r0) - pow(SphereRadius, 2);
//compute the discriminant
float discriminant = pow(b, 2) - 4 * a * c;
//if the discriminant is greater than zero
if(discriminant > 0){
//find roots t1 and t2
float t1 = (-b - sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**
float t2 = (-b + sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**
//determine which one is the real intersection point
float t;
//Sphere s;
if (t1 < t2 && (t1 > 0 && t2 > 0)) {
t = t1;
//} //should this be after the if-statement below, so that it uses t = t1 and not just float t.????
if (t > 0) {
//Shade the pixel, normal is Intersection - SphereCenter, LightVector is LightSource- Intersection, make sure to normalize the vectors
Vec3 Intersection = Origin + (t * rayDir);
Vec3 Normal = Intersection - SphereCenter; //** normalize
Normal.normalize(); //**
Vec3 LightVector = LightSource - Intersection; //**normalize
LightVector.normalize(); //**
float diffuseTerm = LightVector.dot(Normal);
if (diffuseTerm < 0) diffuseTerm = 0;
Colour colour(0, 0, 0); //The ambient base
colour[0] = Clamp(ambient[0] + diffuse[0] * diffuseTerm);
colour[1] = Clamp(ambient[1] + diffuse[1] * diffuseTerm);
colour[2] = Clamp(ambient[2] + diffuse[2] * diffuseTerm);
image(row, col) = colour;
}
}//
else {
image(row, col) = black();
}
} else {
//No intersection, discriminant < 0
image(row, col) = red(); //**makes blue background colour
}
////**Plane intersection
//create a plane object
Plane plane(Vec3(-5, 0, -4), Vec3(0, 0, -1), black());
//Plane plane;
////if ray hits plane -> color black
//if (plane.findIntersection(rayDir) == 1) {
// image(row, col) = black();
//}
//else {
// image(row, col) = white();
//}
}
}
/// Required outputs: (1) Ray traced image of a sphere (2) Ray traced image when the camera is placed inside the sphere (complete black)
image.save("./result.png");
image.display();
return EXIT_SUCCESS;
}
Errors
enter image description here
#include is a shockingly simple directive. It literally just copy-pastes the content of the file.
main.cpp includes both Ray.h and Plane.h, and Plane.h includes Ray.h, so Ray.h ends up being included twice. That's why the compiler is complaining about a "class redefinition".
You can add #pragma once at the top of all your header files to let the compiler know know to skip the file if it was included already.
N.B. #pragma once is not officially part of the language, but it is supported by all compilers and has a few small advantages over the alternative.
This is what happens when I draw switching from the black texture to the lime green one in a simple for loop. It seems to have bits from the previously drawn texture.
Here's a simplified version of how my renderer works
Init(): Create my VAO and attrib pointers and generate element buffer and indicies
Begin(): Bind my vertex buffer and map the buffer pointer
Draw(): Submit a renderable to draw which gets 4 vertecies in the vertex buffer each get a position, color, texCoords, and a Texture Slot
End(): I delete the buffer pointer, bind my VAO, IBO, and textures to their active texture slots and draw the elements.
I do this every frame (except init). What I don't understand is if I draw PER TEXTURE, only having one active then this doesn't happen. It's when I have multiple active textures and they are bound.
Here's my renderer
void Renderer2D::Init()
{
m_Textures.reserve(32);
m_VertexBuffer.Create(nullptr, VERTEX_BUFFER_SIZE);
m_Layout.PushFloat(2); //Position
m_Layout.PushUChar(4); //Color
m_Layout.PushFloat(2); //TexCoords
m_Layout.PushFloat(1); //Texture ID
//VA is bound and VB is unbound
m_VertexArray.AddBuffer(m_VertexBuffer, m_Layout);
unsigned int* indices = new unsigned int[INDEX_COUNT];
int offset = 0;
for (int i = 0; i < INDEX_COUNT; i += 6)
{
indices[i + 0] = offset + 0;
indices[i + 1] = offset + 1;
indices[i + 2] = offset + 2;
indices[i + 3] = offset + 2;
indices[i + 4] = offset + 3;
indices[i + 5] = offset + 0;
offset += 4;
}
m_IndexBuffer.Create(indices, INDEX_COUNT);
m_VertexArray.Unbind();
}
void Renderer2D::Begin()
{
m_VertexBuffer.Bind();
m_Buffer = (VertexData*)m_VertexBuffer.GetBufferPointer();
}
void Renderer2D::Draw(Renderable2D& renderable)
{
const glm::vec2& position = renderable.GetPosition();
const glm::vec2& size = renderable.GetSize();
const Color& color = renderable.GetColor();
const glm::vec4& texCoords = renderable.GetTextureRect();
const float tid = AddTexture(renderable.GetTexture());
DT_CORE_ASSERT(tid != 0, "TID IS EQUAL TO ZERO");
m_Buffer->position = glm::vec2(position.x, position.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.x, texCoords.y);
m_Buffer->tid = tid;
m_Buffer++;
m_Buffer->position = glm::vec2(position.x + size.x, position.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.z, texCoords.y);
m_Buffer->tid = tid;
m_Buffer++;
m_Buffer->position = glm::vec2(position.x + size.x, position.y + size.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.z, texCoords.w);
m_Buffer->tid = tid;
m_Buffer++;
m_Buffer->position = glm::vec2(position.x, position.y + size.y);
m_Buffer->color = color;
m_Buffer->texCoord = glm::vec2(texCoords.x, texCoords.w);
m_Buffer->tid = tid;
m_Buffer++;
m_IndexCount += 6;
}
void Renderer2D::End()
{
Flush();
}
const float Renderer2D::AddTexture(const Texture2D* texture)
{
for (int i = 0; i < m_Textures.size(); i++) {
if (texture == m_Textures[i]) // Compares memory addresses
return i + 1; // Returns the texture id plus one since 0 is null texture id
}
// If the texture count is already at or greater than max textures
if (m_Textures.size() >= MAX_TEXTURES)
{
End();
Begin();
}
m_Textures.push_back((Texture2D*)texture);
return m_Textures.size();
}
void Renderer2D::Flush()
{
m_VertexBuffer.DeleteBufferPointer();
m_VertexArray.Bind();
m_IndexBuffer.Bind();
for (int i = 0; i < m_Textures.size(); i++) {
glActiveTexture(GL_TEXTURE0 + i);
m_Textures[i]->Bind();
}
glDrawElements(GL_TRIANGLES, m_IndexCount, GL_UNSIGNED_INT, NULL);
m_IndexBuffer.Unbind();
m_VertexArray.Unbind();
m_IndexCount = 0;
m_Textures.clear();
}
Here's my fragment shader
#version 330 core
out vec4 FragColor;
in vec4 ourColor;
in vec2 ourTexCoord;
in float ourTid;
uniform sampler2D textures[32];
void main()
{
vec4 texColor = ourColor;
if(ourTid > 0.0)
{
int tid = int(ourTid - 0.5);
texColor = ourColor * texture(textures[tid], ourTexCoord);
}
FragColor = texColor;
}
I appreciate any help, let me know if you need to see more code
i don't know if you need this anymore but for the the record
you have a logical problem in your fragment code
let's think if your "ourTid" bigger than 0 let's take 1.0f for example
you subtract 0.5f , we cast it to int(0.5) it's 0 for sure now let's say that we need the texture number 2 and do the same process 2-0.5 = 1.5 "cast it to int" = 1
definitely you will have the previous texture every time
now the solution is easy you should add 0.5 instead of subtract it to be sure that the numbers interpolation is avoided and you got the correct texture.
I created some mountains and a grid-floor on a synthwave style using OpenGL. The only effect created on post-process is slight bloom, but something is wrong with lines:
Lines quickly begin to be drawn very poorly when i look further. sometimes they are not drawn , or just a piece. These lines are even drawn differently if i rotate the camera.
And it get worse the more i stick the camera to the floor:
I tried several things : disable anti-alisiasing of my GC (NVidia), set every filter texture to GL_LINEAR instead of GL_NEAREST, a 32 bits precision of depth buffer instead of 24. None of them worked.
What could be wrong?
Here's the code , i tried to remove as much code as i could
The init function:
void initBase(int argc, char* argv[]) {
YLogConsole::createInstance();
glutInit(&argc, argv);
glutSetOption(
GLUT_ACTION_ON_WINDOW_CLOSE,
GLUT_ACTION_GLUTMAINLOOP_RETURNS
);
glutInitWindowSize(BaseWidth, BaseHeight);
glutInitWindowPosition(0, 0);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
YLog::log(YLog::ENGINE_INFO, (toString(argc) + " arguments en ligne de commande.").c_str());
FullScreen = false;
for (int i = 0; i<argc; i++)
{
if (argv[i][0] == 'f')
{
YLog::log(YLog::ENGINE_INFO, "Arg f mode fullscreen.\n");
FullScreen = true;
}
}
MainWindowId = glutCreateWindow("Yocto");
glutReshapeWindow(BaseWidth, BaseHeight);
setFullScreen(FullScreen);
if (MainWindowId < 1)
{
YLog::log(YLog::ENGINE_ERROR, "Erreur creation de la fenetre.");
exit(EXIT_FAILURE);
}
GLenum glewInitResult = glewInit();
if (glewInitResult != GLEW_OK)
{
YLog::log(YLog::ENGINE_ERROR, ("Erreur init glew " + std::string((char*)glewGetErrorString(glewInitResult))).c_str());
exit(EXIT_FAILURE);
}
//Affichage des capacités du système
YLog::log(YLog::ENGINE_INFO, ("OpenGL Version : " + std::string((char*)glGetString(GL_VERSION))).c_str());
glutDisplayFunc(updateBase);
glutReshapeFunc(resizeBase);
glutKeyboardFunc(keyboardDown);
glutKeyboardUpFunc(keyboardUp);
glutSpecialFunc(specialDown);
glutSpecialUpFunc(specialUp);
glutMouseFunc(mouseClick);
glutMotionFunc(mouseMoveActive);
glutPassiveMotionFunc(mouseMovePassive);
glutIgnoreKeyRepeat(1);
//Initialisation du YRenderer
Renderer = YRenderer::getInstance();
Renderer->setRenderObjectFun(renderObjectsBase);
Renderer->setRender2DFun(render2dBase);
Renderer->setBackgroundColor(YColor());
Renderer->initialise(&TimerGPURender);
//On applique la config du YRenderer
glViewport(0, 0, Renderer->ScreenWidth, Renderer->ScreenHeight);
Renderer->resize(Renderer->ScreenWidth, Renderer->ScreenHeight);
//Ecrans de jeu
ScreenManager = new GUIScreenManager();
uint16 x = 10;
uint16 y = 10;
ScreenJeu = new GUIScreen();
ScreenStats = new GUIScreen();
//Bouton pour afficher les params
GUIBouton * btn = new GUIBouton();
btn->Titre = std::string("Params");
btn->X = x;
btn->Y = y;
btn->setOnClick(clickBtnParams);
ScreenJeu->addElement(btn);
y += btn->Height + 5;
btn = new GUIBouton();
btn->Titre = std::string("Stats");
btn->X = x;
btn->Y = y;
btn->setOnClick(clickBtnStats);
ScreenJeu->addElement(btn);
y += btn->Height + 1;
//Ecran de stats
y = btn->Height + 15;
LblFps = new GUILabel();
LblFps->Text = "FPS";
LblFps->X = x;
LblFps->Y = y;
LblFps->Visible = true;
ScreenStats->addElement(LblFps);
//Ecran de parametrage
x = 10;
y = 10;
ScreenParams = new GUIScreen();
GUIBouton * btnClose = new GUIBouton();
btnClose->Titre = std::string("Close");
btnClose->X = x;
btnClose->Y = y;
btnClose->setOnClick(clickBtnClose);
ScreenParams->addElement(btnClose);
ScreenStats->addElement(btnClose);
//Ecran a rendre
ScreenManager->setActiveScreen(ScreenJeu);
//Init YCamera
Renderer->Camera->setPosition(YVec3f(320, 320, 320));
Renderer->Camera->setLookAt(YVec3f(0, 0, 0));
Renderer->Camera->setProjectionPerspective(Instance->Fov,
(float)Instance->Renderer->ScreenWidth / (float)Instance->Renderer->ScreenHeight,
Instance->NearPlane, Instance->FarPlane);
//Init YTimer
Timer = new YTimer();
//Chargement des shaders
Instance->loadShaders();
//Init pour classe fille
init();
//On start le temps
Timer->start();
YLog::log(YLog::ENGINE_INFO, "[ Yocto initialized ]\nPress : \n - f to toggle fullscreen\n - F1 for png screen shot\n - F5 to hot-reload shaders");
}
The main loop:
void SynthEngine::renderObjects()
{
Renderer->updateMatricesFromOgl();
glUseProgram(shaderWorld);
Renderer->sendMatricesToShader(shaderWorld);
dec->getGround()->render();
}
UpdateMatriceFromOgl:
void updateMatricesFromOgl() {
float matMvTab[16];
glGetFloatv(GL_MODELVIEW_MATRIX, matMvTab);
memcpy(MatMV.Mat.t, matMvTab, 16 * sizeof(float));
MatMV.transpose();
float matProjTab[16];
glGetFloatv(GL_PROJECTION_MATRIX, matProjTab);
memcpy(MatP.Mat.t, matProjTab, 16 * sizeof(float));
MatP.transpose();
MatMVP = MatP;
MatMVP *= MatMV;
MatV.createViewMatrix(Camera->Position, Camera->LookAt, Camera->UpVec);
MatIV = MatV;
MatIV.invert();
MatM = MatIV;
MatM *= MatMV;
MatIM = MatM;
MatIM.invert();
MatNorm = MatM;
MatNorm.invert();
MatNorm.transpose();
MatIP = MatP;
MatIP.invert();
}
The render function (VBO) , textureIndex and textureCubeIndex are always 0:
void YVbo::render(GBuffer * inBuffer) {
//La stat globales
YRenderer::NbVBOFacesRendered += NbVertices / 3;
if (textureIndex)
{
glBindTexture(GL_TEXTURE_2D, textureIndex);
}
if (textureCubeIndex)
{
glBindTexture(GL_TEXTURE_CUBE_MAP, textureCubeIndex);
}
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
for (int i = 0; i<NbElements; i++)
glEnableVertexAttribArray(i);
if (StorageMethod == PACK_BY_ELEMENT_TYPE) {
for (int i = 0; i<NbElements; i++)
glVertexAttribPointer(i, Elements[i].NbFloats, GL_FLOAT, GL_FALSE, 0, (void*)(Elements[i].OffsetFloats * sizeof(float)));
} else {
for (int i = 0; i<NbElements; i++)
glVertexAttribPointer(i, Elements[i].NbFloats, GL_FLOAT, GL_FALSE, TotalNbFloatForOneVertice * sizeof(float), (void*)(Elements[i].OffsetFloats * sizeof(float)));
}
YEngine::Instance->TimerGPURender.startAccumPeriod();
glDrawArrays(GL_TRIANGLES, 0, NbVertices);
YEngine::Instance->TimerGPURender.endAccumPeriod();
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glBindTexture(GL_TEXTURE_2D, 0);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
}
vertex shader of ShaderWorld :
#version 400
uniform mat4 mvp;
uniform float elapsed;
layout(location = 0) in vec3 position_in;
layout(location = 1) in vec4 color_border_in;
layout(location = 2) in vec4 color_fill_in;
out VertexAttrib
{
vec4 colorFill;
vec4 colorBorder;
} vertex;
void main()
{
gl_Position = mvp * vec4(position_in, 1);
vertex.colorBorder = color_border_in;
vertex.colorFill = color_fill_in;
}
geometry shader
#version 400
out vec4 color_border;
out vec4 color_fill;
out vec3 bary;
in VertexAttrib
{
vec4 colorFill;
vec4 colorBorder;
} vertex[];
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
void main()
{
for (int i = 0; i < 3; i++)
{
color_border = vertex[i].colorBorder;
color_fill = vertex[i].colorFill;
gl_Position = gl_in[i].gl_Position;
if (i == 0)
bary = vec3(0, 0, 1);
if (i == 1)
bary = vec3(0, 1, 0);
if (i == 2)
bary = vec3(1, 0, 0);
EmitVertex();
}
EndPrimitive();
}
fragment shader :
#version 400
in vec4 color_border;
in vec4 color_fill;
in vec3 bary;
layout (location = 0) out vec4 color;
layout (location = 1) out vec4 passColor;
float toleranceLight = 0.7;
void main()
{
vec4 interColor;
if ((bary.x) < 0.01 || (bary.y) < 0.01 || ((bary.z) < 0.01 && color_border.r == 0))
{
interColor = color_border;
}
else
{
interColor = color_fill;
}
if (max(interColor.r,max(interColor.g, interColor.b)) > toleranceLight)
{
passColor = interColor;
}
else
{
passColor = vec4(0,0,0,1);
}
color = interColor;
}
Your main issue here is the perspective interpolation of vec3 bary and its boolean nature causing artifacts around the edges of color_border and color_fill. Consider some sort of interpolation of between the edge and fill color based on the bary interpolant.
Alternatively, you can consider mapping a mask texture indicating edges vs. fill. You'll need ensure that you generate mipmaps and use it at runtime with an anisotropic filter.
On a separate note, you don't need a geometry shader at all in this case. Just use gl_VertexID % 3 directly from the vertex shader and output bary from there.
I am attempting to do some processing in the pixel shader on a texture. The data for the texture is coming from a memory chunk of 8 bit data. The problem I am facing is how to read the data in the shader.
Code to create the texture and ressource view:
In OnD3D11CreateDevice:
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Height = 480;
tDesc.Width = 640;
tDesc.Usage = D3D11_USAGE_DYNAMIC;
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Format = DXGI_FORMAT_R8_UINT;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tDesc.MiscFlags = 0;
V_RETURN(pd3dDevice->CreateTexture2D(&tDesc, NULL, &g_pCurrentImage));
D3D11_SHADER_RESOURCE_VIEW_DESC rvDesc;
g_pCurrentImage->GetDesc(&tDesc);
rvDesc.Format = DXGI_FORMAT_R8_UINT;
rvDesc.Texture2D.MipLevels = tDesc.MipLevels;
rvDesc.Texture2D.MostDetailedMip = tDesc.MipLevels - 1;
rvDesc.ViewDimension = D3D_SRV_DIMENSION_TEXTURE2D;
V_RETURN(pd3dDevice->CreateShaderResourceView(g_pCurrentImage, &rvDesc, &g_pImageRV)); </code>
in OnD3D11FrameRender:
HRESULT okay;
if( !g_updateDone ) {
D3D11_MAPPED_SUBRESOURCE resource;
resource.pData = mImage.GetData();
resource.RowPitch = 640;
resource.DepthPitch = 1;
okay = pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
g_updateDone = true;
}
pd3dImmediateContext->PSSetShaderResources(0, 1, &g_pImageRV);
This returns no errors so far, everything seems to work.
The HLSL Shader:
//-----
// Textures and Samplers
//-----
Texture2D <int> g_txDiffuse : register( t0 );
SamplerState g_samLinear : register( s0 );
//-----
// shader input/output structure
//-----
struct VS_INPUT
{
float4 Position : POSITION; // vertex position
float2 TextureUV : TEXCOORD0;// vertex texture coords
};
struct VS_OUTPUT
{
float4 Position : SV_POSITION; // vertex position
float2 TextureUV : TEXCOORD0; // vertex texture coords
};
//-----
// Vertex shader
//-----
VS_OUTPUT RenderSceneVS( VS_INPUT input )
{
VS_OUTPUT Output;
Output.Position = input.Position;
Output.TextureUV = input.TextureUV;
return Output;
}
//-----
// Pixel Shader
//-----
float4 RenderScenePS( VS_OUTPUT In ) : SV_TARGET
{
int3 loc;
loc.x = 0;
loc.y = 0;
loc.z = 1;
int r = g_txDiffuse.Load(loc);
//float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, 1);
}
The thing is, I can't even debug it in PIX to see what r results in, because even with Shader optimization disabled, the line int r = ... is never reached
I tested
float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, fTest);
but this would result in "cannot map expression to pixel shader instruction set", even though it's a float.
So how do I read and use 8bit integers from a texture, and if possible, with no sampling at all.
Thanks for any feedback.
Oh my this is a really old question, I thought it said 2012!
But anyway as it's still open:
Due to the nature of GPU's being optimised for floating point arithmetic, you probably wont get a great deal of performance advantage by using a Texture2D<int> over a Texture2D<float>.
You could attempt to use a Texture2D<float> and then try:
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, g_txDiffuse.Load(loc));
loc.z = 1;
Should be 0 here, because texture mip levels is 1 in your case, and mipmaps start at 0 in HLSL for Load intrinsic.
I am trying to apply a texture to a quad, but I only get a black box instead of the texture. I am using DevIL to load images from files and OpenGL does the rest.
Here is what I am doing so far:
The following class abstracts the DevIL representation for an image.
#include "Image.h"
Image::Image()
{
ilGenImages(1, &this->imageId);
}
Image::~Image()
{
ilDeleteImages(1, &this->imageId);
}
ILint Image::getWidth()
{
return this->width;
}
ILint Image::getHeight()
{
return this->height;
}
ILint Image::getDepth()
{
return this->depth;
}
ILint Image::getBpp()
{
return this->bpp;
}
ILint Image::getFormat()
{
return this->format;
}
ILubyte* Image::getData()
{
return ilGetData();
}
bool Image::loadFromFile(wchar_t *filename)
{
// Load the image from file.
ILboolean retval = ilLoadImage(filename);
if (!retval) {
ILenum error;
while ((error = ilGetError()) != IL_NO_ERROR) {
wcout << error << L" " << iluErrorString(error);
}
return false;
}
this->width = ilGetInteger(IL_IMAGE_WIDTH);
this->height = ilGetInteger(IL_IMAGE_HEIGHT);
this->depth = ilGetInteger(IL_IMAGE_DEPTH);
this->bpp = ilGetInteger(IL_IMAGE_BPP);
this->format = ilGetInteger(IL_IMAGE_FORMAT);
return true;
}
bool Image::convert()
{
ILboolean retval = ilConvertImage(IL_RGBA, IL_UNSIGNED_BYTE);
if (!retval) {
ILenum error;
while ((error = ilGetError()) != IL_NO_ERROR) {
wcout << error << L" " << iluErrorString(error);
}
return false;
}
return true;
}
bool Image::scale(ILint width, ILint height, ILint depth)
{
ILboolean retval = iluScale(width, height, depth);
if (!retval) {
ILenum error;
while ((error = ilGetError()) != IL_NO_ERROR) {
wcout << error << L" " << iluErrorString(error);
}
return false;
}
return true;
}
void Image::bind()
{
ilBindImage(this->imageId);
}
This class abstracts the texture representation for OpenGL.
#include "Texture.h"
Texture::Texture(int width, int height)
{
glGenTextures(1, &this->textureId);
this->width = width;
this->height = height;
}
int Texture::getWidth()
{
return this->width;
}
int Texture::getHeight()
{
return this->height;
}
void Texture::initFilter()
{
// We will use linear interpolation for magnification filter.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// We will use linear interpolation for minifying filter.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
void Texture::unpack()
{
glPixelStoref(GL_UNPACK_ALIGNMENT, 1);
}
void Texture::bind()
{
glBindTexture(GL_TEXTURE_2D, this->textureId);
}
Texture::~Texture()
{
glDeleteTextures(1, &this->textureId);
}
The following class contains the texture loading process.
#include "TextureLoader.h"
void TextureLoader::initialize()
{
if (ilGetInteger(IL_VERSION_NUM) < IL_VERSION) {
debug("Wrong DevIL version detected.");
return;
}
ilInit();
ilutRenderer(ILUT_OPENGL);
}
Texture* TextureLoader::createTexture(wchar_t *filename, Color *color)
{
// Generate some space for an image and bind it.
Image *image = new Image();
image->bind();
bool retval = image->loadFromFile(filename);
if (!retval) {
debug("Could not load image from file.");
return 0;
}
retval = image->convert();
if (!retval) {
debug("Could not convert image from RGBA to unsigned byte");
}
int pWidth = getNextPowerOfTwo(image->getWidth());
int pHeight = getNextPowerOfTwo(image->getHeight());
int size = pWidth * pHeight;
retval = image->scale(pWidth, pHeight, image->getDepth());
if (!retval) {
debug("Could not scale image from (w: %i, h: %i) to (w: %i, h: %i) with depth %i.", image->getWidth(), image->getHeight(), pWidth, pHeight, image->getDepth());
return 0;
}
// Generate some space for a texture and bind it.
Texture *texture = new Texture(image->getWidth(), image->getHeight());
texture->bind();
// Set the interpolation filters.
texture->initFilter();
// Unpack pixels.
texture->unpack();
ILubyte *imageData = image->getData();
TextureLoader::setColorKey(imageData, size, new Color(0, 0, 0));
TextureLoader::colorize(imageData, size, new Color(255, 0, 0));
debug("bpp: %i", image->getBpp());
debug("width: %i", image->getWidth());
debug("height: %i", image->getHeight());
debug("format: %i", image->getFormat());
// Map image data to texture data.
glTexImage2D(GL_TEXTURE_2D, 0, image->getBpp(), image->getWidth(), image->getHeight(), 0, image->getFormat(), GL_UNSIGNED_BYTE, imageData);
delete image;
return texture;
}
void TextureLoader::setColorKey(ILubyte *imageData, int size, Color *color)
{
for (int i = 0; i < size * 4; i += 4)
{
if (imageData[i] == color->r && imageData[i + 1] == color->g && imageData[i + 2] == color->b)
{
imageData[i + 3] = 0;
}
}
}
void TextureLoader::colorize(ILubyte *imageData, int size, Color *color)
{
for (int i = 0; i < size * 4; i += 4)
{
int rr = (int(imageData[i]) * int(color->r)) >> 8;
int rg = (int(imageData[i + 1]) * int(color->g)) >> 8;
int rb = (int(imageData[i + 2]) * int(color->b)) >> 8;
int fak = int(imageData[i]) * 5 - 4 * 256 - 138;
if (fak > 0)
{
rr += fak;
rg += fak;
rb += fak;
}
rr = rr < 255 ? rr : 255;
rg = rg < 255 ? rg : 255;
rb = rb < 255 ? rb : 255;
imageData[i] = rr > 0 ? (GLubyte) rr : 1;
imageData[i + 1] = rg > 0 ? (GLubyte) rg : 1;
imageData[i + 2] = rb > 0 ? (GLubyte) rb : 1;
}
}
The last class does the drawing.
#include "Texturizer.h"
void Texturizer::draw(Texture *texture, float x, float y, float angle)
{
// Enable texturing.
glEnable(GL_TEXTURE_2D);
// Bind the texture for drawing.
texture->bind();
// Enable alpha blending.
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
int width = texture->getWidth();
int height = texture->getHeight();
// Create centered dimension vectors.
b2Vec2 vertices[4];
vertices[0] = 0.5f * b2Vec2(- width, - height);
vertices[1] = 0.5f * b2Vec2(+ width, - height);
vertices[2] = 0.5f * b2Vec2(+ width, + height);
vertices[3] = 0.5f * b2Vec2(- width, + height);
b2Mat22 matrix = b2Mat22();
matrix.Set(angle);
glBegin(GL_QUADS);
for (int i = 0; i < 4; i++) {
float texCoordX = i == 0 || i == 3 ? 0.0f : 1.0f;
float texCoordY = i < 2 ? 0.0f : 1.0f;
glTexCoord2f(texCoordX, texCoordY);
// Rotate and move vectors.
b2Vec2 vector = b2Mul(matrix, vertices[i]) + meter2pixel(b2Vec2(x, y));
glVertex2f(vector.x, vector.y);
}
glEnd();
glDisable(GL_BLEND);
glDisable(GL_TEXTURE_2D);
}
Last but not least, the following method initializes OpenGL (and triggers the initialization of DevIL):
void GraphicsEngine::initialize(int argc, char **argv)
{
// Initialize the window.
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(WIDTH, HEIGHT);
// Set shading model.
glShadeModel(GL_SMOOTH);
// Create the window.
this->mainWindow = glutCreateWindow(TITLE);
// Set keyboard methods.
glutKeyboardFunc(&onKeyDownCallback);
glutKeyboardUpFunc(&onKeyUpCallback);
glutSpecialFunc(&onSpecialKeyDownCallback);
glutSpecialUpFunc(&onSpecialKeyUpCallback);
// Set mouse callbacks.
glutMouseFunc(&onMouseButtonCallback);
#ifdef FREEGLUT
glutMouseWheelFunc(&onMouseWheelCallback);
#endif
glutMotionFunc(&onMouseMotionCallback);
glutPassiveMotionFunc(&onMousePassiveMotionCallback);
// Set display callbacks.
glutDisplayFunc(&onDrawCallback);
glutReshapeFunc(&onReshapeCallback);
// Set a timer to control the frame rate.
glutTimerFunc(FRAME_PERIOD, onTimerTickCallback, 0);
// Set clear color.
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
Camera::getInstance()->subscribe(this);
// Initialize texture loader.
TextureLoader::initialize();
}
The image I am using already worked for another OpenGL/DevIL project, so it cannot be the source of the problem.
The texture is created inside of every class which represents a world object (it's a game...). The character is called Blobby and here are the most important parts of its implementation:
#include "Blobby.h"
Blobby::Blobby()
{
this->isJumping = false;
this->isRotating = false;
this->isWalking = false;
this->isDucking = false;
this->isStandingUp = false;
this->isOnGround = false;
this->isTouchingWall = false;
this->angle = 0;
this->direction = DIRECTION_UNKNOWN;
this->wallDirection = DIRECTION_UNKNOWN;
// Create a red blobby texture.
this->texture = TextureLoader::createTexture(L"D:/01.bmp", new Color(255, 0, 0));
ContactListener::getInstance()->subscribe(this);
}
void Blobby::draw()
{
GraphicsEngine::drawString(35, 40, "isOnGround = %s", this->isOnGround ? "true" : "false");
GraphicsEngine::drawString(35, 55, "inJumping = %s", this->isJumping ? "true" : "false");
GraphicsEngine::drawString(35, 70, "isRotating = %s", this->isRotating ? "true" : "false");
GraphicsEngine::drawString(35, 85, "isTouchingWall = %s (%i)", this->isTouchingWall ? "true" : "false", this->wallDirection);
Texturizer::draw(this->texture, this->getBody(0)->GetPosition().x, this->getBody(0)->GetPosition().y, this->getBody(0)->GetAngle());
AbstractEntity::draw(); // draws debug information... not important
}
The OpenGL timer callback calls a step method which ends here:
void Simulator::step()
{
// Update physics.
this->gameWorld->step();
b2Vec2 p = Camera::convertWorldToScreen(meter2pixel(this->cameraBlobby->getBody(0)->GetPosition().x), 300.0f);
if (p.x < 300) {
Camera::getInstance()->setViewCenter(Camera::convertScreenToWorld(400 - (300 - int(p.x)), 300));
} else if (p.x > 500) {
Camera::getInstance()->setViewCenter(Camera::convertScreenToWorld(400 + (int(p.x) - 500), 300));
}
for (unsigned int i = 0; i < this->gameWorld->getEntityCount(); i++) {
IEntity *entity = this->gameWorld->getEntity(i);
entity->draw();
}
}
IEntity is a pure virtual class (i.e. interface), AbstractEntity implements this interface and adds global methods. Blobby inherits from AbstractEntity and adds routines which are special for this world object.
EDIT:
I have uploaded a more recent version of the code (the whole project incl. dependencies) here:
http://upload.visusnet.de/uploads/BlobbyWarriors-rev19.zip (~9.5 MB)
I'm not familiar with DevIL, but... are you providing the right diffuse color for your vertices? If lighting is enabled, are there some lights pointing on the quad? Does the camera look at the front side of the quad?
EDIT:
You got a bug in the code, but not the one you posted here, but in the version in the archive you linked.
You call glColor3i(255, 255, 255), and it sets the diffuse color to (very nearly) black as expected. glColor3i does not accept the color values in the target (calculation or framebuffer) range. The possible values are scaled to the entire range of the int type. This means the maximum value (1.0 in float) is represented by MAX_INT (2,147,483,647)
, 0 is 0, and -1.0 is MIN_INT (-2,147,483,648). The 255 value you provided represents about 0.000000118, which is very nearly zero.
I believe you intended one of the following (completely equivalent) forms:
glColor3f(1.0, 1.0, 1.0), glColor3ub(255, 255, 255),
glColor3i(2147483647, 2147483647, 2147483647).
What is in the b2Mat22 matrix? Could it be that multiplying by this matrix is causing your vertices to be drawn in a clockwise order, because I think in that case your square's back would be facing you, and the texture might be on the other (invisible) side.
I had an issue like this a long time ago, I think back then it was a problem with the texture dimensions not being an exponent of 2 (128x128, 512x512, etc.). I'm sure they've fixed that by now, but it might be something to try.