I am trying to create a custom QML element derived from QQuickItem. I have overridden QQuickItem::updatePaintNode and now want to draw a line. My code:
QSGNode *StrikeLine::updatePaintNode(QSGNode *oldNode, QQuickItem::UpdatePaintNodeData *)
{
    QSGGeometryNode *node = 0;
    QSGGeometry *geometry;
    QSGFlatColorMaterial *material;

    node = static_cast<QSGGeometryNode *>(oldNode);
    if (!node) {
        node = new QSGGeometryNode;
        geometry = new QSGGeometry(QSGGeometry::defaultAttributes_Point2D(), 2);
        geometry->setDrawingMode(GL_LINES);
        geometry->setLineWidth(3);
        material = new QSGFlatColorMaterial;
        material->setColor(QColor(255, 0, 0));
        node->setGeometry(geometry);
        node->setFlag(QSGNode::OwnsGeometry);
        node->setMaterial(material);
        node->setFlag(QSGNode::OwnsMaterial);
        getColor();
    } else {
        geometry = node->geometry();
        material = static_cast<QSGFlatColorMaterial *>(node->material());
    }

    geometry->vertexDataAsPoint2D()[0].set(p_startPoint.x(), p_startPoint.y());
    geometry->vertexDataAsPoint2D()[1].set(p_endPoint.x(), p_endPoint.y());
    material->setColor(getColor());
    node->markDirty(QSGNode::DirtyGeometry);

    return node;
}
But my line looks ugly: the edges are rough and jagged, like DOS-era graphics. So my question is: how can I apply smooth painting? I know it may involve a shader or something similar, but I cannot find any documentation.
The scene graph supports two types of antialiasing. Primitives such as rectangles and images are antialiased by adding more vertices along the edges of the primitive so that the edges fade out to transparent; this method is called vertex antialiasing. If you request a multisampled OpenGL context, the scene graph will instead prefer multisample-based antialiasing (MSAA).
Vertex antialiasing can produce seams between the edges of adjacent primitives, even when the two edges are mathematically the same. Multisample antialiasing does not.
Multisample Antialiasing
Multisample antialiasing is a hardware feature where the hardware calculates a coverage value per pixel in the primitive. Some hardware can multisample at a very low cost, while other hardware may need both more memory and more GPU cycles to render a frame.
To enable multisample antialiasing, set a QSurfaceFormat with a sample count greater than 0 on the window using QQuickWindow::setFormat():
QQuickView view;
QSurfaceFormat format = view.format();
format.setSamples(16);
view.setFormat(format);
view.show();
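If the window is created from QML instead of through a QQuickView, a minimal sketch assuming Qt 5.4 or later, where a default format can be installed before any window exists:

#include <QGuiApplication>
#include <QSurfaceFormat>

int main(int argc, char *argv[])
{
    // Request 16x MSAA for every Qt Quick window created afterwards.
    QSurfaceFormat format;
    format.setSamples(16);
    QSurfaceFormat::setDefaultFormat(format); // static setter, available since Qt 5.4

    QGuiApplication app(argc, argv);
    // ... set up a QQmlApplicationEngine and load the QML here ...
    return app.exec();
}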
Vertex Antialiasing
Vertex antialiasing can be enabled and disabled on a per-item basis using the Item::antialiasing property. It will work regardless of what the underlying hardware supports and produces higher quality antialiasing, both for normally rendered primitives and also for primitives captured into framebuffer objects.
The downside to using vertex antialiasing is that each primitive with antialiasing enabled will have to be blended. In terms of batching, this means that the renderer needs to do more work to figure out if the primitive can be batched or not and due to overlaps with other elements in the scene, it may also result in less batching, which could impact performance.
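For a standard item this is just a flip of the Item::antialiasing property in QML, or its C++ setter; a minimal sketch, assuming `view` is the QQuickView hosting the scene:

// Enable the built-in vertex antialiasing hint on an existing item.
QQuickItem *root = view.rootObject(); // assumption: 'view' is a QQuickView
root->setAntialiasing(true);

For fully custom geometry produced in updatePaintNode(), however, the item has to implement the antialiasing itself, which is what the rest of this answer does.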
To apply vertex antialiasing to a custom QML element derived from QQuickItem, follow these steps:
1) Create a custom material and an OpenGL shader program.
smoothcolormaterial.h
#include <QSGMaterial>
#include <QSGMaterialShader>

//----------------------------------------------------------------------
class QSGSmoothColorMaterial : public QSGMaterial
{
public:
    QSGSmoothColorMaterial();

    int compare(const QSGMaterial *other) const;

protected:
    virtual QSGMaterialType *type() const;
    virtual QSGMaterialShader *createShader() const;
};

//----------------------------------------------------------------------
class QSGSmoothColorMaterialShader : public QSGMaterialShader
{
public:
    QSGSmoothColorMaterialShader();

    virtual void updateState(const RenderState &state, QSGMaterial *newEffect, QSGMaterial *oldEffect);
    virtual char const *const *attributeNames() const;

private:
    void initialize();

    int m_matrixLoc;
    int m_opacityLoc;
    int m_pixelSizeLoc;
};
smoothcolormaterial.cpp
QSGSmoothColorMaterial::QSGSmoothColorMaterial()
{
    setFlag(RequiresFullMatrixExceptTranslate, true);
    setFlag(Blending, true);
}

int QSGSmoothColorMaterial::compare(const QSGMaterial *other) const
{
    Q_UNUSED(other)
    return 0;
}

QSGMaterialType *QSGSmoothColorMaterial::type() const
{
    static QSGMaterialType type;
    return &type;
}

QSGMaterialShader *QSGSmoothColorMaterial::createShader() const
{
    return new QSGSmoothColorMaterialShader();
}

//----------------------------------------------------------------------
QSGSmoothColorMaterialShader::QSGSmoothColorMaterialShader()
    : QSGMaterialShader()
{
    setShaderSourceFile(QOpenGLShader::Vertex, QStringLiteral(":/shaders/smoothcolor.vert"));
    setShaderSourceFile(QOpenGLShader::Fragment, QStringLiteral(":/shaders/smoothcolor.frag"));
}

void QSGSmoothColorMaterialShader::updateState(const QSGMaterialShader::RenderState &state, QSGMaterial *newEffect, QSGMaterial *oldEffect)
{
    Q_UNUSED(newEffect)

    if (state.isOpacityDirty())
        program()->setUniformValue(m_opacityLoc, state.opacity());

    if (state.isMatrixDirty())
        program()->setUniformValue(m_matrixLoc, state.combinedMatrix());

    if (oldEffect == 0) {
        // The viewport is constant, so set the pixel size uniform only once.
        QRect r = state.viewportRect();
        program()->setUniformValue(m_pixelSizeLoc, 2.0f / r.width(), 2.0f / r.height());
    }
}

const char * const *QSGSmoothColorMaterialShader::attributeNames() const
{
    static char const *const attributes[] = {
        "vertex",
        "vertexColor",
        "vertexOffset",
        0
    };
    return attributes;
}

void QSGSmoothColorMaterialShader::initialize()
{
    m_matrixLoc = program()->uniformLocation("matrix");
    m_opacityLoc = program()->uniformLocation("opacity");
    m_pixelSizeLoc = program()->uniformLocation("pixelSize");
}
Fragment Shader
varying lowp vec4 color;

void main()
{
    gl_FragColor = color;
}
Vertex Shader
uniform highp vec2 pixelSize;
uniform highp mat4 matrix;
uniform lowp float opacity;

attribute highp vec4 vertex;
attribute lowp vec4 vertexColor;
attribute highp vec2 vertexOffset;

varying lowp vec4 color;

void main()
{
    highp vec4 pos = matrix * vertex;
    gl_Position = pos;

    if (vertexOffset.x != 0.) {
        highp vec4 delta = matrix[0] * vertexOffset.x;
        highp vec2 dir = delta.xy * pos.w - pos.xy * delta.w;
        highp vec2 ndir = .5 * pixelSize * normalize(dir / pixelSize);
        dir -= ndir * delta.w * pos.w;
        highp float numerator = dot(dir, ndir * pos.w * pos.w);
        highp float scale = 0.0;
        if (numerator < 0.0)
            scale = 1.0;
        else
            scale = min(1.0, numerator / dot(dir, dir));
        gl_Position += scale * delta;
    }

    if (vertexOffset.y != 0.) {
        highp vec4 delta = matrix[1] * vertexOffset.y;
        highp vec2 dir = delta.xy * pos.w - pos.xy * delta.w;
        highp vec2 ndir = .5 * pixelSize * normalize(dir / pixelSize);
        dir -= ndir * delta.w * pos.w;
        highp float numerator = dot(dir, ndir * pos.w * pos.w);
        highp float scale = 0.0;
        if (numerator < 0.0)
            scale = 1.0;
        else
            scale = min(1.0, numerator / dot(dir, dir));
        gl_Position += scale * delta;
    }

    color = vertexColor * opacity;
}
2) Create a custom AttributeSet for the QSGGeometry.
myquickitem.cpp
namespace
{
    struct Color4ub
    {
        unsigned char r, g, b, a;
    };

    inline Color4ub colorToColor4ub(const QColor &c)
    {
        Color4ub color = { uchar(c.redF() * c.alphaF() * 255),
                           uchar(c.greenF() * c.alphaF() * 255),
                           uchar(c.blueF() * c.alphaF() * 255),
                           uchar(c.alphaF() * 255)
                         };
        return color;
    }

    struct SmoothVertex
    {
        float x, y;
        Color4ub color;
        float dx, dy;

        void set(float nx, float ny, Color4ub ncolor, float ndx, float ndy)
        {
            x = nx; y = ny; color = ncolor;
            dx = ndx; dy = ndy;
        }
    };

    const QSGGeometry::AttributeSet &smoothAttributeSet()
    {
        static QSGGeometry::Attribute data[] = {
            QSGGeometry::Attribute::create(0, 2, GL_FLOAT, true),
            QSGGeometry::Attribute::create(1, 4, GL_UNSIGNED_BYTE, false),
            QSGGeometry::Attribute::create(2, 2, GL_FLOAT, false)
        };
        static QSGGeometry::AttributeSet attrs = { 3, sizeof(SmoothVertex), data };
        return attrs;
    }
}
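As a sanity check (my addition, not part of the original answer): the attribute indices 0, 1, 2 must line up with the order of attributeNames() in the shader ("vertex", "vertexColor", "vertexOffset"), and the struct must match the stride declared in the attribute set:

// Layout check: 2 floats (x, y) + 4 bytes (r, g, b, a) + 2 floats (dx, dy) = 20 bytes.
static_assert(sizeof(SmoothVertex) == 20,
              "SmoothVertex must match the 20-byte stride used by smoothAttributeSet()");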
3) Apply the custom material and geometry to a QSGGeometryNode.
myquickitem.cpp
QSGNode *MyQuickItem::updatePaintNode(QSGNode *oldNode, QQuickItem::UpdatePaintNodeData *data)
{
    QSGGeometryNode *node = 0;
    QSGGeometry *geometry;
    QSGSmoothColorMaterial *material;

    node = static_cast<QSGGeometryNode *>(oldNode);
    if (!node) {
        node = new QSGGeometryNode;
        geometry = new QSGGeometry(smoothAttributeSet(), 0);
        geometry->setDrawingMode(GL_TRIANGLE_STRIP);
        material = new QSGSmoothColorMaterial();
        node->setGeometry(geometry);
        node->setFlag(QSGNode::OwnsGeometry);
        node->setMaterial(material);
        node->setFlag(QSGNode::OwnsMaterial);
    } else {
        geometry = node->geometry();
        material = static_cast<QSGSmoothColorMaterial *>(node->material());
    }
4) Get a pointer to the vertex data.
    int vertexStride = geometry->sizeOfVertex();
    int vertexCount = 8;
    geometry->allocate(vertexCount, 0);
    SmoothVertex *smoothVertices = reinterpret_cast<SmoothVertex *>(geometry->vertexData());
    memset(smoothVertices, 0, vertexCount * vertexStride);
5) Set the vertex data.
You need four corner points.
    float lineWidth = 4;
    float tlX = 0;   float tlY = 0;               // top-left
    float blX = 0;   float blY = 0 + lineWidth;   // bottom-left
    float trX = 500; float trY = 100;             // top-right
    float brX = 500; float brY = 100 + lineWidth; // bottom-right

    float delta = lineWidth * 0.5f;

    Color4ub fillColor = colorToColor4ub(QColor(255, 0, 0, 255));
    Color4ub transparent = { 0, 0, 0, 0 };
To draw an antialiased line, set 8 vertices forming 6 triangles (2 for the line itself, 4 for the antialiasing fringe). Vertices 0 and 2, 1 and 3, 4 and 6, 5 and 7 have the same coordinates but a different color and an opposite vertex offset.
    smoothVertices[0].set(trX, trY, transparent, delta, -delta);
    smoothVertices[1].set(tlX, tlY, transparent, -delta, -delta);
    smoothVertices[2].set(trX, trY, fillColor, -delta, delta);
    smoothVertices[3].set(tlX, tlY, fillColor, delta, delta);
    smoothVertices[4].set(brX, brY, fillColor, -delta, -delta);
    smoothVertices[5].set(blX, blY, fillColor, delta, -delta);
    smoothVertices[6].set(brX, brY, transparent, delta, delta);
    smoothVertices[7].set(blX, blY, transparent, -delta, delta);

    node->markDirty(QSGNode::DirtyGeometry);
    return node;
}
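One detail these snippets assume but do not show: the scene graph only calls updatePaintNode() for items that declare they render content. A minimal sketch of the item class itself (the class and module names are placeholders matching the snippets above):

#include <QQuickItem>

class MyQuickItem : public QQuickItem
{
    Q_OBJECT
public:
    MyQuickItem()
    {
        // Without this flag the scene graph never calls updatePaintNode().
        setFlag(QQuickItem::ItemHasContents, true);
    }

protected:
    QSGNode *updatePaintNode(QSGNode *oldNode, UpdatePaintNodeData *data); // as implemented above
};

// Somewhere in main(), so QML can instantiate the type:
// qmlRegisterType<MyQuickItem>("MyModule", 1, 0, "MyQuickItem");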
Related
I am writing a basic sphere tracer in a fragment shader. Everything works fine if I just color points according to their surface normals, but as soon as I try to implement the reflection algorithm, it takes a bit longer to compile and just renders a black screen. The FPS starts at 1 but quickly climbs to 1000 (the cap I set in code), which tells me the shader is not actually doing anything and is just ignoring my code, since real work should be much slower than that. I initially thought I was hitting the instruction limit of my GPU, but I don't think that's the problem: with the normal-shading code I can set MAX_MARCH to high values like 1000, but with reflections, even with MAX_REFLECTIONS and MAX_AA set to one (which should need a similar instruction count to normal shading, maybe ~50 more, nothing significant), I need to set MAX_MARCH to 1 for it to render; even setting it to two triggers the bug.
Vertex shader:
//Draw a quad over the whole display and calculate the sky color
#version 400 core

in vec3 position;
out vec2 uvPos;
out vec4 backColor;

void main(void){
    gl_Position = vec4(position, 1.0);
    uvPos = position.xy*0.5+0.5;
    backColor = mix(vec4(1, 1, 1, 1), vec4(0.5, 0.7, 1, 1), uvPos.y);
}
Fragment shader:
#version 400 core
#define FLT_MAX 3.402823466e+38
#define FLT_MIN 1.175494351e-38
#define DBL_MAX 1.7976931348623158e+308
#define DBL_MIN 2.2250738585072014e-308
#define PI 3.141592653589793115997963468544185161590576171875
#define MAX_AA 1
#define MAX_MARCH 1000
#define MAX_REFLECTIONS 10
#define MAX_DIST 10
in vec2 uvPos;
in vec4 backColor;
out vec4 outColor;
int randomIterator = 0;
//############################################################ Structure definitions #########################################################################
struct Material{
    int type;
    vec3 albedo;
};

struct Object{
    int type; //1:Sphere, 2:Box
    vec3 center;
    float radius;
    vec3 size;
    Material material;
};

struct Scene{
    Object objects[3];
};

struct Ray{
    vec3 origin;
    vec3 dir;
};

struct HitRecord{
    vec3 p;
    vec3 n;
    Object o;
    Material mat;
    float closest;
};

struct Camera{
    vec3 origin;
    vec3 lowerLeftCorner;
    vec3 horizontal;
    vec3 vertical;
};
//############################################################ Uniforms ####################################################################################
uniform float random[2048];
uniform vec2 resolution;
uniform Camera cam;
uniform Scene scene;
uniform int objectAmount;
//############################################################ Tools
float randf(){
    return random[randomIterator++];
}

Ray getRay(Camera cam, vec2 v){
    return Ray(cam.origin, normalize(cam.lowerLeftCorner+cam.horizontal*v.s+cam.vertical*v.t-cam.origin));
}

vec3 randOnBall(){
    vec3 p;
    do{
        p = vec3(randf(), randf(), randf())*2-1;
    }while(length(p) >= 1); // note: length(p), not p.length() -- the method form returns the component count (3), which would loop forever
    return p;
}
//############################################################ Signed Distance Functions
float sphereSDF(vec3 p, Object o){
    return length(p-o.center)-o.radius;
}

float boxSDF(vec3 p, Object o){
    vec3 q = abs(p-o.center) - o.size;
    return (length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0));
}

float sceneSDF(vec3 p, Scene s){
    float dist = FLT_MAX;
    for(int i = 0; i < objectAmount; i++){
        switch(s.objects[i].type){
            case 1:
                dist = min(dist, sphereSDF(p, s.objects[i]));
                break;
            case 2:
                dist = min(dist, boxSDF(p, s.objects[i]));
                break;
            default:
                break;
        }
    }
    return dist;
}

float sceneSDF(vec3 p, Scene s, inout HitRecord rec){
    float dist = FLT_MAX;
    for(int i = 0; i < objectAmount; i++){
        float tmpDist = FLT_MAX;
        switch(s.objects[i].type){
            case 1:
                tmpDist = sphereSDF(p, s.objects[i]);
                break;
            case 2:
                tmpDist = boxSDF(p, s.objects[i]);
                break;
            default:
                break;
        }
        if(tmpDist < dist){
            dist = tmpDist;
            rec.o = s.objects[i];
            rec.mat = s.objects[i].material;
        }
    }
    return dist;
}
//############################################################ Material Scatter Function
bool scatterDiffuse(Ray r, HitRecord rec, inout vec3 tmpAtt, inout Ray scattered){
    tmpAtt = vec3(rec.mat.albedo);
    scattered = Ray(rec.p, rec.n+randOnBall());
    return true;
}

bool scatter(Ray r, HitRecord rec, inout vec3 tmpAtt, inout Ray scattered){
    //Starting out with diffuse materials; planned to add a switch-case for different materials
    return scatterDiffuse(r, rec, tmpAtt, scattered);
}
//############################################################ Main
vec3 findSceneNormal(Scene s, vec3 p){
    const float h = 0.0001; // replace by an appropriate value
    const vec2 k = vec2(1,-1);
    return normalize( k.xyy*sceneSDF( p + k.xyy*h, s ) +
                      k.yyx*sceneSDF( p + k.yyx*h, s ) +
                      k.yxy*sceneSDF( p + k.yxy*h, s ) +
                      k.xxx*sceneSDF( p + k.xxx*h, s ) );
}

float findSceneIntersect(Ray r, Scene scene, inout HitRecord rec){
    float t = 0.005;
    vec3 p;
    for(int i = 0; i < MAX_MARCH; i++){
        p = r.origin+t*r.dir;
        float dist = abs(sceneSDF(p, scene, rec));
        if(dist < 0.001){
            rec.n = findSceneNormal(scene, p);
            rec.p = p;
            return t;
        }else{
            t += dist;
            if(t >= MAX_DIST){
                rec.p = r.origin+t*r.dir;
                rec.n = vec3(0, 0, 0);
                return -1;
            }
        }
    }
    return -1;
}
vec3 calcColor(Ray r){
    vec3 color;
    Material emptyMat = Material(0, vec3(0));
    Object emptyO = Object(0, vec3(0), 0, vec3(0), emptyMat);
    HitRecord rec = HitRecord(vec3(0), vec3(0), emptyO, emptyMat, 0);
    float t = findSceneIntersect(r, scene, rec);
    int reflections = 0;
    vec3 att = vec3(1, 1, 1);
    for(int ref = 0; ref < MAX_REFLECTIONS; ref++){
        if(t != -1){
            vec3 tmpAtt = vec3(0);
            if(scatter(r, rec, tmpAtt, r)){
                att *= tmpAtt;
                t = findSceneIntersect(r, scene, rec);
                reflections++;
            }else {
                att *= tmpAtt;
                t = -1;
            }
        }else {
            color = backColor.xyz*att;
            break;
        }
    }
    return color;
}
void main(void){
    HitRecord rec = HitRecord(vec3(0), vec3(0), Object(-1, vec3(0), 0, vec3(0), Material(-1, vec3(0))), Material(-1, vec3(1, 1, 1)), 0);
#if 1 //Reflection rendering
    vec3 color = vec3(0);
    for(int s = 0; s < MAX_AA; s++){
        vec2 uv = uvPos+(vec2(randf(), randf())*2-1)/resolution;
        color += calcColor(getRay(cam, uv));
    }
    outColor = vec4(color/MAX_AA, 1);
#else //Coloring based on normals
    Ray r = getRay(cam, uvPos);
    float t = findSceneIntersect(r, scene, rec);
    if(t == -1){
        outColor = backColor;
    }else {
        outColor = vec4(rec.n*0.5+0.5, 1);
    }
#endif
}
Java code where I load and compile the shader (using LWJGL):
private static int loadShader(String file, int type) {
    System.out.println("Loading shader at path: " + file);
    StringBuilder shaderSource = new StringBuilder();
    try {
        BufferedReader reader = new BufferedReader(new FileReader(file));
        String line;
        while ((line = reader.readLine()) != null) {
            shaderSource.append(line).append("//\n");
        }
        reader.close();
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(-1);
    }

    int shaderID = glCreateShader(type);
    glShaderSource(shaderID, shaderSource);
    glCompileShader(shaderID);
    if (glGetShaderi(shaderID, GL_COMPILE_STATUS) == GL_FALSE) {
        System.out.println(glGetShaderInfoLog(shaderID, 500));
        System.err.println("Could not compile shader!");
        System.exit(-1);
    }
    return shaderID;
}
The loadShader function does not report any error (as it does for syntax errors) and does not exit the program, so GL_COMPILE_STATUS is not false.
I am fully aware that nested for loops and my use of conditionals are far from efficient performance-wise, but I would expect the shader to be slow, not completely broken. I am running this on an Intel UHD 630, which according to https://www.intel.ca/content/www/ca/en/support/products/98909/graphics/graphics-for-7th-generation-intel-processors/intel-hd-graphics-630.html supports OpenGL 4.4. Therefore, going by "GLSL maximum number of instructions", I should have access to 65536 instructions in my fragment shader and fully dynamic branching. For these reasons I don't think the instruction limit is the problem, and any help would be greatly appreciated. If you need more information, I'll add it as soon as possible. If the CPU code is necessary I can add it too, but I don't think it's the issue, since changing only the shader is enough to trigger the bug.
Edit 1:
Both glGetShaderInfoLog and glGetProgramInfoLog return nothing when called after the program has been validated and linked.
I created a simple C++ class for drawing a square in OpenGL ES 2.0. It puts a square at a specific place with a specific color and opacity. All is fine except the opacity: I set the color and opacity with the setColor function, expecting 0.0f to be fully transparent and 1.0f fully visible. It seems to work for black, but white stays fully visible no matter what I set the opacity to, and for other colors the opacity is weird too.
At the beginning I set:
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
My class:
GLSquare::GLSquare() {
    vecs = new std::vector<GLfloat>();
    indices = new std::vector<GLshort>();
    colors = new std::vector<GLfloat>();
    indicesCount = 6;

    for (int i = 0; i < 12; i++) {
        vecs->push_back(0.0f);
    }
    for (int i = 0; i < 16; i += 4) {
        colors->push_back(0.0f);
        colors->push_back(0.0f);
        colors->push_back(0.0f);
        colors->push_back(1.0f);
    }

    GLshort ind[] = { 0, 1, 2, 0, 2, 3 };
    for (int i = 0; i < indicesCount; i++) {
        indices->push_back(ind[i]);
    }
}

GLSquare::~GLSquare() {
    delete vecs;
    delete colors;
    delete indices;
}

void GLSquare::draw(Matrix *matrix) {
    glUseProgram(program->id);

    glEnableVertexAttribArray(program->positionHandle);
    glVertexAttribPointer(program->positionHandle, 3, GL_FLOAT, false, 12, &vecs->front());

    glEnableVertexAttribArray(program->colorHandle);
    // Prepare the background coordinate data
    glVertexAttribPointer(program->colorHandle, 4, GL_FLOAT, false, 0, &colors->front());

    glUniformMatrix4fv(program->matrixHandle, 1, false, matrix->projectionAndView);
    glDrawElements(GL_TRIANGLES, indicesCount, GL_UNSIGNED_SHORT, &indices->front());

    glDisableVertexAttribArray(program->positionHandle);
    glDisableVertexAttribArray(program->colorHandle);
}

void GLSquare::set(float left, float top, float width, float height) {
    position[0] = left;
    position[1] = top;
    size[0] = width;
    size[1] = height;

    vecs->at(0) = left;
    vecs->at(1) = top;
    vecs->at(3) = left;
    vecs->at(4) = top + height;
    vecs->at(6) = left + width;
    vecs->at(7) = top + height;
    vecs->at(9) = left + width;
    vecs->at(10) = top;
}

void GLSquare::setColor(Color color, GLfloat opacity) {
    for (int i = 0; i < 16; i += 4) {
        colors->at(i) = color.r;
        colors->at(i + 1) = color.g;
        colors->at(i + 2) = color.b;
        colors->at(i + 3) = opacity;
    }
}
My fragment shader is simple:
precision mediump float;
varying vec4 v_Color;

void main()
{
    gl_FragColor = v_Color;
}
My vertex shader:
uniform mat4 uMVPMatrix;
attribute vec4 vPosition;
attribute vec4 a_Color;
varying vec4 v_Color;

void main()
{
    gl_Position = uMVPMatrix * vPosition;
    v_Color = a_Color;
}
Getting the colorHandle from the shader:
program->colorHandle = glGetAttribLocation(programID, "a_Color");
Here is how it looks with:
square->setColor(Color(1.0f, 1.0f, 1.0f), 0.1f);
(screenshot: a white rectangle in the top-left corner)
And here it is with black color, opacity 0.1:
I found the problem. The code works correctly; another third-party library was overriding glBlendFunc. Fixing that solved the issue.
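If you hit something similar, one way to catch a clobbered blend state is to query it right before the draw call; a small diagnostic sketch using standard OpenGL ES 2.0 queries:

// If this does not print src=0x0302 (GL_SRC_ALPHA) and dst=0x0303
// (GL_ONE_MINUS_SRC_ALPHA), something has overridden glBlendFunc
// between setup and draw.
GLboolean blendEnabled = glIsEnabled(GL_BLEND);
GLint srcRGB = 0, dstRGB = 0;
glGetIntegerv(GL_BLEND_SRC_RGB, &srcRGB);
glGetIntegerv(GL_BLEND_DST_RGB, &dstRGB);
printf("blend enabled=%d src=0x%04x dst=0x%04x\n", blendEnabled, srcRGB, dstRGB);

// Or simply re-assert the state at the start of every frame:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);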
I'm trying to render some text using the FTGL, GLFW and GLEW libraries in my C++ engine, following this tutorial: https://www.youtube.com/watch?v=Rsdc6I80aFQ.
But where the text should be, instead of characters I get triangles of some kind (screenshot).
I wanted to use the same shader as when rendering normal sprites, so I set the model matrix in the shader to the identity matrix and instead pass the character positions to the shader through a VBO.
Here's the font class:
class Font
{
private:
    /* Variables */
    ftgl::texture_atlas_t* m_atlas;
    ftgl::texture_font_t* m_font;

public:
    /* Constructors */
    Font(const char* filename, const float size)
        : m_atlas(ftgl::texture_atlas_new(512, 512, 1)),
          m_font(ftgl::texture_font_new_from_file(m_atlas, size, filename))
    {}

    /* Functions */
    void bindFontTexture() const
    {
        GLcall( glBindTexture(GL_TEXTURE_2D, m_atlas->id) );
    }

    // Getters
    inline ftgl::texture_font_t* getFTGLFont() const { return m_font; }
};
And here is the label class (labels are 2D renderables with some text in my engine):
class Label2D : public Renderable2D
{
protected:
    /* Variables */
    std::string m_text;
    Color m_color;
    Font* m_font;

public:
    /* Constructors */
    Label2D(const std::string& text, const vector2& pos, const vector2& size, Font* font)
        : Renderable2D(pos, size), m_text(text), m_font(font)
    {}

    Label2D(const std::string& text, const vector2& pos, const vector2& size, Font* font, const Color& color)
        : Renderable2D(pos, size), m_text(text), m_font(font), m_color(color)
    {}

    /* Functions */
    void bindFontTexture() const
    {
        m_font->bindFontTexture();
    }

    // Setters
    void setText(const std::string& text)
    {
        m_text = text;
    }
    void setColor(const Color& color)
    {
        m_color = color;
    }
    void setFont(Font* font)
    {
        m_font = font;
    }

    // Getters
    inline const std::string& getText() const { return m_text; }
    inline const Color& getColor() const { return m_color; }
    inline ftgl::texture_font_t* getFTGLFont() const { return m_font->getFTGLFont(); }
};
Here's my text rendering code. I know this code is written terribly, since I shouldn't create a VAO and VBO every time I want to render some text, but I did it this way only for test purposes; I just want to be sure the library works, and then I will organize the code:
void Layer2D::m_renderLabels(const bool renderLighting)
{
    // Uniforms Setup
    m_textureShader.start();
    m_textureShader.setUniformMatrix4f("u_modelMatrix", matrix4(1.0f));

    // Rendering Labels from std::vector
    for (Label2D* label : m_labels)
    {
        // Binding
        m_textureVAO.bind();
        label->bindFontTexture();

        // Starting Shader
        m_textureShader.start();

        // Preparation Stuff
        std::vector<float> positions;
        std::vector<float> textureCoordinates;
        float offsetX = label->getPosition().x;

        // Loading Characters
        for (unsigned int i = 0; i < label->getText().length(); i++)
        {
            // Loading Glyph
            ftgl::texture_glyph_t* glyph = ftgl::texture_font_get_glyph(label->getFTGLFont(), &label->getText().at(i));
            if (glyph != NULL)
            {
                if (i > 0)
                {
                    float kerning = texture_glyph_get_kerning(glyph, &label->getText().at(i - 1));
                    offsetX += kerning;
                }

                // Glyph Position/Offset/Size Calculations
                float x0 = offsetX + glyph->offset_x;
                float y0 = label->getPosition().y + glyph->offset_y;
                float x1 = x0 + glyph->width;
                float y1 = y0 - glyph->height;

                float u0 = glyph->s0;
                float v0 = glyph->t0;
                float u1 = glyph->s1;
                float v1 = glyph->t1;

                positions.push_back(x0);
                positions.push_back(y0);
                positions.push_back(x0);
                positions.push_back(y1);
                positions.push_back(x1);
                positions.push_back(y1);
                positions.push_back(x1);
                positions.push_back(y0);

                textureCoordinates.push_back(u0);
                textureCoordinates.push_back(v0);
                textureCoordinates.push_back(u0);
                textureCoordinates.push_back(v1);
                textureCoordinates.push_back(u1);
                textureCoordinates.push_back(v1);
                textureCoordinates.push_back(u1);
                textureCoordinates.push_back(v0);

                offsetX += glyph->advance_x;
            }
        }

        VertexArray textVAO;
        VertexBuffer* textPosVBO = new VertexBuffer(&positions[0], positions.size(), 2);
        VertexBuffer* textTexCoordsVBO = new VertexBuffer(&textureCoordinates[0], textureCoordinates.size(), 2);
        textVAO.addAttribute(textPosVBO, 0);
        textVAO.addAttribute(textTexCoordsVBO, 1);
        textVAO.bind();

        // Rendering
        GLcall( glDrawArrays(GL_TRIANGLE_STRIP, 0, 4 * label->getText().length()) );
    }
}
And lastly, here's the shader code:
Vertex:
#shader vertex
#version 330 core

layout(location = 0) in vec2 in_position;
layout(location = 1) in vec2 in_textureCoordinates;

out vec2 pass_textureCoordinates;

uniform mat4 u_projectionMatrix;
uniform mat4 u_viewMatrix;
uniform mat4 u_modelMatrix;

void main()
{
    pass_textureCoordinates = in_textureCoordinates;
    gl_Position = u_projectionMatrix * u_viewMatrix * u_modelMatrix * vec4(in_position, 0.0, 1.0);
}
And fragment:
#shader fragment
#version 330 core

in vec2 pass_textureCoordinates;
out vec4 out_color;

uniform sampler2D u_textureSampler;

void main()
{
    vec4 textureColor = texture2D(u_textureSampler, pass_textureCoordinates);
    out_color = textureColor;
    out_color = vec4(1.0, 0.0, 0.0, 1.0); //done for test purposes only (it still doesn't work when this line is removed)
}
I'm sorry if I wasn't specific enough. If I can provide any more information to help you help me, please comment below.
Thanks in advance!
EDIT:
If you were wondering, I do set the projection and view matrices in another part of the code:
m_textureShader.start();
m_textureShader.setUniformMatrix4f("u_projectionMatrix", m_projectionMatrix);
m_textureShader.setUniformMatrix4f("u_viewMatrix", viewMatrix);
EDIT: After swapping the last 2 vertices I now get quads, but still not text (screenshot). The quads aren't red because I removed the debugging line from the fragment shader.
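For what it's worth, one likely culprit for shapes merging into each other at this point is the draw call itself: a single glDrawArrays(GL_TRIANGLE_STRIP, ...) across all glyphs stitches every quad to the next one. A hedged sketch of the usual fix, drawing indexed GL_TRIANGLES (6 indices per glyph quad) on top of the buffers built above, assuming the vertex order in the code shown (top-left, bottom-left, bottom-right, top-right):

// Two triangles (6 indices) per glyph quad instead of one strip across all glyphs.
std::vector<GLuint> indices;
const GLuint glyphCount = GLuint(positions.size() / 8); // 4 vertices * 2 floats each
for (GLuint g = 0; g < glyphCount; ++g)
{
    const GLuint base = g * 4;
    const GLuint quad[6] = { base, base + 1, base + 2, base + 2, base + 3, base };
    indices.insert(indices.end(), quad, quad + 6);
}

// The core profile requires the indices to live in a buffer object.
GLuint ibo;
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(GLuint), indices.data(), GL_STREAM_DRAW);
GLcall( glDrawElements(GL_TRIANGLES, GLsizei(indices.size()), GL_UNSIGNED_INT, nullptr) );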
I'm in the process of translating a piece of OpenGL code to Vulkan. The code recreates a rendered scene from an image (on a hemisphere projection) with depth information encoded. Note that I also load the model view matrix used for the projection to recreate the scene. The translation has been pretty straightforward, but I'm running into issues due to the new Vulkan coordinate system.
The original OpenGL shader with comments follows:
#version 430

layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;

in vec2 posGeom[];
out vec2 texCoord;

uniform mat4 view;
uniform mat4 projection;
uniform float threshold;
uniform vec3 quantization;
uniform mat4 inverseStaticView;
uniform sampler2D rgbdTexture;

//get the image space for each pixel of our hemisphere image
vec3 getSphereRay(const vec2 coord) {
    //get length of ray from camera to point on image plane
    float len = 1 - coord.x * coord.x - coord.y * coord.y;
    if (len > 0)
        return vec3(coord, -sqrt(len)); //scale to unit length vector as viewing ray
    else
        return vec3(0);
}

vec4 getPosition(const in vec2 inCoord, const in float depth) {
    vec2 coord = inCoord;
    //reverse the stretching from sphere to quad (based on y-coordinate)
    float percent = sqrt(1.0 - coord.y * coord.y);
    coord.x = coord.x * percent;
    //scale ray with corresponding depth
    vec3 normal = getSphereRay(coord) * depth;
    //move from image space to world space by inverse view matrix
    return inverseStaticView * vec4(normal, 1);
}

bool hasZeroDepth = false;

//get the real depth from quantized and packed depth by inverting the gamma correction and inverting min, max
float getDepth(int idx) {
    float depth = texture(rgbdTexture, posGeom[idx] * 0.5 + 0.5).w;
    if(depth == 0)
        hasZeroDepth = true;
    float minDepth = quantization.x;
    float maxDepth = quantization.y;
    float gamma = quantization.z;
    depth = pow(depth, gamma);
    depth = depth * (maxDepth - minDepth) + minDepth;
    return depth;
}

//emit the position and texcoord
void emitPosition(int idx, float depth) {
    texCoord = posGeom[idx] * 0.5 + 0.5;
    gl_Position = projection * view * getPosition(posGeom[idx], depth);
    EmitVertex();
}

void main() {
    float d0 = getDepth(0);
    float d1 = getDepth(1);
    float d2 = getDepth(2);
    //do not emit tris with zero (invalid) depth
    if(!hasZeroDepth) {
        float minDepth = min(d0, min(d1, d2));
        float maxDepth = max(d0, max(d1, d2));
        float minDist = maxDepth - minDepth;
        float avgDepth = (d0 + d1 + d2) / 3;
        float thres = threshold;
        //look at tri stretching factor
        if(minDist / avgDepth < thres) {
            //emit original tri
            emitPosition(0, d0);
            emitPosition(1, d1);
            emitPosition(2, d2);
        } else {
            //emit tri with maxDepth to only show background
            emitPosition(0, maxDepth);
            emitPosition(1, maxDepth);
            emitPosition(2, maxDepth);
        }
    }
}
In the Vulkan shader I account for the Vulkan coordinate system by inverting the y value. I also must normalize the world values for reasons that are unclear to me (otherwise what's rendered is complete nonsense). The shader code follows:
#version 450

layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;

layout(binding = 0) uniform UniformBufferObject {
    mat4 modelView;
    mat4 inverseStaticModelView;
    float quantization;
} ubo;

layout(binding = 1) uniform sampler2D texSampler;

layout(location = 0) in vec2 posGeom[];
layout(location = 0) out vec2 texCoord;

bool hasZeroDepth = false;
float minDepth = 0;
float maxDepth = 1.0;

vec3 unproject(vec2 win) {
    float scale = 1 - win.y * win.y;
    // Invert y to account for Vulkan coordinate system.
    float y = win.y * -1;
    // Scale x to account for hemisphere projection.
    float x = win.x * scale;
    float z = -sqrt(1 - x * x - y * y);
    if(z < 0){
        vec4 outVals = ubo.inverseStaticModelView * vec4(x, y, z, 1.0);
        return vec3(outVals[0], outVals[1], outVals[2]) / outVals.w;
    }else
        return vec3(0);
}

vec3 reconstructWorldPosition(vec2 ndc, float depth) {
    vec3 pos = unproject(ndc);
    return depth * normalize(pos);
}

float getDepth(int idx) {
    float depth = texture(texSampler, posGeom[idx] * 0.5 + 0.5).w;
    if(depth == 0)
        hasZeroDepth = true;
    depth = pow(depth, ubo.quantization);
    return depth;
}

void emitPosition(int idx, float depth) {
    vec2 pos = posGeom[idx].xy;
    texCoord = pos * 0.5 + 0.5;
    vec3 positionFromDepth = reconstructWorldPosition(pos, depth);
    gl_Position = ubo.modelView * vec4(positionFromDepth, 1);
    EmitVertex();
}

void main() {
    float d0 = getDepth(0);
    float d1 = getDepth(1);
    float d2 = getDepth(2);
    if(!hasZeroDepth) {
        float minDepth = min(d0, min(d1, d2));
        float maxDepth = max(d0, max(d1, d2));
        float minDist = maxDepth - minDepth;
        float avgDepth = (d0 + d1 + d2) / 3.0;
        float thres = 0.1;
        if(minDist / avgDepth < thres) {
            emitPosition(0, d0);
            emitPosition(1, d1);
            emitPosition(2, d2);
        } else {
            emitPosition(0, maxDepth);
            emitPosition(1, maxDepth);
            emitPosition(2, maxDepth);
        }
    }
}
Images of the output of the two programs are contained in this album: http://imgur.com/a/KUl57
The Vulkan output appears to be almost correct, except for some odd artifacts in the lower-left of the scene. My suspicion is that the scaling of the x coordinate to account for the hemisphere projection is causing the issue. I've played around with the scaling and other parts of the shader, but I can't seem to get it right. Am I overlooking something else that differs between Vulkan and OpenGL, especially with regard to the coordinate system?
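As a side note on the coordinate-system part of the question: instead of patching the y axis inside every shader, Vulkan can flip it for you. With VK_KHR_maintenance1 (core in Vulkan 1.1) a negative viewport height mirrors the y axis to match OpenGL conventions; a minimal sketch, assuming the pipeline uses a dynamic viewport and that swapchainExtent/commandBuffer are placeholders for your own objects:

// Flip the viewport so Vulkan's y-down clip space matches OpenGL's y-up.
VkViewport viewport{};
viewport.x        = 0.0f;
viewport.y        = static_cast<float>(swapchainExtent.height); // start at the bottom edge
viewport.width    = static_cast<float>(swapchainExtent.width);
viewport.height   = -static_cast<float>(swapchainExtent.height); // negative height flips y
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
vkCmdSetViewport(commandBuffer, 0, 1, &viewport);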
I have a sphere and I can map a texture onto it, but the texture ends up on the outside of the sphere and I need it on the inside. The user sits inside the sphere and views it from within (with rotate and zoom), like a sky dome, but a full sphere. Maybe I need to fix the UV texture coordinates or enable something?
Here is the code for generating the sphere:
class Sphere : public ParametricSurface {
public:
    Sphere(float radius) : m_radius(radius)
    {
        ParametricInterval interval = { ivec2(20, 20), vec2(Pi, TwoPi), vec2(8, 14) };
        SetInterval(interval);
    }

    vec3 Evaluate(const vec2& domain) const
    {
        float u = domain.x, v = domain.y;
        float x = m_radius * sin(u) * cos(v);
        float y = m_radius * cos(u);
        float z = m_radius * -sin(u) * sin(v);
        return vec3(x, y, z);
    }

private:
    float m_radius;
};
vec2 ParametricSurface::ComputeDomain(float x, float y) const
{
    return vec2(x * m_upperBound.x / m_slices.x, y * m_upperBound.y / m_slices.y);
}

void ParametricSurface::GenerateVertices(float * vertices) const
{
    float* attribute = vertices;
    for (int j = 0; j < m_divisions.y; j++) {
        for (int i = 0; i < m_divisions.x; i++) {
            // Compute Position
            vec2 domain = ComputeDomain(i, j);
            vec3 range = Evaluate(domain);
            attribute = range.Write(attribute);

            // Compute Normal
            if (m_vertexFlags & VertexFlagsNormals) {
                float s = i, t = j;

                // Nudge the point if the normal is indeterminate.
                if (i == 0) s += 0.01f;
                if (i == m_divisions.x - 1) s -= 0.01f;
                if (j == 0) t += 0.01f;
                if (j == m_divisions.y - 1) t -= 0.01f;

                // Compute the tangents and their cross product.
                vec3 p = Evaluate(ComputeDomain(s, t));
                vec3 u = Evaluate(ComputeDomain(s + 0.01f, t)) - p;
                vec3 v = Evaluate(ComputeDomain(s, t + 0.01f)) - p;
                vec3 normal = u.Cross(v).Normalized();
                if (InvertNormal(domain))
                    normal = -normal;
                attribute = normal.Write(attribute);
            }

            // Compute Texture Coordinates
            if (m_vertexFlags & VertexFlagsTexCoords) {
                float s = m_textureCount.x * i / m_slices.x;
                float t = m_textureCount.y * j / m_slices.y;
                attribute = vec2(s, t).Write(attribute);
            }
        }
    }
}

void ParametricSurface::GenerateLineIndices(unsigned short * indices) const
{
    unsigned short * index = indices;
    for (int j = 0, vertex = 0; j < m_slices.y; j++) {
        for (int i = 0; i < m_slices.x; i++) {
            int next = (i + 1) % m_divisions.x;
            *index++ = vertex + i;
            *index++ = vertex + next;
            *index++ = vertex + i;
            *index++ = vertex + i + m_divisions.x;
        }
        vertex += m_divisions.x;
    }
}

void ParametricSurface::GenerateTriangleIndices(unsigned short * indices) const
{
    unsigned short * index = indices;
    for (int j = 0, vertex = 0; j < m_slices.y; j++) {
        for (int i = 0; i < m_slices.x; i++) {
            int next = (i + 1) % m_divisions.x;
            *index++ = vertex + i;
            *index++ = vertex + next;
            *index++ = vertex + i + m_divisions.x;
            *index++ = vertex + next;
            *index++ = vertex + next + m_divisions.x;
            *index++ = vertex + i + m_divisions.x;
        }
        vertex += m_divisions.x;
    }
}
And here is the VBO creation:
+ (DrawableVBO *)createVBO:(SurfaceType)surfaceType
{
    ISurface * surface = [self createSurface:surfaceType]; // just Sphere type

    surface->SetVertexFlags(VertexFlagsNormals | VertexFlagsTexCoords); // which vertices I need

    // Get vertices from the surface.
    //
    int vertexSize = surface->GetVertexSize();
    int vBufSize = surface->GetVertexCount() * vertexSize;
    GLfloat * vbuf = new GLfloat[vBufSize];
    surface->GenerateVertices(vbuf);

    // Get triangle indices from the surface.
    //
    int triangleIndexCount = surface->GetTriangleIndexCount();
    unsigned short * triangleBuf = new unsigned short[triangleIndexCount];
    surface->GenerateTriangleIndices(triangleBuf);

    // Create the VBO for the vertices.
    //
    GLuint vertexBuffer;
    glGenBuffers(1, &vertexBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glBufferData(GL_ARRAY_BUFFER, vBufSize * sizeof(GLfloat), vbuf, GL_STATIC_DRAW);

    // Create the VBO for the triangle indices.
    //
    GLuint triangleIndexBuffer;
    glGenBuffers(1, &triangleIndexBuffer);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, triangleIndexBuffer);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, triangleIndexCount * sizeof(GLushort), triangleBuf, GL_STATIC_DRAW);

    delete [] vbuf;
    delete [] triangleBuf;
    delete surface;

    DrawableVBO * vbo = [[DrawableVBO alloc] init];
    vbo.vertexBuffer = vertexBuffer;
    vbo.triangleIndexBuffer = triangleIndexBuffer;
    vbo.vertexSize = vertexSize;
    vbo.triangleIndexCount = triangleIndexCount;
    return vbo;
}
Here is my light setup:
- (void)setupLights
{
    // Set up some default material parameters.
    //
    glUniform3f(_ambientSlot, 0.04f, 0.04f, 0.04f);
    glUniform3f(_specularSlot, 0.5, 0.5, 0.5);
    glUniform1f(_shininessSlot, 50);

    // Initialize various state.
    //
    glEnableVertexAttribArray(_positionSlot);
    glEnableVertexAttribArray(_normalSlot);

    glUniform3f(_lightPositionSlot, 1.0, 1.0, 5.0);
    glVertexAttrib3f(_diffuseSlot, 0.8, 0.8, 0.8);
}
And finally the shaders.
Fragment:
precision mediump float;

varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;

uniform sampler2D Sampler;

void main()
{
    gl_FragColor = texture2D(Sampler, vTextureCoordOut) * vDestinationColor;
}
Vertex:
uniform mat4 projection;
uniform mat4 modelView;
attribute vec4 vPosition;
attribute vec2 vTextureCoord;

uniform mat3 normalMatrix;
uniform vec3 vLightPosition;
uniform vec3 vAmbientMaterial;
uniform vec3 vSpecularMaterial;
uniform float shininess;

attribute vec3 vNormal;
attribute vec3 vDiffuseMaterial;

varying vec4 vDestinationColor;
varying vec2 vTextureCoordOut;

void main(void)
{
    gl_Position = projection * modelView * vPosition;

    vec3 N = normalMatrix * vNormal;
    vec3 L = normalize(vLightPosition);
    vec3 E = vec3(0, 0, 1);
    vec3 H = normalize(L + E);

    float df = max(0.0, dot(N, L));
    float sf = max(0.0, dot(N, H));
    sf = pow(sf, shininess);

    vec3 color = vAmbientMaterial + df * vDiffuseMaterial + sf * vSpecularMaterial;
    vDestinationColor = vec4(color, 1);
    vTextureCoordOut = vTextureCoord;
}
It was a bit of monkey code, but I fixed it. First we enable culling and disable rendering of the front faces:
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
Then I changed the position of the light source:
glUniform3f(_lightPositionSlot, 1.0, 1.0, -2.5);
(I don't even need the light, so the next step is to disable it entirely.) But finally I have a sphere, the user is inside it, and they can rotate it, zoom in and out, and see the texture!
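An alternative to culling the front faces is to reverse the triangle winding when generating the indices, so the triangles face inward and the default glCullFace(GL_BACK) keeps working; a sketch based on GenerateTriangleIndices above (the function name is my own):

// Same quad split as GenerateTriangleIndices, but with the vertex order of each
// triangle reversed so the faces point toward the inside of the sphere.
void ParametricSurface::GenerateInwardTriangleIndices(unsigned short * indices) const
{
    unsigned short * index = indices;
    for (int j = 0, vertex = 0; j < m_slices.y; j++) {
        for (int i = 0; i < m_slices.x; i++) {
            int next = (i + 1) % m_divisions.x;
            *index++ = vertex + i;
            *index++ = vertex + i + m_divisions.x;
            *index++ = vertex + next;
            *index++ = vertex + next;
            *index++ = vertex + i + m_divisions.x;
            *index++ = vertex + next + m_divisions.x;
        }
        vertex += m_divisions.x;
    }
}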