I am stuck with glReadPixels: it appears that no data is written in the output array.
From the manual (https://www.opengl.org/sdk/docs/man/docbook4/xhtml/glReadPixels.xml) it's written that If an error is generated, no change is made to the contents of data., but apparently no error is returned by glGetError().
I really can't understand what's wrong... Here's the code - in golang - that samples random point in the screen. It always print [0 0 0], although no pixel should be black (not even the background).
package main
import (
"fmt"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/veandco/go-sdl2/sdl"
"math/rand"
"os"
"runtime"
)
func main() {
// Random crashes, this seems to fix
runtime.LockOSThread()
var wWidth, wHeight int = 500, 500
// Initialize SDL
if err := sdl.Init(sdl.INIT_EVERYTHING); err != nil {
panic(err)
}
defer sdl.Quit()
// Initialize OpenGL
if err := gl.Init(); err != nil {
panic(err)
}
// Create the OpenGL-capable window
win, err := sdl.CreateWindow(
"glReadPixels Test",
sdl.WINDOWPOS_CENTERED,
sdl.WINDOWPOS_CENTERED,
wWidth, wHeight,
sdl.WINDOW_OPENGL)
if err != nil {
panic(err)
}
defer win.Destroy()
// Setup OpenGL context
var ctx sdl.GLContext
ctx, err = sdl.GL_CreateContext(win)
if err != nil {
panic(err)
}
defer sdl.GL_DeleteContext(ctx)
// Settings
initGL(wWidth, wHeight)
for {
for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
switch event.(type) {
case *sdl.QuitEvent:
os.Exit(0)
}
}
// Clear and draw
gl.Clear(gl.COLOR_BUFFER_BIT)
gl.DrawArrays(gl.TRIANGLE_STRIP, 0, 4)
rx, ry := rand.Intn(wWidth), rand.Intn(wHeight)
gl.ReadBuffer(gl.FRONT) // Not working with gl.BACK either
data := make([]byte, 3)
//data[0], data[1], data[2] = 123, 213, 132 // Test if it's overwritten
gl.ReadPixels(int32(rx), int32(ry), 1, 1, gl.RGB, gl.UNSIGNED_BYTE, gl.Ptr(&data))
fmt.Println("Read at", rx, ry, data)
// Show on window
sdl.GL_SwapWindow(win)
// Check for errors...
if gl.GetError() != gl.NO_ERROR {
fmt.Println("GL ERROR Somewhere!")
}
sdl.Delay(uint32(300))
}
}
func initGL(winW, winH int) {
gl.ClearColor(0.2, 0.2, 0.3, 1.0)
gl.Viewport(0, 0, int32(winW), int32(winH))
// Shader sources
vertShaderSrc := gl.Str(`
#version 130
in vec3 vertPos;
in vec2 vertUV;
out vec2 fragUV;
void main() {
gl_Position = vec4(vertPos, 1.0f);
fragUV = vertUV;
}
` + "\x00")
fragShaderSrc := gl.Str(`#version 130
in vec2 fragUV;
out vec3 outColor;
void main() {
if (fragUV.x < 0.5 ^^ fragUV.y < 0.5)
outColor = vec3(1.0, 0.0, 0.0);
else
outColor = vec3(0.0, 0.0, 1.0);
}
` + "\x00")
// Shaders
var vShader, fShader uint32
vShader = gl.CreateShader(gl.VERTEX_SHADER)
fShader = gl.CreateShader(gl.FRAGMENT_SHADER)
gl.ShaderSource(vShader, 1, &vertShaderSrc, nil)
gl.ShaderSource(fShader, 1, &fragShaderSrc, nil)
gl.CompileShader(vShader)
gl.CompileShader(fShader)
// Program
var progr uint32 = gl.CreateProgram()
gl.AttachShader(progr, vShader)
gl.AttachShader(progr, fShader)
gl.BindAttribLocation(progr, 0, gl.Str("vertPos\x00"))
gl.BindAttribLocation(progr, 1, gl.Str("vertUV\x00"))
gl.LinkProgram(progr)
gl.UseProgram(progr)
const N float32 = -0.5
const P float32 = 0.5
// Setup the "canvas": a simple quad
canvasData := []float32{
N, N, 0, 0.0, 0.0,
P, N, 0, 1.0, 0.0,
N, P, 0, 0.0, 1.0,
P, P, 0, 1.0, 1.0,
}
var vboID uint32
gl.GenBuffers(1, &vboID)
gl.BindBuffer(gl.ARRAY_BUFFER, vboID)
gl.BufferData(gl.ARRAY_BUFFER, len(canvasData)*4, gl.Ptr(canvasData), gl.STATIC_DRAW)
var vaoID uint32
gl.GenVertexArrays(1, &vaoID)
gl.BindVertexArray(vaoID)
gl.EnableVertexAttribArray(0)
gl.EnableVertexAttribArray(1)
gl.VertexAttribPointer(0, 3, gl.FLOAT, false, 5*4, gl.PtrOffset(0))
gl.VertexAttribPointer(1, 2, gl.FLOAT, false, 5*4, gl.PtrOffset(3*4))
}
The problem is likely
gl.ReadPixels(int32(rx), int32(ry), 1, 1, gl.RGB, gl.UNSIGNED_BYTE, gl.Ptr(&data))
Specifically
gl.Ptr(&data)
The documentation for that function is:
// Ptr takes a slice or pointer (to a singular scalar value or the first
// element of an array or slice) and returns its GL-compatible address.
You're passing a POINTER to a slice. This is... problematic if you look at the implementation.
func Ptr(data interface{}) unsafe.Pointer {
if data == nil {
return unsafe.Pointer(nil)
}
var addr unsafe.Pointer
v := reflect.ValueOf(data)
switch v.Type().Kind() {
case reflect.Ptr:
e := v.Elem()
switch e.Kind() {
case
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Float32, reflect.Float64:
addr = unsafe.Pointer(e.UnsafeAddr())
}
case reflect.Uintptr:
addr = unsafe.Pointer(v.Pointer())
case reflect.Slice:
addr = unsafe.Pointer(v.Index(0).UnsafeAddr())
default:
panic(fmt.Sprintf("Unsupported type %s; must be a pointer, slice, or array", v.Type()))
}
return addr
}
As you can see, if you pass in a pointer to anything other than a basic uint/float/int, it will fail silently and return a null unsafe.Pointer if it's not of the correct type. So you either want to pass in gl.Ptr(&data[0]) or gl.Ptr(data). It's honestly kind of surprising this wasn't segfaulting.
Related
I'm trying to make frustrum culling via compute shader. For that I have a pair of buffers for instanced vertex attributes, and a pair of buffers for indirect draw commands. My compute shader checks if instance coordinates from first buffer are within bounding volume, referencing first draw buffer for counts, subgroupBallot and bitCount to see offset within subgroup, then add results from other subgroups and a global offset, and finally stores the result in second buffer. The global offset is stored in second indirect draw buffer.
The problem is that, when under load, frustum may be few(>1) frames late to the moving camera, with wide lines of disappeared objects on edge. It seems weird to me because culling and rendering are done within same command buffer.
When taking capture in renderdoc, taking a screenshot alt+printScreen, or pausing the render-present thread, things snap back to as they should be.
My only guess is that compute shader from past frame continues to execute even when new frame starts to be drawn, though this should not be happening due to pipeline barriers.
Shader code:
#version 460
#extension GL_KHR_shader_subgroup_ballot : require
struct drawData{
uint indexCount;
uint instanceCount;
uint firstIndex;
uint vertexOffset;
uint firstInstance;
};
struct instanceData{
float x, y, z;
float a, b, c, d;
};
layout(local_size_x = 128, local_size_y = 1, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform A
{
mat4 cam;
vec4 camPos;
vec4 l;
vec4 t;
vec4 r;
vec4 b;
};
layout(set = 0, binding = 1) buffer B
{
uint count;
drawData data[];
} Draw[2];
layout(set = 0, binding = 2) buffer C
{
instanceData data[];
} Instance[2];
shared uint offsetsM[32];
void main()
{
const uint gID = gl_LocalInvocationID.x;
const uint lID = gl_SubgroupInvocationID;
const uint patchSize = gl_WorkGroupSize.x;
Draw[1].data[0] = Draw[0].data[0];//copy data like index count
Draw[1].count = Draw[0].count;
uint offsetG = 0;//accumulating offset within end buffer
uint loops = Draw[0].data[0].instanceCount/patchSize;//constant loop count
for(uint i = 0; i<loops;++i){
uint posa = i*patchSize+gID;//runs better this way for some reason
vec3 pos = camPos.xyz-vec3(Instance[0].data[posa].x, Instance[0].data[posa].y, Instance[0].data[posa].z);//position relative to camera
mat4x3 lrtb = mat4x3(l.xyz, r.xyz, t.xyz, b.xyz);
vec4 dist = pos*lrtb+Model.data[0].rad;//dot products and radius tolerance
bool Pass = posa<Draw[0].data[0].instanceCount&&//is real
(dot(pos, pos)<l.w*l.w) &&//not too far
all(greaterThan(dist, vec4(0))); //within view frustum
subgroupBarrier();//no idea what is the best, put what works
uvec4 actives = subgroupBallot(Pass);//count passed instances
if(subgroupElect())
offsetsM[gl_SubgroupID] = bitCount(actives).x+bitCount(actives).y;
barrier();
uint offsetL = bitCount(actives&gl_SubgroupLtMask).x+bitCount(actives&gl_SubgroupLtMask).y;//offset withing subgroup
uint ii = 0;
if(Pass){
for(; ii<gl_SubgroupID; ++ii)
offsetG+= offsetsM[ii];//offsets before subgroup
Instance[1].data[offsetG+offsetL] = Instance[0].data[posa];
for(; ii<gl_NumSubgroups; ++ii)
offsetG+= offsetsM[ii];}//offsets after subgroup
else for(; ii<gl_NumSubgroups; ++ii)
offsetG+= offsetsM[ii];//same but no data copying
}
if(gID == 0)
Draw[1].data[0].instanceCount = offsetG;
}
For renderpass after the compute I have dependencies:
{//1
deps[1].srcSubpass = VK_SUBPASS_EXTERNAL;
deps[1].dstSubpass = 0;
deps[1].srcStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
deps[1].dstStageMask = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
deps[1].srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
deps[1].dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
deps[1].dependencyFlags = 0;
}
{//2
deps[2].srcSubpass = VK_SUBPASS_EXTERNAL;
deps[2].dstSubpass = 0;
deps[2].srcStageMask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
deps[2].dstStageMask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
deps[2].srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
deps[2].dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
deps[2].dependencyFlags = 0;
}
The command buffer is(fully reused as is, one for each image in swapchain):
vkBeginCommandBuffer(cmd, &begInfo);
vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, layoutsPipe[1],
0, 1, &descs[1], 0, 0);
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, pipes[1]);
vkCmdDispatch(cmd, 1, 1, 1);
VkBufferMemoryBarrier bufMemBar[2];
{//mem bars
{//0 indirect
bufMemBar[0].srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
bufMemBar[0].dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
bufMemBar[0].buffer = bufferIndirect;
bufMemBar[0].offset = 0;
bufMemBar[0].size = -1;
}
{//1 vertex instance
bufMemBar[1].srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
bufMemBar[1].dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
bufMemBar[1].buffer = bufferInstance;
bufMemBar[1].offset = 0;
bufMemBar[1].size = -1;
}
}
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, 0, 0, 0, 1, &bufMemBar[0], 0, 0);
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT , 0, 0, 0, 1, &bufMemBar[1], 0, 0);
VkRenderPassBeginInfo passBegInfo;
passBegInfo.renderPass = pass;
passBegInfo.framebuffer = chain.frames[i];
passBegInfo.renderArea = {{0, 0}, chain.dim};
VkClearValue clears[2]{{0},{0}};
passBegInfo.clearValueCount = 2;
passBegInfo.pClearValues = clears;
vkCmdBeginRenderPass(cmd, &passBegInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layoutsPipe[0], 0, 1, &descs[0], 0, 0);
vkCmdBindPipeline (cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipes[0]);
VkBuffer buffersVertex[2]{bufferVertexProto, bufferInstance};
VkDeviceSize offsetsVertex[2]{0, 0};
vkCmdBindVertexBuffers(cmd, 0, 2, buffersVertex, offsetsVertex);
vkCmdBindIndexBuffer (cmd, bufferIndex, 0, VK_INDEX_TYPE_UINT32);
vkCmdDrawIndexedIndirectCount(cmd, bufferIndirect, 0+4,
bufferIndirect, 0,
count.maxDraws, sizeof(VkDrawIndexedIndirectCommand));
vkCmdEndRenderPass(cmd);
vkEndCommandBuffer(cmd);
Rendering and presentation are synchronised with two semaphores - imageAvailable, and renderFinished. Frustum calculation is in right order on CPU. Validation layers are enabled.
The problem was that I lacked host synchronisation. Indeed, even within same command buffer, there are no host synchronisation guarantees (and that makes sense, since it enables us to use events).
I'm using Pango to layout my text and NV Path to render glyphs.
Having difficulty in finding correct methods for getting per glyph positions. As you can see at the moment I'm calculating this values according to line and glyph indexes.
But Pango has better methods for this; like per glyph, per line, extent queries. My problem is that this methods got no documentation and I wasn't able to find any samples.
How can i get correct glyph positions from Pango for this type of application?
std::vector<uint32_t> glyphs;
std::vector<GLfloat> positions;
int lineCount = pango_layout_get_line_count( pangoLayout );
for ( int l = 0; l < lineCount; ++l )
{
PangoLayoutLine* line = pango_layout_get_line_readonly( pangoLayout, l );
GSList* runs = line->runs;
float xOffset = 0.0f;
while( runs )
{
PangoLayoutRun* run = static_cast<PangoLayoutRun*>( runs->data );
glyphs.resize( run->glyphs->num_glyphs, 0 );
positions.resize( run->glyphs->num_glyphs * 2, 0 );
for( int g = 0; g < run->glyphs->num_glyphs; ++g )
{
glyphs[g] = run->glyphs->glyphs[g].glyph;
// Need Correct Values Here
positions[ g * 2 + 0 ] = xOffset * NVPATH_DEFUALT_EMSCALE;
positions[ g * 2 + 1 ] = (float)l * NVPATH_DEFUALT_EMSCALE;
xOffset += PANGO_PIXELS( run->glyphs->glyphs[g].geometry.width ) / getFontSize();
}
const Font::RefT font = getFont( pango_font_description_get_family( pango_font_describe( run->item->analysis.font ) ) );
glEnable( GL_STENCIL_TEST );
glStencilFillPathInstancedNV( run->glyphs->num_glyphs,
GL_UNSIGNED_INT,
&glyphs[0],
font->nvPath,
GL_PATH_FILL_MODE_NV,
0xFF,
GL_TRANSLATE_2D_NV,
&positions[0]
);
glStencilFunc( GL_NOTEQUAL, 0, 0xFF );
glStencilOp( GL_KEEP, GL_KEEP, GL_ZERO );
glColor3f( 0.0, 0.0, 0.0 );
glCoverFillPathInstancedNV( run->glyphs->num_glyphs,
GL_UNSIGNED_INT,
&glyphs[0],
font->nvPath,
GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV,
GL_TRANSLATE_2D_NV,
&positions[0]
);
glDisable( GL_STENCIL_TEST );
runs = runs->next;
}
}
I've been search for a good text methodolgy recently and found one at http://www.braynzarsoft.net/Articles/index.php?p=VA&article=Easy-Font-Rendering-in-DirectX-11 , which is a good site. I can't seem to get it to run! I've resolved most of the errors but, however on debug, I get HLSL errors:
D3D11: ERROR: ID3D11DeviceContext::Draw: The Vertex Shader expects application provided input data (which is to say data other than hardware auto-generated values such as VertexID or InstanceID). Therefore an Input Assembler object is expected, but none is bound. [ EXECUTION ERROR #349: DEVICE_DRAW_INPUTLAYOUT_NOT_SET ]
Creeped? I am! Since this message is spamming like mad, the issue is most likely happening in my DrawText() function or a bad implementation in the initialize function (Below)
The InitializeGeneralResources() function:
void InfiniteText::InitializeGeneralResources(){
float textureWidth=1024.0f;
UINT numLetters=32;
D3DX11CompileFromFile(L"Font.hlsl", NULL, NULL, "FONT_VS", "vs_5_0",0,0,0,&FontvsBuffer,0,0);
D3DX11CompileFromFile(L"Font.hlsl", NULL, NULL, "FONT_PS", "ps_5_0",0,0,0,&FontpsBuffer,&ppErrorMsgs,0);
iD3D.Device->CreateVertexShader(FontvsBuffer->GetBufferPointer(),FontvsBuffer->GetBufferSize(), NULL, &Fontvs);
iD3D.Device->CreatePixelShader(FontpsBuffer->GetBufferPointer(),FontpsBuffer->GetBufferSize(), NULL, &Fontps);
ID3D11InputLayout* InLayout;
D3D11_INPUT_ELEMENT_DESC IEDesc[]={
{"POSITION",0,DXGI_FORMAT_R32G32B32_FLOAT,0,0,D3D11_INPUT_PER_VERTEX_DATA,0},
{"TEXCOORD",0,DXGI_FORMAT_R32G32_FLOAT,0,12,D3D11_INPUT_PER_VERTEX_DATA,0},
};
UINT NumElements = ARRAYSIZE(IEDesc);
iD3D.Device->CreateInputLayout(IEDesc,NumElements,FontvsBuffer->GetBufferPointer(),FontvsBuffer->GetBufferSize(),&InLayout);
iD3D.DeviceContext->IASetInputLayout(InLayout);
D3D11_SAMPLER_DESC FontSamplerDesc;
FontSamplerDesc.MaxAnisotropy=1;
FontSamplerDesc.AddressU=D3D11_TEXTURE_ADDRESS_WRAP;
FontSamplerDesc.AddressV=D3D11_TEXTURE_ADDRESS_WRAP;
FontSamplerDesc.AddressW=D3D11_TEXTURE_ADDRESS_WRAP;
FontSamplerDesc.Filter=D3D11_FILTER_MIN_MAG_MIP_LINEAR;
FontSamplerDesc.MipLODBias=0.0f;
D3DX11CreateShaderResourceViewFromFile(iD3D.Device, L"Font.dds", NULL, NULL, &FontSRV, NULL);
iD3D.Device->CreateSamplerState(&FontSamplerDesc, &FontSRVSampler);
D3D11_BUFFER_DESC Vbufferdescription;
ZeroMemory(&Vbufferdescription, sizeof(Vbufferdescription));
Vbufferdescription.BindFlags=D3D11_BIND_VERTEX_BUFFER;
Vbufferdescription.Usage=D3D11_USAGE_DYNAMIC;
Vbufferdescription.CPUAccessFlags=D3D11_CPU_ACCESS_WRITE;
Vbufferdescription.ByteWidth=sizeof(VertexText)*6*numLetters;
Vbufferdescription.MiscFlags=0;
iD3D.Device->CreateBuffer(&Vbufferdescription, NULL, &FontVertexBuffer);
}
I have a rather simple HLSL file:
cbuffer ConstantBuffer:register( b0 )
{
float4x4 WVP;
}
struct VOut
{
float4 position : SV_POSITION;
float2 TexCoord : TEXCOORD0;
};
VOut FONT_VS(float4 position : POSITION, float2 TexCoord : TEXCOORD)
{
VOut output;
output.position = mul(position, WVP);
output.TexCoord = TexCoord;
return output;
}
float2 FONT_PS(float4 position : SV_POSITION, float2 TexCoord : TEXCOORD0) : SV_TARGET
{
return TexCoord;
}
(The DrawString() function)
bool InfiniteText::DrawString(char* Text, float xPos, float yPos){
int letterSize = sizeof(VertexText)*6;
int textSize = strlen(Text);
if(textSize > numLetters)
textSize=numLetters;
float cScreenWidth = 32.0f/iD3D.cWidth;
float cScreenHeight= 32.0f/iD3D.cHeight;
float TexelWidth= 32.0f/textureWidth;
D3D11_MAPPED_SUBRESOURCE MappedSub;
iD3D.DeviceContext->Map(FontVertexBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &MappedSub);
VertexText* Sprite = (VertexText*)MappedSub.pData;
const int indexA = static_cast<int>('A');
const int indexZ = static_cast<int>('Z');
for(int i=0; i<textSize;i++){
float thisStartX=xPos+(cScreenWidth*static_cast<char>(i));
float thisEndX=thisStartX + cScreenWidth;
float thisStartY=yPos;
float thisEndY=thisStartY + cScreenHeight;
Sprite[0].Translation=XMFLOAT3(thisEndX,thisEndY,1.0f);
Sprite[1].Translation=XMFLOAT3(thisEndX,yPos,1.0f);
Sprite[2].Translation=XMFLOAT3(thisStartX,yPos,1.0f);
Sprite[3].Translation=XMFLOAT3(thisStartX,yPos,1.0f);
Sprite[4].Translation=XMFLOAT3(thisStartX,thisEndY,1.0f);
Sprite[5].Translation=XMFLOAT3(thisEndX,thisEndY,1.0f);
UINT TexLookup=0;
UINT Letter= static_cast<int>(Text[i]);
if (Letter < indexA || Letter > indexZ){
TexLookup=(indexA - indexZ) +1;
}
else{
TexLookup=(Letter-indexA);
}
float texStart = 0.0f + (TexelWidth*static_cast<float>(TexLookup));
float TexEnd = texStart + TexelWidth;
Sprite[0].TextureCoord = XMFLOAT2(TexEnd,0.0f);
Sprite[1].TextureCoord = XMFLOAT2(TexEnd,1.0f);
Sprite[2].TextureCoord = XMFLOAT2(texStart,1.0f);
Sprite[3].TextureCoord = XMFLOAT2(texStart,1.0f);
Sprite[4].TextureCoord = XMFLOAT2(texStart,0.0f);
Sprite[5].TextureCoord = XMFLOAT2(TexEnd,0.0f);
Sprite= Sprite + 6;
}
iD3D.DeviceContext->Unmap(FontVertexBuffer,0); //MAP END
UINT stride=sizeof(VertexText);
UINT offset=0;
iD3D.DeviceContext->VSSetShader(iText.Fontvs,0,0);
iD3D.DeviceContext->PSSetShader(iText.Fontps,0,0);
//Projection=XMMatrixPerspectiveFovLH(XM_PIDIV2, 1.0, 0.0f, 1000.0f);
Projection=iD3D.mProjection;
iD3D.WorldCB.mWorldVP=XMMatrixTranspose(Projection);
iD3D.DeviceContext->UpdateSubresource(iD3D.MatrixBuffer, 0, NULL, &iD3D.WorldCB, 0, 0);
iD3D.DeviceContext->VSSetConstantBuffers(0,1,&iD3D.MatrixBuffer);
iD3D.DeviceContext->IASetVertexBuffers(0,1, &iText.FontVertexBuffer, &stride, &offset);
iD3D.DeviceContext->PSSetShaderResources(0,1, &iText.FontSRV);
iD3D.DeviceContext->PSSetSamplers(0,1,&iText.FontSRVSampler);
iD3D.DeviceContext->IASetPrimitiveTopology( D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
iD3D.DeviceContext->Draw(6*textSize,0);
return true;
}
If you made it this far, thank you. I think that it might be that my .hlsl file might not be configured properly to receive textures, and I might need to know how textures interface properly with the shader to produce the output. Thanks!
The HLSL was not written correctly to include Texture2D x: register( t0 ) or SamplerState x : register( s0 ), taking in necessary resources for the texture. Another problem has surfaced, but this question has been solved.
I seem to have ran into a strange issue with OpenGL. Everything works fine with my class until I make the map too big (around 800x800 is the max), and then OpenGL doesn't draw anything. I have made calls to glGetBufferSubData, and as far as I could tell the data seemed correct in both the vertex and index buffers, yet nothing is being drawn? At first I assumed an overflow somewhere in my code, but according to std::numeric_limits my vertex and index iterators don't seem to come anywhere close to the max size of a (signed) int. I use a lot of wrapper classes around OpenGL objects, but they are very simple, usually inline calls to their OpenGL equivalent. Same for the "M_" typedefs around primitive types. Below are the main loop I render in, the class where I believe the issue lies, and 2 screenshots of the output.
Correct output: http://i.imgur.com/cvC1T7L.png
Blank ouput, after expanding map: http://i.imgur.com/MmmNgj4.png
Main loop:
int main(){
//open window
Memento::MainWindow& main_window = Memento::MainWindow::GetInstance();
Memento::MainWindow::Init();
main_window.SetTitle("Memento");
main_window.Open();
//matrices
glmx_mat4 ortho_matrix = {};
glmx_mat4_ortho(0.0f, 800.0f, 600.0f, 0.0f, 5.0f, 25.0f, ortho_matrix);
glmx_mat4 modelview_matrix = {};
glmx_mat4_identity(modelview_matrix);
glmx_vec3 translate_vec = {0.0f, 0.0f, -10.0f};
glmx_mat4_translate(modelview_matrix, translate_vec, modelview_matrix);
glmx_mat4_multiply(ortho_matrix, modelview_matrix, ortho_matrix);
//shaders
Memento::GLShader default_vert_shader("default.vert", GL_VERTEX_SHADER);
default_vert_shader.Compile();
Memento::GLShader default_frag_shader("default.frag", GL_FRAGMENT_SHADER);
default_frag_shader.Compile();
//program
Memento::GLProgram default_program;
default_program.Create();
default_program.AttachShader(default_vert_shader);
default_program.AttachShader(default_frag_shader);
Memento::GLVertexArray default_vert_array;
default_vert_array.Create();
default_vert_array.Bind();
//BufferGameMap class- where I believe the issue lies
Memento::TextureAtlas atlas1("atlas/cat_image.png", "atlas/cat_source.xml");
Memento::BufferGameMap map1("tryagain.tmx", atlas1);
//bind buffers
map1.GetVertexBuffer().Bind();
map1.GetIndexBuffer().Bind();
//upload vertex attributes
default_vert_array.EnableIndex(0);
default_vert_array.IndexData(0, 2, GL_FLOAT, NULL, 8 * sizeof(Memento::M_float));
default_vert_array.BindIndex(default_program, 0, "map_vert");
//link, validate, and use program
default_program.Link();
default_program.Validate();
default_program.Use();
//upload matrix as uniform
glUniformMatrix4fv(default_program.GetUniformLocation("modelviewprojection_matrix"),
1, GL_FALSE, ortho_matrix);
//main draw loop
while(not glfwGetKey(GLFW_KEY_ESC)){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDrawElements(GL_TRIANGLES, map1.GetIndexBufferLength(), GL_UNSIGNED_INT, NULL);
glfwSwapBuffers();
}
//close window & exit
main_window.Close();
return (0);
}
BufferGameMap class (issue is probably here!):
Memento::BufferGameMap::BufferGameMap(std::string const& file, const Memento::TextureAtlas& atlas):
TmxMap::GameMap(), background_color_color4(), vertex_buffer(), index_buffer(),
vertex_buffer_len(0), index_buffer_len(0){
Create(file, atlas);
}
Memento::M_void Memento::BufferGameMap::Create(std::string const& file, const Memento::TextureAtlas& atlas){
if(IsCreated())Destroy();
TmxMap::GameMap::CreateFromFile(file);
std::vector<TmxMap::Layer> const& layers = GetLayers();
if(not layers.empty()){
const std::vector<TmxMap::Layer>::const_iterator layers_end = layers.end();
std::vector<TmxMap::Layer>::const_iterator layers_iter = layers.begin();
Memento::M_float* vertex_buffer_data = NULL;
Memento::M_uint* index_buffer_data = NULL;
for(; layers_iter != layers_end; ++layers_iter){
vertex_buffer_len += layers_iter -> GetMapTiles().size() * (4 * (2 +
2 + 2 + 2));
index_buffer_len += layers_iter -> GetMapTiles().size() * 6;
}
vertex_buffer_data = new Memento::M_float[vertex_buffer_len];
index_buffer_data = new Memento::M_uint[index_buffer_len];
//fill data to send to the gl
Memento::M_sizei vertex_buffer_iter = 0, index_buffer_iter = 0, index_buffer_quad_iter = 0;
//map data
const Memento::M_uint map_size_x = GetMapSize().x, map_size_y = GetMapSize().y;
const Memento::M_float map_tile_size_x = GetTileSize().x, map_tile_size_y = GetTileSize().y;
//per layer data
std::vector<TmxMap::MapTile> const* map_tiles = NULL;
std::vector<TmxMap::MapTile>::const_iterator map_tiles_iter, map_tiles_end;
//per tile data
Memento::M_float map_origin_x = 0.0f, map_origin_y = 0.0f;
for(layers_iter = layers.begin(); layers_iter != layers_end; ++layers_iter){
map_tiles = &layers_iter -> GetMapTiles();
for(map_tiles_iter = map_tiles -> begin(), map_tiles_end = map_tiles -> end();
map_tiles_iter != map_tiles_end; ++map_tiles_iter,
vertex_buffer_iter += 4 * (2 + 2 + 2 +
2), index_buffer_iter += 6,
index_buffer_quad_iter += 4){
map_origin_x = static_cast<Memento::M_float>(map_tiles_iter -> map_tile_index /
map_size_y) * map_tile_size_x;
map_origin_y = static_cast<Memento::M_float>(map_tiles_iter -> map_tile_index %
map_size_y) * map_tile_size_y;
vertex_buffer_data[vertex_buffer_iter] = map_origin_x;
vertex_buffer_data[vertex_buffer_iter + 1] = map_origin_y;
//=========================================================
vertex_buffer_data[vertex_buffer_iter + 8] = map_origin_x;
vertex_buffer_data[vertex_buffer_iter + 9] = map_origin_y + map_tile_size_y;
//=========================================================
vertex_buffer_data[vertex_buffer_iter + 16] = map_origin_x + map_tile_size_x;
vertex_buffer_data[vertex_buffer_iter + 17] = map_origin_y + map_tile_size_y;
//=========================================================
vertex_buffer_data[vertex_buffer_iter + 24] = map_origin_x + map_tile_size_x;
vertex_buffer_data[vertex_buffer_iter + 25] = map_origin_y;
//=========================================================
index_buffer_data[index_buffer_iter] = index_buffer_quad_iter;
index_buffer_data[index_buffer_iter + 1] = index_buffer_quad_iter + 1;
index_buffer_data[index_buffer_iter + 2] = index_buffer_quad_iter + 2;
index_buffer_data[index_buffer_iter + 3] = index_buffer_quad_iter;
index_buffer_data[index_buffer_iter + 4] = index_buffer_quad_iter + 2;
index_buffer_data[index_buffer_iter + 5] = index_buffer_quad_iter + 3;
}
}
vertex_buffer.Create(GL_ARRAY_BUFFER, GL_STATIC_DRAW);
vertex_buffer.Bind();
vertex_buffer.AllocateRef(vertex_buffer_len * sizeof(Memento::M_float),
static_cast<const Memento::M_void*>(vertex_buffer_data));
vertex_buffer.Unbind();
index_buffer.Create(GL_ELEMENT_ARRAY_BUFFER, GL_STATIC_DRAW);
index_buffer.Bind();
index_buffer.AllocateRef(index_buffer_len * sizeof(Memento::M_uint),
static_cast<const Memento::M_void*>(index_buffer_data));
index_buffer.Unbind();
delete[] vertex_buffer_data;
delete[] index_buffer_data;
}
}
Vertex shader:
#version 140
precision highp float;
uniform mat4 modelviewprojection_matrix;
in vec2 map_vert;
void main(){
gl_Position = modelviewprojection_matrix * vec4(map_vert, 0, 1);
}
Fragment shader:
#version 140
precision highp float;
out vec4 frag_color;
void main(){
frag_color = vec4(0.4, 0.2, 0.6, 0.5);
}
I think you are running out of stack memory.
By allocating the data on the heap you can use all the memory available to your process, while the stack is limited to 1MB.
In other words: Move the object allocation outside of the main scope to the global scope.
Memento::TextureAtlas * atlas1;//("atlas/cat_image.png", "atlas/cat_source.xml");
Memento::BufferGameMap * map1;//("tryagain.tmx", atlas1);
int main(){
atlas1 = new Memento::TextureAtlas("atlas/cat_image.png", "atlas/cat_source.xml");
map1 = new Memento::BufferGameMap("tryagain.tmx", atlas1);
//.... acess with ->
}
or if this will not cause compiler errors:
Memento::TextureAtlas atlas1("atlas/cat_image.png", "atlas/cat_source.xml");
Memento::BufferGameMap map1("tryagain.tmx", atlas1);
int main(){
//.... acess with .
}
I'm using Fltk to render openGL graphs. currently I'm debugging a global array which is sorted by a heapsort function. My purpose is to see after each swap of elements in the heapsort function a graphical swap of elements. but I don't want to catch an event from FLTK event_handle for every time i need to redraw after I swapped and waiting at breakpoint. (the heapsort function and the opengl render part are running in 2 different threads (if that doesn't has to go without saying)).
So the first try I had was to use:
Fl::add_timeout(1.0, MyRedrawCallback, (void *)&myWindow);
Fl::run();
void MyRedrawCallback(void *myWindow)
{
MyWindow *pMyWindow;
pMyWindow = (MyWindow *) myWindow;
pMyWindow->redraw();
Fl::repeat_timeout(1.0, MyRedrawCallback, (void *)&pMyWindow);
}
But every Time the callback is called the 2nd time i get an "Access violation reading"
I'm suggesting that FL::run starts a different thread so maybe the first time is still in same thread so the address of redraw is still usable but after that I'm in a different thread and the function at address is not that what I'm expecting?!
But I already took a different way because I wasn't sure i cant even use the timeout on this way.
So i was looking for a way to get an event that's eq to "set amount of time passed" or "nothing is happening for..." but there isn't such a handle I'm right?
Finally is there a way to let FLTK execute commands even outside the eventloop? or is there another way to solve my problem?
Please take a look at the following example, taken from here: http://seriss.com/people/erco/fltk/#OpenGlInterp
#include <FL/Fl.H>
#include <FL/Fl_Gl_Window.H>
#include <FL/gl.h>
#include <math.h>
//
// Demonstrate interpolating shapes
// erco 06/10/05
//
class Playback : public Fl_Gl_Window {
int frame;
// Linear interpolation between two values based on 'frac' (0.0=a, 1.0=b)
float Linterp(float frac, float a, float b) {
return( a + ( frac * (b - a) ));
}
// Sinusoidal easein/easeout interpolation between two values based on 'frac' (0.0=a, 1.0=b)
float SinInterp(float frac, float a, float b) {
float pi = 3.14159;
frac = (sin(pi/2 + frac*pi ) + 1.0 ) / 2.0; // 0 ~ 1 -> 0 ~ 1
return(Linterp(frac,a,b));
}
// DRAW SIMPLE SHAPE INTERPOLATION
// Interpolation is based on the current frame number
//
void DrawShape(int frame) {
// Calculate a fraction that represents the frame# being shown
float frac = ( frame % 48 ) / 48.0 * 2;
if ( frac > 1.0 ) frac = 2.0-frac; // saw tooth wave: "/\/\/\"
static float a_xy[9][2] = {
{ -.5, -1. }, { 0.0, -.5 }, { -.5, -1. }, { 0.0, -.5 },
{ 0.0, 0.0 },
{ 0.0, -.5 }, { +.5, -1. }, { 0.0, -.5 }, { +.5, -1. },
};
static float b_xy[9][2] = {
{ -.25, -1. }, { -.50, -.75 }, { -.75, -1.0 }, { -.50, -.75 },
{ 0.0, 0.0 },
{ +.50, -.75 }, { +.75, -1.0 }, { +.50, -.75 }, { +.25, -1.0 }
};
// Linterp a and b to form new shape c
float c_xy[9][2];
for ( int i=0; i<9; i++ )
for ( int xy=0; xy<2; xy++ )
c_xy[i][xy] = SinInterp(frac, a_xy[i][xy], b_xy[i][xy]);
// Draw shape
glColor3f(1.0, 1.0, 1.0);
glBegin(GL_LINE_STRIP);
for ( int i=0; i<9; i++ )
glVertex2f(c_xy[i][0], c_xy[i][1]);
glEnd();
}
// DRAW THE WIDGET
// Each time we're called, assume
//
void draw() {
if (!valid()) {
valid(1);
glLoadIdentity();
glViewport(0,0,w(),h());
}
glClear(GL_COLOR_BUFFER_BIT);
// Draw shape 4x, rotated at 90 degree positions
glPushMatrix();
DrawShape(frame); glRotatef(90.0, 0, 0, 1);
DrawShape(frame); glRotatef(90.0, 0, 0, 1);
DrawShape(frame); glRotatef(90.0, 0, 0, 1);
DrawShape(frame);
glPopMatrix();
// Advance frame counter
++frame;
}
// 24 FPS TIMER CALLBACK
// Called 24x per second to redraw the widget
//
static void Timer_CB(void *userdata) {
Playback *pb = (Playback*)userdata;
pb->redraw();
Fl::repeat_timeout(1.0/24.0, Timer_CB, userdata);
}
public:
// Constructor
Playback(int X,int Y,int W,int H,const char*L=0) : Fl_Gl_Window(X,Y,W,H,L) {
frame = 0;
Fl::add_timeout(1.0/24.0, Timer_CB, (void*)this); // 24fps timer
end();
}
};
int main() {
Fl_Window win(500, 500);
Playback playback(10, 10, win.w()-20, win.h()-20);
win.resizable(&playback);
win.show();
return(Fl::run());
}
This example more/less does exactly what you want. Greg Ercolano has more FLTK examples on his web-site. I recommend taking a look at http://seriss.com/people/erco/fltk/ .