I'm trying to draw a quad with a texture onto the screen such that texels and pixels perfectly align. Sounds pretty easy. I draw 2 triangles (as TRIANGLE_LIST, so 6 vertices) using these shaders:
// Constant buffer declaration assumed (not shown in the original);
// outputSize and topLeftPos are expressed in pixel units as described below.
cbuffer DrawConstants
{
    uint2 outputSize;
    float2 topLeftPos;
};

struct VSOutput
{
    float4 position : SV_POSITION;
    float2 uv : TEXCOORD0;
};

VSOutput VS_Draw(uint index : SV_VertexId)
{
    uint vertexIndex = index % 6;

    // compute face corner in [0,0]-[1,1] space
    float2 vertex = 0;
    switch (vertexIndex)
    {
    case 0: vertex = float2(0, 0); break;
    case 1: vertex = float2(1, 0); break;
    case 2: vertex = float2(0, 1); break;
    case 3: vertex = float2(0, 1); break;
    case 4: vertex = float2(1, 0); break;
    case 5: vertex = float2(1, 1); break;
    }

    // uv matches the unit-square corner
    float2 uv = vertex;

    // scale to size and move to position (both in pixels)
    vertex = vertex * (float2)outputSize;
    vertex = vertex + topLeftPos;

    // convert to clip space
    VSOutput output;
    output.position = float4(vertex / (float2)outputSize * float2(2.0f, -2.0f) + float2(-1.0f, 1.0f), 0, 1);
    output.uv = uv;
    return output;
}
float4 PS_Draw(VSOutput input) : SV_TARGET
{
    uint2 pixelPos = (uint2)(input.uv * (float2)outputSize);
    // output a 4x4 checker (parenthesized so the XOR happens before the comparison)
    return ((((pixelPos.x >> 2) & 1) ^ ((pixelPos.y >> 2) & 1)) != 0) ? float4(0, 1, 1, 0) : float4(1, 1, 0, 0);
}
where outputSize and topLeftPos are constants and expressed in pixel units.
Now for outputSize = (102,12) and topLeftPos=(0,0) I get (what I would expect):
link to image (as I'm not allowed to post images)
But for outputSize = (102,12) and topLeftPos = (0,0.5) I get (output for x=0, y=0.5):
link to image (as I'm not allowed to post images)
As you can see, there is a uv-discontinuity where the two triangles connect, and the interpolation of uv is inaccurate. This happens (in both x and y) only at positions around .5: below .49 it correctly snaps to texel 0 and above .51 it correctly snaps to texel 1, but in between I get this artifact.
For the purpose I need this for, it is essential to have pixel-perfect mapping. Can anyone enlighten me why this happens?
There are two things you need to consider to understand what is happening:
Pixel corners in window space have integer coordinates, and pixel centers in window space have half-integer coordinates.
When a triangle is rasterized, D3D11 interpolates all attributes to pixel centers.
So what is happening is that when topLeftPos = (0,0), the value of input.uv * (float2)outputSize is always a half-integer, which is consistently rounded down to the closest integer. However, when topLeftPos = (0,0.5), (input.uv * (float2)outputSize).y should always be exactly an integer. Due to unpredictable floating-point precision issues, it sometimes comes out a little less than the exact integer, and in that case it is rounded down one texel too far. That is where you see your stretched squares.
So if you want perfect mapping, your source square should be aligned with the pixel boundaries and not pixel centers.
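To make that concrete, here is a minimal C++ sketch (assumed values, not the actual GPU arithmetic) of what gets evaluated at a pixel center; one ulp of interpolator error is emulated with nextafterf:
#include <cmath>
#include <cstdio>
#include <initializer_list>

int main() {
    const float pixelCenterY = 3.0f + 0.5f; // attributes are evaluated at row + 0.5

    for (float topY : {0.0f, 0.5f}) {
        // uv.y * outputSize.y simplifies to pixelCenterY - topY in exact math
        float texelY = pixelCenterY - topY;
        float oneUlpLow = std::nextafterf(texelY, 0.0f); // tiny interpolation error
        std::printf("topY=%.1f  texelY=%.6f -> texel %d; one ulp low -> texel %d\n",
                    topY, texelY, (int)texelY, (int)oneUlpLow);
    }
    // topY=0.0: texelY=3.5 -> texel 3 either way (half-integer, robust)
    // topY=0.5: texelY=3.0 -> texel 3, but one ulp low truncates to texel 2
    return 0;
}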
I have a texture problem with the cubemap I'm rendering and can't seem to figure it out. I've generated a cube map with DirectX's texture tools and then read it using
D3DX11CreateShaderResourceViewFromFile(device, L"cubemap.dds", 0, 0, &fullcubemap, 0);
The cubemap texture is not high quality at all; it looks really stretched/distorted. I can definitely tell that the images used for the cubemap match up correctly, but it doesn't look good at all at the moment.
I'm not sure why this is happening. Is it because my textures are too large/small, or is it something else? If it's due to the size of the textures, what is a recommended texture size? I am using a sphere for the cubemap, not a cube.
Edit:
Shader:
cbuffer SkyboxConstantBuffer {
    float4x4 world;
    float4x4 view;
    float4x4 projection;
};

TextureCube gCubeMap;

SamplerState samTriLinearSam {
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = Wrap;
    AddressV = Wrap;
};

struct VertexIn {
    float4 position : POSITION;
};

struct VertexOut {
    float4 position : SV_POSITION;
    float4 spherePosition : POSITION;
};

VertexOut VS(VertexIn vin) {
    VertexOut vout = (VertexOut)0;
    vin.position.w = 1.0f;
    vout.position = mul(vin.position, world);
    vout.position = mul(vout.position, view);
    vout.position = mul(vout.position, projection);
    vout.spherePosition = vin.position;
    return vout;
}

float4 PS(VertexOut pin) : SV_Target {
    // TextureCube expects a float3 direction, so take .xyz explicitly
    return gCubeMap.Sample(samTriLinearSam, pin.spherePosition.xyz);//float4(1.0, 0.5, 0.5, 1.0);
}

RasterizerState NoCull {
    CullMode = None;
};

DepthStencilState LessEqualDSS {
    DepthFunc = LESS_EQUAL;
};

technique11 SkyTech {
    pass p0 {
        SetVertexShader(CompileShader(vs_4_0, VS()));
        SetGeometryShader(NULL);
        SetPixelShader(CompileShader(ps_4_0, PS()));
        SetRasterizerState(NoCull);
        SetDepthStencilState(LessEqualDSS, 0);
    }
}
Draw:
immediateContext->OMSetRenderTargets(1, &renderTarget, nullptr);
XMMATRIX sworld, sview, sprojection;
SkyboxConstantBuffer scb;
sview = XMLoadFloat4x4(&_view);
sprojection = XMLoadFloat4x4(&_projection);
sworld = XMLoadFloat4x4(&_world);
scb.world = sworld;
scb.view = sview;
scb.projection = sprojection;
immediateContext->IASetIndexBuffer(cubeMapSphere->getIndexBuffer(), DXGI_FORMAT_R32_UINT, 0);
ID3D11Buffer* vertexBuffer = cubeMapSphere->getVertexBuffer();
//ID3DX11EffectShaderResourceVariable * cMap;
////cMap = skyboxShader->GetVariableByName("gCubeMap")->AsShaderResource();
immediateContext->PSSetShaderResources(0, 1, &fullcubemap);//textures
//cMap->SetResource(fullcubemap);
immediateContext->IASetVertexBuffers(0, 1, &vertexBuffer, &stride, &offset);
immediateContext->VSSetShader(skyboxVertexShader, nullptr, 0);
immediateContext->VSSetConstantBuffers(0, 1, &skyboxConstantBuffer);
immediateContext->PSSetConstantBuffers(0, 1, &skyboxConstantBuffer);
immediateContext->PSSetShader(skyboxPixelShader, nullptr, 0);
immediateContext->UpdateSubresource(skyboxConstantBuffer, 0, nullptr, &scb, 0, 0);
immediateContext->DrawIndexed(cubeMapSphere->getIndexBufferSize(), 0, 0);
Initially I was planning to use this snippet to update the TextureCube variable in the shader:
ID3DX11EffectShaderResourceVariable * cMap;
cMap = skyboxShader->GetVariableByName("gCubeMap")->AsShaderResource();
cMap->SetResource(fullcubemap);
But it seems that has no effect, and in fact, without the following line, the sphere I'm using for the cubemap gets textured with a texture used by another object in the scene, so perhaps there's something going on here? I'm not sure what, though.
immediateContext->PSSetShaderResources(0, 1, &fullcubemap);//textures
Edit: Probably not the above; I realised that if this wasn't updated, the old texture would still be applied, as it's never cleared after each draw.
Edit: Tried the cubemap with both a sphere and a cube, still the same texture issue.
Edit: Tried loading the shader resource view differently
D3DX11_IMAGE_LOAD_INFO loadSMInfo;
loadSMInfo.MiscFlags = D3D11_RESOURCE_MISC_TEXTURECUBE;
ID3D11Texture2D* SMTexture = 0;
hr = D3DX11CreateTextureFromFile(device, L"cubemap.dds",
    &loadSMInfo, 0, (ID3D11Resource**)&SMTexture, 0);
D3D11_TEXTURE2D_DESC SMTextureDesc;
SMTexture->GetDesc(&SMTextureDesc);
D3D11_SHADER_RESOURCE_VIEW_DESC SMViewDesc;
SMViewDesc.Format = SMTextureDesc.Format;
SMViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE;
SMViewDesc.TextureCube.MipLevels = SMTextureDesc.MipLevels;
SMViewDesc.TextureCube.MostDetailedMip = 0;
hr = device->CreateShaderResourceView(SMTexture, &SMViewDesc, &fullcubemap);
Still produces the same output, any ideas?
Edit: Tried increasing the zfar distance, and the texture remains exactly the same no matter what value I use.
Example with second texture with increased view distance.
This texture is used on another object in my scene and comes out fine.
Edit: I have been trying to mess with the scaling of the texture/object
To achieve this I used
vin.position = vin.position * 50.0f;
This is beginning to look sort of like how it should. However, when I turn my camera angle the image disappears, so I obviously know this isn't correct; but if I could just scale the image properly per pixel or per vertex, I'm sure I could get the end result.
Edit:
I can confirm the cubemap is rendering correctly. I was ignoring the view/projection matrices and just using world, and managed to get this, which is the high-quality image I'm after, just not in the correct space. Yes, the faces are incorrect, but I'm not fussed about that now; it's easy enough to swap them around. I just need to get it rendering with this quality in the correct space.
When in camera space, does it take into account whether it's the outside or the inside of the sphere? If my textures were applied over the outside of the sphere and I view it from the inside, it's not going to look the same, is it?
The issue is with your texture size: it's small, and you are applying it to a larger surface. Make larger textures with more pixels.
It's confirmed that zfar and scaling have nothing to do with it.
Finally found the issue, a silly mistake: the matrices were never transposed before upload. DirectXMath builds row-major matrices, while HLSL constant buffers default to column-major packing, so each matrix must be transposed on the way in (alternatively, the matrices could be declared row_major in the cbuffer):
scb.world = XMMatrixTranspose(sworld);
scb.view = XMMatrixTranspose(sview);
scb.projection = XMMatrixTranspose(sprojection);
I am after a smooth, texture-based outline effect in OpenGL. So far I have tried mostly all kinds of edge-detection algorithms, which mostly result in crude and jagged outlines. Then I read about distance fields. I found an example which produces a pretty nice distance field. Here is the GLSL code:
#version 420

layout(binding = 0) uniform sampler2D colorMap;

flat in vec4 diffuseOut;
in vec2 uvsOut;
out vec4 outputColor;

const float ALPHA_THRESHOLD = 0.9;
const float NUM_SPOKES = 36.0;  // Number of radiating lines to check in.
const float ANGULAR_STEP = 360.0 / NUM_SPOKES;
const int ZERO_VALUE = 128;     // Color channel containing 0 => -128, 128 => 0, 255 => +127

int in_StepSize = 15;    // Distance to check each time (larger steps will be faster, but less accurate).
int in_MaxDistance = 30; // Maximum distance to search out to. Cannot be more than 127!

// find_alpha_at_distance() is not shown in the original snippet; presumably it
// samples NUM_SPOKES directions at the given radius and tests against the
// requested alpha.
vec4 distField()
{
    vec2 pixel_size = 1.0 / vec2(textureSize(colorMap, 0));
    vec2 screenTexCoords = gl_FragCoord.xy * pixel_size;
    int distance;
    if (texture(colorMap, screenTexCoords).a == 0.0)
    {
        // Texel is transparent, search for nearest opaque.
        distance = ZERO_VALUE + 1;
        for (int i = in_StepSize; i < in_MaxDistance; i += in_StepSize)
        {
            if (find_alpha_at_distance(screenTexCoords, float(i) * pixel_size, 1.0))
            {
                i = in_MaxDistance + 1; // BREAK!
            }
            else
            {
                distance = ZERO_VALUE + 1 + i;
            }
        }
    }
    else
    {
        // Texel is opaque, search for nearest transparent.
        distance = ZERO_VALUE;
        for (int i = in_StepSize; i <= in_MaxDistance; i += in_StepSize)
        {
            if (find_alpha_at_distance(screenTexCoords, float(i) * pixel_size, 0.0))
            {
                i = in_MaxDistance + 1; // BREAK!
            }
            else
            {
                distance = ZERO_VALUE - i;
            }
        }
    }
    return vec4(vec3(float(distance) / 255.0) * diffuseOut.rgb, 1.0 - texture(colorMap, screenTexCoords).a);
}

void main()
{
    outputColor = distField();
}
The result of this shader covers the whole screen, using the diffuse color to fill the screen area outside the distance-field outline. Here is what it looks like:
What I need is to leave all the area which has the solid red fill outside the distance field as transparent.
I came to the solution by using a grayscale 8-bit distance-field alpha map. Stefan Gustavson describes in detail how to do it. Basically, one needs to generate a distance-field version of the original texture (a brute-force CPU sketch of this step follows below). Then, in the first pass, this texture is rendered normally with the primitive into an FBO. In the second pass, alpha blending should be on, and the texture from the first pass is used with a screen quad. At this stage the fragment shader samples the alpha from that texture. This results in both smooth edges and alpha transparency around the edges.
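For the generation step, here is a minimal brute-force C++ sketch (my own illustration, not Gustavson's much faster algorithm):
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Brute-force signed distance field from an 8-bit alpha mask.
// O(w * h * spread^2): fine for offline generation of small textures.
std::vector<std::uint8_t> buildDistanceField(const std::vector<std::uint8_t>& alpha,
                                             int w, int h, int spread)
{
    std::vector<std::uint8_t> out(w * h);
    for (int y = 0; y < h; ++y)
    for (int x = 0; x < w; ++x) {
        const bool inside = alpha[y * w + x] >= 128;
        float best = (float)spread;
        // search a +-spread window for the nearest texel of the opposite state
        for (int dy = -spread; dy <= spread; ++dy)
        for (int dx = -spread; dx <= spread; ++dx) {
            const int nx = x + dx, ny = y + dy;
            if (nx < 0 || ny < 0 || nx >= w || ny >= h) continue;
            if ((alpha[ny * w + nx] >= 128) != inside)
                best = std::min(best, std::sqrt((float)(dx * dx + dy * dy)));
        }
        // encode the signed distance around 128 (the sign convention is
        // arbitrary; use whichever your shader expects)
        const float sd = inside ? best : -best;
        const int v = 128 + (int)(sd * 127.0f / (float)spread);
        out[y * w + x] = (std::uint8_t)std::max(0, std::min(255, v));
    }
    return out;
}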
Here is the result:
Based on the screenshot, I'm assuming you're rendering a fullscreen quad? If that's the case, Tim just provided the answer; try:
glEnable( GL_BLEND );
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
before you render the quad. Obviously, if you're going to render non-transparent stuff too, I advise you to render that first so you won't get depth-buffer problems. When you're done drawing the transparent stuff, call:
glDisable( GL_BLEND );
to turn alpha blending off again.
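Putting it together, the intended order looks roughly like this (the draw helper names are placeholders, not real functions):
drawOpaqueGeometry();                               // depth-tested as usual

glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glDepthMask(GL_FALSE);                              // don't write depth for transparents
drawDistanceFieldQuad();                            // fullscreen quad, alpha from the texture

glDepthMask(GL_TRUE);
glDisable(GL_BLEND);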
OpenGL can colour a rectangle with a gradient of colours from one side to the other. I'm using the following code for that in C++:
glBegin(GL_QUADS);
{
    glColor3d(simulationSettings->hotColour.redF(), simulationSettings->hotColour.greenF(), simulationSettings->hotColour.blueF());
    glVertex2d(keyPosX - keyWidth/2, keyPosY + keyHight/2);
    glColor3d(simulationSettings->coldColour.redF(), simulationSettings->coldColour.greenF(), simulationSettings->coldColour.blueF());
    glVertex2d(keyPosX - keyWidth/2, keyPosY - keyHight/2);
    glColor3d(simulationSettings->coldColour.redF(), simulationSettings->coldColour.greenF(), simulationSettings->coldColour.blueF());
    glVertex2d(keyPosX + keyWidth/2, keyPosY - keyHight/2);
    glColor3d(simulationSettings->hotColour.redF(), simulationSettings->hotColour.greenF(), simulationSettings->hotColour.blueF());
    glVertex2d(keyPosX + keyWidth/2, keyPosY + keyHight/2);
}
glEnd();
I'm using some Qt libraries to do the conversions between HSV and RGB. As you can see from the code, I'm drawing a rectangle with colour gradient from what I call hotColour to coldColour.
Why am I doing this? The program I made draws 3D Vectors in space and indicates their length by their colour. The user is offered to choose the hot (high value) and cold (low value) colours, and the program will automatically do the gradient using HSV scaling.
Why HSV scaling? Because HSV is single-valued along the colour map I'm using, and creating gradients with it linearly is a very easy task. For the user to select the colours, I offer a QColorDialog colour map:
http://qt-project.org/doc/qt-4.8/qcolordialog.html
On this colour map, you can see that red is available on both the right and the left side, making it impossible to have a linear scale for this colour map with RGB. But with HSV, the linear scale is very easily achievable: I just have to use a linear scale between 0 and 360 for the hue values.
With this paradigm, hot and cold colours define the direction of the gradient; so, for example, if I choose hue 0 for cold and 359 for hot, HSV will give me a gradient between 0 and 359 that includes the whole spectrum of colours, whilst OpenGL will basically go from red to red, which is no gradient at all!
How can I force OpenGL to use an HSV gradient rather than RGB? The only idea that occurs to me is slicing the rectangle I want to colour and doing many gradients over smaller rectangles, but I don't think that's the most efficient way to do it.
Any ideas?
How can I force OpenGL to use an HSV gradient rather than RGB?
I wouldn't call it "forcing", but "teaching". OpenGL's default way of interpolating vertex attribute vectors is barycentric interpolation of the individual vector elements, based on the fragment's NDC coordinates.
You must tell OpenGL how to turn those barycentrically interpolated HSV values into RGB.
For this we introduce a fragment shader that assumes the color vertex attribute is not RGB but HSV.
#version 120

varying vec3 vertex_hsv; /* set this in the vertex shader from the vertex attribute data */

vec3 hsv2rgb(vec3 hsv)
{
    float h = hsv.x * 6.; /* H: 0 = 0°, 1 = 360° */
    float s = hsv.y;
    float v = hsv.z;
    float c = v * s;
    /* cx = (chroma C, intermediate X) */
    vec2 cx = vec2(c, c * (1. - abs(mod(h, 2.) - 1.)));
    vec3 rgb = vec3(0., 0., 0.);
    if( h < 1. ) {
        rgb.rg = cx;
    } else if( h < 2. ) {
        rgb.gr = cx;
    } else if( h < 3. ) {
        rgb.gb = cx;
    } else if( h < 4. ) {
        rgb.bg = cx;
    } else if( h < 5. ) {
        rgb.br = cx;
    } else {
        rgb.rb = cx;
    }
    /* add the match value m = v - C (v - cx.x, the chroma, not cx.y) */
    return rgb + vec3(v - cx.x);
}

void main()
{
    gl_FragColor = vec4(hsv2rgb(vertex_hsv), 1.0);
}
You can do this with a fragment shader. You draw a quad and apply a fragment shader that does the coloring you want. The way I would do it is to set the colors of the corners to the HSV values you want, then in the fragment shader convert the interpolated color values from HSV back to RGB. For more information on fragment shaders, see the docs.
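On the host side, that could look like the sketch below, reusing the question's immediate-mode style; hotHue and coldHue are hypothetical user-chosen hues in degrees, and a trivial GLSL 1.20 vertex shader would copy gl_Color.rgb into the vertex_hsv varying:
// Pack HSV into the color attribute; the shader reinterprets (r,g,b) as (h,s,v).
glBegin(GL_QUADS);
{
    glColor3d(hotHue / 360.0, 1.0, 1.0);   // hot corner: hue, full saturation/value
    glVertex2d(keyPosX - keyWidth/2, keyPosY + keyHight/2);
    glColor3d(coldHue / 360.0, 1.0, 1.0);  // cold corner
    glVertex2d(keyPosX - keyWidth/2, keyPosY - keyHight/2);
    glColor3d(coldHue / 360.0, 1.0, 1.0);
    glVertex2d(keyPosX + keyWidth/2, keyPosY - keyHight/2);
    glColor3d(hotHue / 360.0, 1.0, 1.0);
    glVertex2d(keyPosX + keyWidth/2, keyPosY + keyHight/2);
}
glEnd();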
Okay first up I am using:
DirectX 10
C++
Okay, this is a bit of a bizarre one to me; I wouldn't usually ask the question, but I've been forced by circumstance. I have two triangles (not a quad, for reasons I won't go into!) covering the full screen, aligned to the screen by the fact that they are not transformed.
In the DirectX vertex declaration I am passing a 3-component float (pos x, y, z) and a 2-component float (texcoord x, y). Texcoord z is reserved for texture2D arrays, which I'm currently defaulting to 0 in the pixel shader.
I wrote this to achieve the simple task:
float fStartX = -1.0f;
float fEndX = 1.0f;
float fStartY = 1.0f;
float fEndY = -1.0f;
float fStartU = 0.0f;
float fEndU = 1.0f;
float fStartV = 0.0f;
float fEndV = 1.0f;
vmvUIVerts.push_back(CreateVertex(fStartX, fStartY, 0, fStartU, fStartV));
vmvUIVerts.push_back(CreateVertex(fEndX, fStartY, 0, fEndU, fStartV));
vmvUIVerts.push_back(CreateVertex(fEndX, fEndY, 0, fEndU, fEndV));
vmvUIVerts.push_back(CreateVertex(fStartX, fStartY, 0, fStartU, fStartV));
vmvUIVerts.push_back(CreateVertex(fEndX, fEndY, 0, fEndU, fEndV));
vmvUIVerts.push_back(CreateVertex(fStartX, fEndY, 0, fStartU, fEndV));
IA Layout: (Update)
D3D10_INPUT_ELEMENT_DESC ieDesc[2] = {
    { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0,  0, D3D10_INPUT_PER_VERTEX_DATA, 0 },
    { "TEXCOORD", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0 }
};
Data reaches the vertex shader in the following format: (Update)
struct VS_INPUT
{
    float3 fPos :POSITION;
    float3 fTexcoord :TEXCOORD0;
};
Within my vertex and pixel shader not a lot is happening for this particular draw call; the pixel shader does most of the work, sampling from a texture using the specified UV coordinates. However, this isn't working quite as expected: it appears that I am getting only one pixel of the sampled texture.
The workaround was in the pixel shader to do the following: (Update)
sampler s0 : register(s0);
Texture2DArray<float4> meshTex : register(t0);

float4 psMain(in VS_OUTPUT vOut) : SV_TARGET
{
    float4 Color;
    vOut.fTexcoord.z = 0;
    // rebuild the UVs from the interpolated clip-space position
    vOut.fTexcoord.x = vOut.fPos.x * 0.5f;
    vOut.fTexcoord.y = vOut.fPos.y * 0.5f;
    vOut.fTexcoord.x += 0.5f;
    vOut.fTexcoord.y += 0.5f;
    Color = meshTex.Sample(s0, vOut.fTexcoord);
    Color.a = 1.0f;
    return Color;
}
It is also worth noting that this worked with the following VS output struct defined in the shaders:
struct VS_OUTPUT
{
    float4 fPos :POSITION0; // SV_POSITION won't work in this case
    float3 fTexcoord :TEXCOORD0;
};
Now I have a texture that's stretched to fit the entire screen; both triangles already cover this, but why did the texture UVs not get used as expected?
To clarify, I am using a point sampler and have tried both clamp and wrap UV addressing.
I was a bit curious and found the solution/workaround mentioned above; however, I'd prefer not to have to do it, if anyone knows why it's happening?
What semantics are you specifying for your vertex type? Are they properly aligned with your vertices and also with your shader? If you are using a D3DXVECTOR4, D3DXVECTOR3 setup (as shown in your VS code), this could be a problem if your CreateVertex() returns a D3DXVECTOR3, D3DXVECTOR2 struct.
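For example (an illustrative sketch, not the asker's actual code): if CreateVertex() packs 3 position floats plus 2 texcoord floats, the input layout has to declare the texcoord as R32G32_FLOAT; the R32G32B32_FLOAT shown above would read 4 bytes of the next vertex into texcoord z:
// Hypothetical vertex packing with 3 + 2 floats (20-byte stride).
struct Vertex { float x, y, z, u, v; };

// The layout must match that packing exactly:
D3D10_INPUT_ELEMENT_DESC ieDesc[2] = {
    { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0,  0, D3D10_INPUT_PER_VERTEX_DATA, 0 },
    { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT,    0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0 },
};
// ...and the VS input struct should then use float2 fTexcoord : TEXCOORD0.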
It would be reassuring to see your pixel-shader code too.
Okay well, for one, texture coordinates outside the 0..1 range get clamped. I made the mistake of assuming that by going to clip space -1 to +1, the texture coordinates would too. This is not the case; they still go from 0.0 to 1.0.
The reason the code in the pixel shader worked is that it uses the clip-space x, y coordinates to overwrite those texture coordinates; an oversight on my part. However, the pixel-shader code results in texture stretch on a fullscreen 'quad', so it might be useful to someone somewhere ;)
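For reference, a sketch of the math that reconstruction performs; note that a fully correct version also flips y, since clip-space y points up while texture-space v points down (one source of the stretching mentioned above):
#include <utility>

// Clip space (-1..+1, y up) -> texture space (0..1, v down).
std::pair<float, float> clipToUV(float cx, float cy) {
    return { cx * 0.5f + 0.5f,    // u
             cy * -0.5f + 0.5f }; // v (flipped)
}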
I'm loading custom data into a GL_RGBA16F 2D texture:
glActiveTexture(GL_TEXTURE0);
int Gx = 128;
int Gy = 128;
GLuint grammar;
glGenTextures(1, &grammar);
glBindTexture(GL_TEXTURE_2D, grammar);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA16F, Gx, Gy);
float* grammardata = new float[Gx * Gy * 4](); // value-initialized to zero
*(grammardata) = 1; // red channel of texel (0,0)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, Gx, Gy, GL_RGBA, GL_FLOAT, grammardata);
int grammarloc = glGetUniformLocation(p_myGLSL->getProgramID(), "grammar");
if (grammarloc < 0) {
    printf("grammar missing!\n");
    exit(0);
}
glUniform1i(grammarloc, 0);
When I read the value of the uniform sampler2D grammar in GLSL, it returns 0.25 instead of 1. How do I fix the scaling problem?
if (texture(grammar, vec2(0, 0)).r == 0.25) {
    FragColor = vec4(0, 1, 0, 1);
} else {
    FragColor = vec4(1, 0, 0, 1);
}
By default, texture interpolation is set to the following values:
GL_TEXTURE_MIN_FILTER = GL_NEAREST_MIPMAP_LINEAR
GL_TEXTURE_MAG_FILTER = GL_LINEAR
GL_TEXTURE_WRAP_[S|T|R] = GL_REPEAT
This means that in cases where the mapping between texels of the texture and pixels on the screen does not fit, the hardware will interpolate for you. There are two cases:
The texture is displayed smaller than it actually is: in this case interpolation is performed between two mipmap levels. If no mipmaps are generated, these are treated as being 0, which could lead to 0.25.
The texture is displayed larger than it actually is (and I think this is the case here): here, the hardware does not interpolate between mipmap levels, but between adjacent texels of the texture. The problem comes from the fact that (0,0) in texture coordinates is NOT the center of texel [0,0], but its lower-left corner.
Have a look at the following drawing, which illustrates how texture coordinates are defined (here with 4 texels):
tex-coord:  0           0.25        0.5         0.75        1
texels:     |-----0-----|-----1-----|-----2-----|-----3-----|
As you can see, 0 is on the boundary of a texel, while the first texel's center is at 1/(2 * |texels|).
This means for you that, with the wrap mode set to GL_REPEAT, texture coordinate (0,0) will interpolate uniformly between the texels [0,0], [-1,0], [-1,-1] and [0,-1]. Since -1 == 127 (due to repeat, with a 128-texel-wide texture) and everything except [0,0] is 0, this results in:
([0,0] + [-1,0] + [-1,-1] + [0,-1]) / 4 = (1 + 0 + 0 + 0) / 4 = 0.25
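Two standard ways to get the exact texel value back (a sketch; the GLSL lines go in the shader):
// Fix 1: turn filtering off for this texture, so lookups return the raw texel
// (this also sidesteps the mipmap case, since no mip levels are needed):
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

// Fix 2: keep linear filtering but sample at the texel center instead of the
// corner. In GLSL:
//   texture(grammar, (vec2(0, 0) + 0.5) / vec2(textureSize(grammar, 0)))
// or fetch the texel directly, with no filtering at all:
//   texelFetch(grammar, ivec2(0, 0), 0)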