I am trying to implement a Gaussian blur with a convolution matrix in my shader.
This is the code I have:
float4 ppPS(float2 uv : TEXCOORD0, uniform sampler2D t1) : COLOR {
    // kernel matrix
    float3x3 kernel = { 1*(1/16), 2*(1/16), 1*(1/16),
                        2*(1/16), 4*(1/16), 2*(1/16),
                        1*(1/16), 2*(1/16), 1*(1/16) };

    int x, y;
    float2 sum = 0;
    for (x = -1; x <= 1; x++)
    {
        for (y = -1; y <= 1; y++)
        {
            float2 fl;
            fl.x = uv.x + x;
            fl.y = uv.y + y;
            sum += (fl) * (kernel[x+1][y+1]);
        }
    }
    return tex2D(t1, sum);
}
But for some reason, I get a picture that is all one solid color.
Here is the image without the blur:
Here is the image with the so-called blur:
Any idea what I am doing wrong here?
Try changing the float3x3 initializer values to floating-point format (.0f); otherwise the integer divisions make all of the values end up as 0.
// kernel matrix
static const float3x3 kernel = { 1*(1.0f/16.0f), 2*(1.0f/16.0f), 1*(1.0f/16.0f),
                                 2*(1.0f/16.0f), 4*(1.0f/16.0f), 2*(1.0f/16.0f),
                                 1*(1.0f/16.0f), 2*(1.0f/16.0f), 1*(1.0f/16.0f) };
After this change you will no longer see the blank output image!
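For reference, here is a tiny stand-alone illustration of the problem (plain C++, but the integer literals behave the same way in the shader):

// 1/16 with integer operands truncates to 0, so every kernel weight becomes 0;
// writing the literals as floats gives the intended 0.0625.
#include <cstdio>

int main()
{
    float wrong = 1 * (1 / 16);             // integer division: 1 / 16 == 0
    float right = 1 * (1.0f / 16.0f);       // floating-point division: 0.0625
    std::printf("%f %f\n", wrong, right);   // prints 0.000000 0.062500
}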
I am working on an assignment where I need to ray trace a sphere together with a plane (the floor). I have the sphere working, but I am having trouble with the plane. I use the ray-plane intersection formula t = -((o - p) · n) / (d · n). I have this in Plane.h, but when I build my code I get errors from Ray.h. Could someone explain what I'm doing wrong? Any help would be appreciated.
Plane.h
#include "..\..\raytrace\Ray.h"

class Plane
{
    using Colour = cv::Vec3b; // RGB Value

private:
    Vec3 normal_;
    Vec3 distance_;
    Colour color_;

public:
    Plane();
    Plane(Vec3 norm, Vec3 dis, Colour color) : normal_(norm), distance_(dis), color_(color) {}

    Vec3 norm() const {
        return normal_;
    }

    Vec3 dis() const {
        return distance_;
    }

    Colour color() const {
        return color_;
    }

    float findIntersection(Ray ray) {
        Vec3 rayDirection = ray.mPosition();
        float denominator = rayDirection.dot(normal_);
        if (denominator == 0) {
            return false;
        }
        else {
            // mPosition() is origin in Ray.h
            float t = -(((ray.mPosition() - distance_)).dot(normal_)) / denominator;
        }
    }
};
Ray.h
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <cmath>
#include "Image.h"

// Data types
typedef float Scalar;                      //**custom datatype: Scalar is float
typedef Eigen::Matrix<Scalar, 3, 1> Vec3;  //***Vec3 is a custom datatype (specific kind)
typedef Eigen::Matrix<Scalar, 2, 1> Vec2;
typedef unsigned char uchar;

class Ray
{
private:
    Vec3 mPosition_; // point

public:
    Ray() {}

    // constructor: when we construct a ray we get mPosition_
    Ray(Vec3 mPosition) : mPosition_(mPosition) {
        //
    }

    float t;

    Vec3 mPosition() const {
        return mPosition_;
    }

public:
    inline Vec3 generateRay(Vec3 const& pt) {
        Vec3 origin = mPosition_;
        Vec3 direction = pt - mPosition_; // d = s - e, pt is pixel position
        direction.normalize();
        return pt + t * direction;
    }
};
main.cpp
#include <cmath>
#include "Image.h"
#include "Ray.h"
#include "../build/raytrace/Plane.h"
//Color functions
using Colour = cv::Vec3b; // RGB Value
//Colour is a Vec3b datatype; use Colour instead of Vec3b. It holds 3 channel values between 0 and 255.
Colour red() { return Colour(255, 0, 0); }
Colour green() { return Colour(0, 255,0); }
Colour blue() { return Colour(0, 0, 255); }
Colour white() { return Colour(255, 255, 255); }
Colour black() { return Colour(0, 0, 0); }
//bounding the channel-wise pixel color between 0 and 255
//bounding the color value: if a value is beyond 255 clamp it to 255, and clamp any value below 0 to 0.
uchar Clamp(int color)
{
    if (color < 0) return 0;
    if (color >= 255) return 255;
    return color;
}
int main(int, char**) {
    // Create an image object with 500 x 500 resolution.
    Image image = Image(500, 500);

    // Coordinates of image rectangle
    Vec3 llc = Vec3(-1, -1, -1); //**llc - lower left corner
    Vec3 urc = Vec3(1, 1, -1);   //**urc - upper right corner
    int width = urc(0) - llc(0);
    int height = urc(1) - llc(1);
    Vec2 pixelUV = Vec2((float)width / image.cols, (float)height / image.rows);

    /// TODO: define camera position (view point), sphere center, sphere radius (Weightage: 5%)
    Vec3 CameraPoint = Vec3(0, 0, 0);   //**it is the origin
    Vec3 SphereCenter = Vec3(0, 0, -5); //**it is the Sphere Position
    float SphereRadius = 2.0;
    Vec3 LightSource = Vec3(2.0, 0.0, 3.0); //**
    Vec3 ambient = Vec3(0, 0, 0.5);         //**
    Vec3 diffuse = Vec3(224, 180, 255);     //** 0, 255, 100 - green
    Vec3 Origin = CameraPoint;
    // end

    for (int row = 0; row < image.rows; ++row) {
        for (int col = 0; col < image.cols; ++col) {
            // TODO: Build primary rays
            // Find the pixel position (PixelPos) for each row and col and then construct the vector PixelPos-Origin
            Vec3 pixelPos = Vec3(llc(0) + pixelUV(0) * (col + 0.5), llc(1) + pixelUV(1) * (row + 0.5), -1);

            // create a ray object
            Ray r; //**
            //Vec3 rayDir = pixelPos - Origin;     //**direction of the ray
            Vec3 rayDir = r.generateRay(pixelPos); //**pixelPos-Origin
            rayDir.normalize();                    //**normalize the ray direction vector

            // Ray-sphere intersection...(refer to the lecture slides and Section 4.4.1 of the textbook)
            float a = rayDir.dot(rayDir);
            Vec3 s0_r0 = Origin - SphereCenter; //***s0_r0 - sphere center - ray origin
            float b = 2.0 * rayDir.dot(s0_r0);
            float c = s0_r0.dot(s0_r0) - pow(SphereRadius, 2);

            // compute the discriminant
            float discriminant = pow(b, 2) - 4 * a * c;

            // if the discriminant is greater than zero
            if (discriminant > 0) {
                // find roots t1 and t2
                float t1 = (-b - sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**
                float t2 = (-b + sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**

                // determine which one is the real intersection point
                float t;
                //Sphere s;
                if (t1 < t2 && (t1 > 0 && t2 > 0)) {
                    t = t1;
                    //} //should this be after the if-statement below, so that it uses t = t1 and not just float t.????
                    if (t > 0) {
                        // Shade the pixel, normal is Intersection - SphereCenter, LightVector is LightSource - Intersection, make sure to normalize the vectors
                        Vec3 Intersection = Origin + (t * rayDir);
                        Vec3 Normal = Intersection - SphereCenter; //** normalize
                        Normal.normalize();                        //**
                        Vec3 LightVector = LightSource - Intersection; //**normalize
                        LightVector.normalize();                       //**
                        float diffuseTerm = LightVector.dot(Normal);
                        if (diffuseTerm < 0) diffuseTerm = 0;
                        Colour colour(0, 0, 0); // The ambient base
                        colour[0] = Clamp(ambient[0] + diffuse[0] * diffuseTerm);
                        colour[1] = Clamp(ambient[1] + diffuse[1] * diffuseTerm);
                        colour[2] = Clamp(ambient[2] + diffuse[2] * diffuseTerm);
                        image(row, col) = colour;
                    }
                } //
                else {
                    image(row, col) = black();
                }
            } else {
                // No intersection, discriminant < 0
                image(row, col) = red(); //**makes blue background colour
            }

            ////**Plane intersection
            // create a plane object
            Plane plane(Vec3(-5, 0, -4), Vec3(0, 0, -1), black());
            //Plane plane;
            //// if ray hits plane -> color black
            //if (plane.findIntersection(rayDir) == 1) {
            //    image(row, col) = black();
            //}
            //else {
            //    image(row, col) = white();
            //}
        }
    }

    /// Required outputs: (1) Ray traced image of a sphere (2) Ray traced image when the camera is placed inside the sphere (complete black)
    image.save("./result.png");
    image.display();
    return EXIT_SUCCESS;
}
Errors
(The compiler output was posted as a screenshot; it complains about a class redefinition.)
#include is a shockingly simple directive. It literally just copy-pastes the content of the file.
main.cpp includes both Ray.h and Plane.h, and Plane.h includes Ray.h, so Ray.h ends up being included twice. That's why the compiler is complaining about a "class redefinition".
You can add #pragma once at the top of all your header files to let the compiler know to skip a file that has already been included.
N.B. #pragma once is not officially part of the language, but it is supported by all major compilers and has a few small advantages over the alternative, a classic include guard.
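For example, either of these at the top of Ray.h (and the other headers) prevents the double inclusion; the guard macro name below is only an illustration:

// Option 1: non-standard, but supported by all major compilers
#pragma once

// Option 2: a portable include guard (the macro name is arbitrary, but must be unique per header)
#ifndef RAY_H
#define RAY_H

// ... contents of Ray.h ...

#endif // RAY_H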
I'm making a small game engine in which I want to draw things using OpenGL. I abstracted all the OpenGL objects into classes (Buffers, VertexArrays, Shaders, Programs...). Everything worked fine until I got to 3D rendering. I implemented my own matrices and vectors (I didn't use a library like glm), and when I multiply my vertex position in the shader by any matrix, the z coordinate flips (z = -z). I even tried it with the identity matrix. Here is the vertex shader:
#version 330 core

layout(location = 0) in vec4 i_pos;
layout(location = 1) in vec4 i_color;

out vec4 p_color;

uniform mat4 u_MVP;
uniform vec4 u_pos;

void main()
{
    gl_Position = u_MVP * (i_pos + u_pos);
    p_color = i_color;
}
I use the u_pos uniform just for debugging. And here is where I set the uniforms:
void Frame() override
{
    deltaTime = timer.Reset();
    if (Input::GetKey(Key::W).value == KeyDown) pos.z += deltaTime;
    if (Input::GetKey(Key::S).value == KeyDown) pos.z -= deltaTime;

    // mat4f(1.0f) creates an identity matrix
    shaderSelection.SetUniform("u_MVP", mat4f(1.0f));
    shaderSelection.SetUniform("u_pos", vec4f(pos));

    ren.DrawTriangles(vertexArray, indexBuffer, shaderSelection);
}
Although I'm sure there's nothing wrong with the matrix struct, here it is:
template<typename T = float, int sizeX = 4, int sizeY = 4>
struct BLAZE_API mat
{
private:
    T v[sizeY][sizeX];

public:
    mat()
    {
        for (unsigned i = 0; i < sizeX * sizeY; i++)
            ((T*)v)[i] = 0;
    }

    mat(T* ptr, bool transpose = false)
    {
        if (transpose)
            for (unsigned i = 0; i < sizeX * sizeY; i++)
                ((T*)v)[i] = ptr[i];
        else
            for (unsigned i = 0; i < sizeX * sizeY; i++)
                ((T*)v)[i] = ptr[i % sizeY * sizeX + i / sizeY];
    }

    mat(T n)
    {
        for (int x = 0; x < sizeX; x++)
            for (int y = 0; y < sizeY; y++)
                if (x == y)
                    operator[](x)[y] = n;
                else
                    operator[](x)[y] = 0;
    }

    mat(const mat<T, sizeX, sizeY>& mat)
    {
        for (int x = 0; x < sizeX; x++)
            for (int y = 0; y < sizeY; y++)
                v[x][y] = mat[x][y];
    }

    inline T* operator[] (unsigned i) const { return (T*)(v[i]); }

    inline void operator= (const mat<T, sizeX, sizeY>& mat)
    {
        for (int x = 0; x < sizeX; x++)
            for (int y = 0; y < sizeY; y++)
                v[x][y] = mat[x][y];
    }
};
And the SetUniform does this:
glUniformMatrix4fv( ... , 1, GL_FALSE, m[0]);
I made the matrix struct this way so that I don't have to pass GL_TRUE for the transpose parameter of glUniformMatrix4fv. I am pretty sure it isn't my matrix implementation that is inverting the z coordinate.
It is as if the camera is looking in the -Z direction, yet when I move an object in the +X direction it also moves in +X on the screen (the same applies to the Y direction), which it shouldn't if the camera is facing -Z.
Is this supposed to happen, and if so, can I change it?
If you do not transform the vertex coordinates (or transform them by the identity matrix), then you are setting the coordinates directly in normalized device space. NDC is a cube whose left, bottom, near corner is at (-1, -1, -1) and whose right, top, far corner is at (1, 1, 1). That means the X-axis points to the right, the Y-axis points upwards, and the Z-axis points into the view.
In general, the OpenGL coordinate system is a right-handed system. In view space the X-axis points to the right and the Y-axis points up.
Since the Z-axis is the cross product of the X-axis and the Y-axis, it points out of the viewport and appears to be inverted.
To compensate for the difference in the direction of the Z-axis between view space and normalized device space, the Z-axis has to be inverted.
A typical OpenGL projection matrix (e.g. glm::ortho, glm::perspective or glm::frustum) turns the right-handed system into a left-handed system by mirroring the Z-axis.
That means that if you use a (typical) projection matrix and no other transformations, the vertex coordinates are view-space coordinates: the X-axis points to the right, the Y-axis points upwards, and the Z-axis points out of the view.
In simplified words: in normalized device space the camera points in +Z; in view space (before the transformation by a typical projection matrix) the camera points in -Z.
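A small sketch with glm (not part of the original question, just to make the sign flip visible): a point 5 units in front of the camera in view space ends up with a positive Z in normalized device coordinates after a typical perspective projection.

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cstdio>

int main()
{
    glm::mat4 proj = glm::perspective(glm::radians(60.0f), 4.0f / 3.0f, 0.1f, 100.0f);
    glm::vec4 viewPos(0.0f, 0.0f, -5.0f, 1.0f);  // 5 units in front of the camera (view space, -Z)
    glm::vec4 clip = proj * viewPos;
    glm::vec3 ndc = glm::vec3(clip) / clip.w;    // perspective divide
    std::printf("NDC z = %f\n", ndc.z);          // positive: the Z-axis has been mirrored
}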
Note that if you set up a viewing frustum, then 0 < near and near < far; both conditions have to be fulfilled. The geometry has to lie between the near and the far plane, otherwise it is clipped. Usually a view matrix is used to look at the scene from a certain point of view, and the near and far planes of the viewing frustum are chosen so that the geometry lies in between.
Since the depth is not linear (see How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?), the near plane should be placed as close as possible to the geometry.
Could anyone please help me with calculating vertex normals in OpenGL?
I am loading an obj file and adding Gouraud shading by calculating vertex normals, without using glNormal3f or the glLight functions.
I have declared functions such as operators, cross product, inner product, etc.
I understand that in order to get the vertex normals, I first need to calculate each surface (face) normal with the cross product. Since I am loading an obj file, I am placing the three points of each face in id1, id2 and id3.
I would be grateful if anyone could help me write the code or give me a guideline on how to start. Thanks.
This is the drawing code:
FACE cur_face = cube.face[i];
glColor3f(cube.vertex_color[cur_face.id1].x,cube.vertex_color[cur_face.id1].y,cube.vertex_color[cur_face.id1].z);
glVertex3f(cube.vertex[cur_face.id1].x,cube.vertex[cur_face.id1].y,cube.vertex[cur_face.id1].z);
glColor3f(cube.vertex_color[cur_face.id2].x,cube.vertex_color[cur_face.id2].y,cube.vertex_color[cur_face.id2].z);
glVertex3f(cube.vertex[cur_face.id2].x,cube.vertex[cur_face.id2].y,cube.vertex[cur_face.id2].z);
glColor3f(cube.vertex_color[cur_face.id3].x,cube.vertex_color[cur_face.id3].y,cube.vertex_color[cur_face.id3].z);
glVertex3f(cube.vertex[cur_face.id3].x,cube.vertex[cur_face.id3].y,cube.vertex[cur_face.id3].z);
}
This is the code for the color calculation:
VECTOR kd;
VECTOR ks;

kd = VECTOR(0.8, 0.8, 0.8);
ks = VECTOR(1.0, 0.0, 0.0);

double inner = kd.InnerProduct(ks);

int i, j;
for (i = 0; i < cube.vertex.size(); i++)
{
    VECTOR n = cube.vertex_normal[i];
    VECTOR l = VECTOR(100, 100, 0) - cube.vertex[i];
    VECTOR v = VECTOR(0, 0, 1) - cube.vertex[i];

    float xl = n.InnerProduct(l) / n.Magnitude();
    VECTOR x = (n * (1.0 / n.Magnitude())) * xl;
    VECTOR r = x - (l - x);

    VECTOR color = kd * (n.InnerProduct(l)) + ks * pow((v.InnerProduct(r)), 10);
    cube.vertex_color[i] = color;
}
This answer is for a triangular mesh and can be extended to a polygonal mesh as well.
tempVertices stores the list of all vertices.
vertexIndices stores the faces (triangles) of the mesh as a flat vector of indices.
std::vector<glm::vec3> v_normal;

// initialize vertex normals to 0
for (int i = 0; i != tempVertices.size(); i++)
{
    v_normal.push_back(glm::vec3(0.0f, 0.0f, 0.0f));
}

// For each face calculate the normal and add it to the corresponding vertices of the face
for (unsigned int i = 0; i < vertexIndices.size(); i += 3)
{
    // vertexIndices[i], vertexIndices[i+1], vertexIndices[i+2] are the three vertices of a triangle
    glm::vec3 A = tempVertices[vertexIndices[i] - 1];
    glm::vec3 B = tempVertices[vertexIndices[i + 1] - 1];
    glm::vec3 C = tempVertices[vertexIndices[i + 2] - 1];
    glm::vec3 AB = B - A;
    glm::vec3 AC = C - A;
    glm::vec3 ABxAC = glm::cross(AB, AC);
    v_normal[vertexIndices[i] - 1] += ABxAC;
    v_normal[vertexIndices[i + 1] - 1] += ABxAC;
    v_normal[vertexIndices[i + 2] - 1] += ABxAC;
}
Now normalize each v_normal and use it; a short sketch of that step follows below.
Note that the number of vertex normals is equal to the number of vertices of the mesh.
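A minimal sketch of the normalization, assuming the v_normal vector built above:

// Normalize each accumulated vertex normal; skip zero-length vectors,
// which can occur for vertices that are not referenced by any face.
for (std::size_t i = 0; i < v_normal.size(); i++)
{
    if (glm::length(v_normal[i]) > 0.0f)
        v_normal[i] = glm::normalize(v_normal[i]);
}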
This code works fine on my machine
glm::vec3 computeFaceNormal(glm::vec3 p1, glm::vec3 p2, glm::vec3 p3) {
    // Uses p2 as a new origin for p1, p3
    auto a = p3 - p2;
    auto b = p1 - p2;
    // Compute the cross product a X b to get the face normal
    return glm::normalize(glm::cross(a, b));
}

void Mesh::calculateNormals() {
    this->normals = std::vector<glm::vec3>(this->vertices.size());
    // For each face calculate the normal and append it
    // to the corresponding vertices of the face
    for (unsigned int i = 0; i < this->indices.size(); i += 3) {
        glm::vec3 A = this->vertices[this->indices[i]];
        glm::vec3 B = this->vertices[this->indices[i + 1LL]];
        glm::vec3 C = this->vertices[this->indices[i + 2LL]];
        glm::vec3 normal = computeFaceNormal(A, B, C);
        this->normals[this->indices[i]] += normal;
        this->normals[this->indices[i + 1LL]] += normal;
        this->normals[this->indices[i + 2LL]] += normal;
    }
    // Normalize each normal
    for (unsigned int i = 0; i < this->normals.size(); i++)
        this->normals[i] = glm::normalize(this->normals[i]);
}
It seems all you need to implement is a function that computes the average vector of N vectors. This is one way to do it:
struct Vector3f {
    float x, y, z;
};
typedef struct Vector3f Vector3f;

Vector3f averageVector(Vector3f *vectors, int count) {
    Vector3f toReturn;
    toReturn.x = .0f;
    toReturn.y = .0f;
    toReturn.z = .0f;

    // sum all the vectors
    for (int i = 0; i < count; i++) {
        Vector3f toAdd = vectors[i];
        toReturn.x += toAdd.x;
        toReturn.y += toAdd.y;
        toReturn.z += toAdd.z;
    }

    // divide by the number of vectors
    // TODO: check (count == 0)
    float scale = 1.0f / count;
    toReturn.x *= scale;
    toReturn.y *= scale;
    toReturn.z *= scale;

    return toReturn;
}
I am sure you can port that to your C++ class. The result should then be normalized, unless its length is zero.
Find all surface normals for every vertex you have, then use averageVector and normalize the result to get the smooth normals you are looking for.
Still, as already mentioned, this is not appropriate for the edged parts of the shape. In those cases you should use the surface normals directly. You can probably handle most such cases by simply ignoring surface normals that differ too much from the others. Extremely edgy shapes such as a cube, for instance, will be impossible with this procedure. What you would get, for instance, is:
{
    1.0f, .0f, .0f,
    .0f, 1.0f, .0f,
    .0f, .0f, 1.0f
}
with a normalized average of {.58f, .58f, .58f}. The result would be pretty much an extremely low-resolution sphere rather than a cube.
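For completeness, here is a minimal sketch of the missing normalization step (it assumes the Vector3f struct and averageVector above); applied to the three cube-face normals it yields roughly that {.58f, .58f, .58f} vector:

#include <math.h>

Vector3f normalizedVector(Vector3f v) {
    Vector3f out = { .0f, .0f, .0f };
    float len = sqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
    if (len > .0f) {  // leave zero-length vectors untouched
        out.x = v.x / len;
        out.y = v.y / len;
        out.z = v.z / len;
    }
    return out;
}

// Usage for the cube corner discussed above:
// Vector3f n[3] = { {1.0f, .0f, .0f}, {.0f, 1.0f, .0f}, {.0f, .0f, 1.0f} };
// Vector3f smooth = normalizedVector(averageVector(n, 3));  // ~{.58f, .58f, .58f}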
Function parameters: input image, first color, second color.
I am taking an image, looking at its height and width, and then iterating through the pixels. If a pixel's color is closest to the first color (color1), I change that pixel's color to color1; if it is closest to color2, I change it to color2. I believe my problem is the expression abs(color2 - color1) / 2 that I use when comparing against the two parameter colors.
void Preprocessor(BMP pix, RGB color1, RGB color2) {
    int height = pix.GetHeight();
    int width = pix.GetWidth();
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            if (pix[i][j]->red + pix[i][j]->green + pix[i][j]->blue >
                abs(color2 - color1) / 2) { // pixel color closest to color1
                pix[i][j] = color1;
                pix[i][j] = color1;
                pix[i][j] = color1;
            } else { // pixel color closest to color2
                pix[i][j] = color2;
                pix[i][j] = color2;
                pix[i][j] = color2;
            }
        }
    }
}
Choosing the correct metric
The problem of deciding which color is closer to a given one is not trivial. There are several ways to approach it: you might want to match luminosity, or hue, or vibrance, or something else entirely.
You chose abs(color2 - color1) / 2, and this does not have any intuitive meaning; consider explaining the reasoning behind this exact expression.
I suggest that you start with something like brightness. Let's say you estimate the distance between two colors with a taxicab (Manhattan) metric and then pick the color with the smaller distance.
// Taxicab (Manhattan) metric
double distance(RGB c1, RGB c2) {
    return abs(c1->red - c2->red)
         + abs(c1->green - c2->green)
         + abs(c1->blue - c2->blue);
}
void Preprocessor(BMP pix, RGB color1, RGB color2) {
    int height = pix.GetHeight();
    int width = pix.GetWidth();
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            double d1 = distance(color1, pix[i][j]);
            double d2 = distance(color2, pix[i][j]);
            if (d1 < d2) { // pixel color closest to color1
                pix[i][j] = color1;
            } else {       // pixel color closest to color2
                pix[i][j] = color2;
            }
        }
    }
}
Considerations
You might also want to experiment with other metrics (for example Euclidean, sketched below) and with other color spaces that are better suited to this kind of comparison, such as HSV or HSL.
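For instance, a squared Euclidean distance can be dropped in as a replacement for the taxicab distance above (the square root is unnecessary, since it does not change which of the two distances is smaller); this sketch assumes the same RGB accessors used above:

// Squared Euclidean metric
double distanceEuclideanSq(RGB c1, RGB c2) {
    double dr = c1->red   - c2->red;
    double dg = c1->green - c2->green;
    double db = c1->blue  - c2->blue;
    return dr * dr + dg * dg + db * db;
}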
I am attempting to do some processing on a texture in the pixel shader. The data for the texture comes from a memory chunk of 8-bit data. The problem I am facing is how to read that data in the shader.
Code to create the texture and resource view:
In OnD3D11CreateDevice:
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Height = 480;
tDesc.Width = 640;
tDesc.Usage = D3D11_USAGE_DYNAMIC;
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Format = DXGI_FORMAT_R8_UINT;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tDesc.MiscFlags = 0;
V_RETURN(pd3dDevice->CreateTexture2D(&tDesc, NULL, &g_pCurrentImage));
D3D11_SHADER_RESOURCE_VIEW_DESC rvDesc;
g_pCurrentImage->GetDesc(&tDesc);
rvDesc.Format = DXGI_FORMAT_R8_UINT;
rvDesc.Texture2D.MipLevels = tDesc.MipLevels;
rvDesc.Texture2D.MostDetailedMip = tDesc.MipLevels - 1;
rvDesc.ViewDimension = D3D_SRV_DIMENSION_TEXTURE2D;
V_RETURN(pd3dDevice->CreateShaderResourceView(g_pCurrentImage, &rvDesc, &g_pImageRV));
In OnD3D11FrameRender:
HRESULT okay;
if (!g_updateDone) {
    D3D11_MAPPED_SUBRESOURCE resource;
    resource.pData = mImage.GetData();
    resource.RowPitch = 640;
    resource.DepthPitch = 1;
    okay = pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
    g_updateDone = true;
}
pd3dImmediateContext->PSSetShaderResources(0, 1, &g_pImageRV);
This returns no errors so far, everything seems to work.
The HLSL Shader:
//-----
// Textures and Samplers
//-----
Texture2D<int> g_txDiffuse : register( t0 );
SamplerState g_samLinear : register( s0 );

//-----
// shader input/output structure
//-----
struct VS_INPUT
{
    float4 Position  : POSITION;  // vertex position
    float2 TextureUV : TEXCOORD0; // vertex texture coords
};

struct VS_OUTPUT
{
    float4 Position  : SV_POSITION; // vertex position
    float2 TextureUV : TEXCOORD0;   // vertex texture coords
};

//-----
// Vertex shader
//-----
VS_OUTPUT RenderSceneVS( VS_INPUT input )
{
    VS_OUTPUT Output;
    Output.Position = input.Position;
    Output.TextureUV = input.TextureUV;
    return Output;
}

//-----
// Pixel Shader
//-----
float4 RenderScenePS( VS_OUTPUT In ) : SV_TARGET
{
    int3 loc;
    loc.x = 0;
    loc.y = 0;
    loc.z = 1;
    int r = g_txDiffuse.Load(loc);
    //float fTest = (float) r;
    return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, 1);
}
The thing is, I can't even debug it in PIX to see what r ends up being, because even with shader optimization disabled, the line int r = ... is never reached.
I tested
float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, fTest);
but this would result in "cannot map expression to pixel shader instruction set", even though it's a float.
So how do I read and use 8-bit integers from a texture, ideally with no sampling at all?
Thanks for any feedback.
Oh my, this is a really old question; I thought it said 2012!
But anyway, as it's still open:
Because GPUs are optimised for floating-point arithmetic, you probably won't get a great deal of performance advantage from using a Texture2D<int> over a Texture2D<float>.
You could attempt to use a Texture2D<float> and then try:
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, g_txDiffuse.Load(loc));
loc.z = 1;
This should be 0 here: the texture has only one mip level in your case, and mip levels start at 0 for the HLSL Load intrinsic.