I am trying to use the GL_RGB9_E5 format with a 3D texture. To understand how the format is used, I wrote a simple test, but I am not getting what I expect. Here is the test program:
GLuint texture;
const unsigned int depth = 1;
const unsigned int width = 7;
const unsigned int height = 3;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_3D, texture);
const float color[3] = { 0.0f, 0.5f, 0.0f };
const rgb9e5 uColor = float3_to_rgb9e5(color);
GLuint * t1 = (GLuint*)malloc(width*height*depth* sizeof(GLuint));
GLuint * t2 = (GLuint*)malloc(width*height*depth* sizeof(GLuint));
int index = 0;
for (int i = 0; i < depth; i++)
    for (int j = 0; j < width; j++)
        for (int k = 0; k < height; k++)
        {
            t1[index] = uColor.raw;
            index++;
        }
glTexImage3D(GL_TEXTURE_3D, 0,
GL_RGB9_E5,
width, height, depth, 0,
GL_RGB,
GL_UNSIGNED_INT,
(void*)t1);
glGetTexImage(GL_TEXTURE_3D, 0, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, t2);
index = 0;
for (int i = 0; i < depth; i++)
    for (int j = 0; j < width; j++)
        for (int k = 0; k < height; k++)
        {
            rgb9e5 t;
            t.raw = t2[index];
            index++;
        }
What I expect is that whatever uColor.raw I put into t1 comes back as the same raw value in t2.
I have taken the float3_to_rgb9e5 method from the specification for GL_RGB9_E5; the code is at the bottom of that document.
It would be great if someone could give me an example of how to use this format correctly.
GL_RGB,
GL_UNSIGNED_INT,
This means that you are passing 3-channel pixel data, where each channel is a normalized, 32-bit unsigned integer. But that's not what you're actually doing.
You're actually passing 3-channel data packed into a single 32-bit unsigned integer, such that the first 5 bits represent a shared floating-point exponent, followed by 3 sets of 9 bits, each representing the unsigned mantissa of one channel. We spell that:
GL_RGB,
GL_UNSIGNED_INT_5_9_9_9_REV
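A minimal sketch of what the corrected upload and readback might look like, reusing the t1 and t2 buffers from the question (the data itself is fine; only the client format/type pair changes):
// The client memory already holds packed rgb9e5 words, so say so in the type.
glTexImage3D(GL_TEXTURE_3D, 0,
    GL_RGB9_E5,                   // internal format kept by the GPU
    width, height, depth, 0,
    GL_RGB,                       // three colour channels...
    GL_UNSIGNED_INT_5_9_9_9_REV,  // ...packed into one 32-bit word per texel
    (void*)t1);
// Reading back with the same packed type returns the raw words unchanged.
glGetTexImage(GL_TEXTURE_3D, 0, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, t2);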
I can find many examples online that use CUDA texture references, but not so many that rely on texture objects. I am trying to understand why my code below always fetches 0 rather than my input texture. Am I missing something, or using a wrong setting? I simplified it as much as I could:
#include <cstddef>   // std::size_t
#include <cstring>   // memset
#include <stdio.h>   // printf
__global__ void fetch(cudaTextureObject_t tex, std::size_t width, std::size_t height)
{
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
float u = (i + 0.5f) / width;
float v = (j + 0.5f) / height;
auto p = tex2D<uchar4>(tex, u, v);
printf("i=%d, j=%d -> u=%3.2f, v=%3.2f, r=%d, g=%d, b=%d, a=%d\n", i, j, u, v, p.x, p.y, p.z, p.w);
// -> always returns p = {0, 0, 0, 0}
}
}
}
int main() {
constexpr std::size_t width = 2;
constexpr std::size_t height = 2;
// creating a dummy texture
uchar4 image[width*height];
for(std::size_t j = 0; j < height; ++j) {
for(std::size_t i = 0; i < width; ++i)
image[j*width+i] = make_uchar4(255*j/height, 255*i/width, 55, 255);
}
cudaArray_t cuArray;
auto channelDesc = cudaCreateChannelDesc<uchar4>();
cudaMallocArray(&cuArray, &channelDesc, width, height);
cudaMemcpy2DToArray(cuArray, 0, 0, image, width*sizeof(uchar4), width*sizeof(uchar4), height, cudaMemcpyHostToDevice);
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray;
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
cudaTextureObject_t texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
fetch<<<1, 1>>>(texObj, width, height);
cudaDeviceSynchronize();
cudaDestroyTextureObject(texObj);
cudaFreeArray(cuArray);
return 0;
}
In your code you specify the texture description as
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
and the array holding the texture data is defined as
auto channelDesc = cudaCreateChannelDesc<uchar4>();
Quoting the documentation
Linear Filtering
In this filtering mode, which is only available for floating-point textures ......
You have a uchar4 texture. You can't use linear filtering on an integer texture. Either change to a floating-point texture type or use another read mode.
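A sketch of the two ways this could be resolved, keeping the rest of the question's setup as-is. With cudaReadModeNormalizedFloat the hardware returns the channels as floats in [0, 1], which is what linear filtering requires; alternatively, point sampling keeps the uchar4 element reads:
// Option 1: keep linear filtering, but have the texture return normalized floats.
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode   = cudaReadModeNormalizedFloat;   // required for filtering integer channels
// The kernel then fetches float4; channel values arrive in [0, 1].
__global__ void fetch(cudaTextureObject_t tex, std::size_t width, std::size_t height)
{
    for (std::size_t j = 0; j < height; j++) {
        for (std::size_t i = 0; i < width; i++) {
            float u = (i + 0.5f) / width;
            float v = (j + 0.5f) / height;
            float4 p = tex2D<float4>(tex, u, v);
            printf("r=%3.0f, g=%3.0f, b=%3.0f, a=%3.0f\n",
                   p.x * 255.0f, p.y * 255.0f, p.z * 255.0f, p.w * 255.0f);
        }
    }
}
// Option 2: keep cudaReadModeElementType and tex2D<uchar4>,
// but give up interpolation and use point sampling instead.
texDesc.filterMode = cudaFilterModePoint;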
I'm working on making my own topographic map, and I have been using .hgt files from NASA.
I'm loading the files using
void MapImage::load_map_file(const char* filename) {
std::ifstream file(filename, std::ios::in | std::ios::binary);
if (!file) {
std::cout << "Error opening file!" << std::endl;
}
std::vector<short> tempHeight(TOTAL_SIZE);
unsigned char buffer[2];
int x, y;
for (int i = 0; i < TOTAL_SIZE; i++) {
if (!file.read(reinterpret_cast<char*>(buffer), sizeof(buffer))) {
std::cout << "Error reading file!" << std::endl;
}
tempHeight[i] = (buffer[0] << 8) | buffer[1];
}
height = tempHeight;
}
And then I add them to an in-memory bitmap using:
void MapImage::loadTextureImage() {
img_tex = 0;
glGenTextures(1, &img_tex);
int x, y, w, h;
w = h = SRTM_SIZE;
unsigned char* img;
img = (unsigned char *)malloc(3 * w * h);
memset(img, 0, sizeof(img));
int g = 0;
double height_color;
/*
for(int i = 0; i < TOTAL_SIZE; i++){
height_color = (float)height[i] / 10.0;
g = (height_color * 255);
if (g>255)g = 255;
img[i * 3 + 2] = (unsigned char)0;
img[i * 3 + 1] = (unsigned char)g;
img[i * 3 + 0]= (unsigned char)0;
}
*/
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; ++j) {
x = i;
y = (h - 1) - j;
height_color = (float)height[j + (i * w)] / 10.0;
g = (height_color * 255);
if (g>255)g = 255;
img[(x + y*w) * 3 + 2] = (unsigned char)0;
img[(x + y*w) * 3 + 1] = (unsigned char)g;
img[(x + y*w) * 3] = (unsigned char)0;
}
}
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, img_tex);
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_RGB,
w,
h,
0,
GL_RGB,
GL_UNSIGNED_BYTE,
img
);
}
However, this results in an image with the corner sliced off, like this
Using the commented-out version in loadTextureImage() looks slightly different, but has the same sliced corner.
Does anyone have a hint as to what's going on? I've tried using an image as a texture, loaded with the stb_image library, and that works fine, so I'm not sure where it's going wrong.
(the coordinates for the image are N10E099)
This looks like row misalignment, caused by the 3-byte-per-pixel colour data. Try making the following call just before glTexImage2D:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
This alignment value, which is 4 by default, is used by glTexImage2D and friends whenever texture data is read to be sent to the GPU.
There is no verification that it matches what the data actually looks like, so in cases like yours where a row doesn't end on a 4-byte boundary, the first few bytes of the next row will be skipped, leading to this diagonal distortion.
Texture data transfers in the other direction (from the GPU to client memory) are aligned via glPixelStorei(GL_PACK_ALIGNMENT, 1);.
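For completeness, a sketch of how this might slot into loadTextureImage() from the question (only the glPixelStorei calls are new):
// Rows of 3-byte RGB pixels are tightly packed, so drop the default 4-byte row alignment.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0,
             GL_RGB, GL_UNSIGNED_BYTE, img);
// If the texture is later read back into a tightly packed buffer with
// glGetTexImage, the packing direction needs the same treatment:
glPixelStorei(GL_PACK_ALIGNMENT, 1);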
I'm trying to create a dynamic array of arrays (of arrays), but for some reason the data gets corrupted. I'm using the data to generate a texture in an OpenGL application.
The following code works fine:
unsigned char imageData[64][64][3];
for (int i = 0; i < 64; i++)
{
for (int j = 0; j < 64; j++)
{
unsigned char r = 0, g = 0, b = 0;
if (i < 32)
{
if (j < 32)
r = 255;
else
b = 255;
}
else
{
if (j < 32)
g = 255;
}
imageData[i][j][0] = r;
imageData[i][j][1] = g;
imageData[i][j][2] = b;
}
std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
Problem is, I want to be able to create a texture of any size (not just 64*64). So I'm trying this:
unsigned char*** imageData = new unsigned char**[64]();
for (int i = 0; i < 64; i++)
{
imageData[i] = new unsigned char*[64]();
for (int j = 0; j < 64; j++)
{
imageData[i][j] = new unsigned char[3]();
unsigned char r = 0, g = 0, b = 0;
if (i < 32)
{
if (j < 32)
r = 255;
else
b = 255;
}
else
{
if (j < 32)
g = 255;
}
imageData[i][j][0] = r;
imageData[i][j][1] = g;
imageData[i][j][2] = b;
}
std::cout << std::endl;
}
glTexImage2D(target, 0, GL_RGB, 64, 64, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData);
But that doesn't work; the image gets all messed up, so I assume I'm creating the array of arrays (of arrays) incorrectly? What am I doing wrong?
Also, I guess I should be using vectors instead. But how can I cast a vector of vectors of vectors into a (void *)?
This line contains multiple bugs:
unsigned char* pixel = &(imageData[(y * height) + x]);
You should multiply x by height and add y. And there's also the fact that each pixel is actually 3 bytes. Some issues in your code that led to this bug (and will lead to others):
- You should be using std::vector. You can call std::vector::data to get a pointer to the underlying data when interfacing with C APIs.
- You should have a class that represents a pixel. This handles the offsetting correctly, gives things names, and makes the code clearer.
- Whenever you encode a multi-dimensional array into a single-dimensional one, carefully write an access function that takes care of the indexing so you can test it separately.
#include <vector>

struct Pixel {
    unsigned char red;
    unsigned char green;
    unsigned char blue;
};

struct TwoDimPixelArray {
    TwoDimPixelArray(int width, int height)
        : m_width(width), m_height(height)
    {
        m_vector.resize(m_width * m_height);
    }

    Pixel& get(int x, int y) {
        return m_vector[x * m_height + y];
    }

    Pixel* data() { return m_vector.data(); }

private:
    int m_width;
    int m_height;
    std::vector<Pixel> m_vector;
};
int width = 64;
int height = 64;
TwoDimPixelArray imageData(width, height);
for (int x = 0; x != width ; ++ x) {
for (int y = 0; y != height ; ++y) {
auto& pixel = imageData.get(x, y);
// ... pixel.red = something, pixel.blue = something, etc
}
}
glTexImage2D(target, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, imageData.data());
You need to use contiguous memory for it to work with OpenGL.
My solution is inspired by previous answers, but with a different indexing system:
unsigned char* imageData = new unsigned char[width * height * 3];
unsigned char r = 0, g = 0, b = 0;   // set these per pixel as needed
const unsigned int row_size_bytes = width * 3;
for (unsigned int x = 0; x < width; x++) {
    unsigned int current_row_offset_bytes = x * 3;   // byte offset of column x within a row
    for (unsigned int y = 0; y < height; y++) {
        unsigned int one_dim_offset = y * row_size_bytes + current_row_offset_bytes;
        unsigned char* pixel = &(imageData[one_dim_offset]);
        pixel[0] = r;
        pixel[1] = g;
        pixel[2] = b;
    }
}
Unfortunately it's untested, but I'm confident in it, since sizeof(unsigned char) is guaranteed to be 1.
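A short sketch of how the buffer might then be handed to OpenGL and released, assuming the same target and GL_RGB format used elsewhere in this question; the glPixelStorei call is only needed because rows of 3-byte pixels are not generally 4-byte aligned:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);   // tightly packed RGB rows
glTexImage2D(target, 0, GL_RGB, width, height, 0,
             GL_RGB, GL_UNSIGNED_BYTE, imageData);
delete[] imageData;                      // GL has copied the data, the buffer can go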
int j,k,t3,t4;
for (int i = 0;i < width;i++)
for (j = 0;j < height;j++)
{
t3 = (i*height + j)*3 ;
t4 = (i*height + j) * 4;
for (k = 0;k < 3;k++)
texture[t4+k] = data[t3+k];
texture[t4 + 3] = (data[t3 + 1]==255 && data[t3 + 2]==255 && data[t3]==255) ? 0 : 255;
}
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, texture);
It's a bitmap file and I loaded it successfully with the original data:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
But when I add the alpha channel manually, the texture loses its color. The picture is attached below.
I can't quite explain the exact symptoms, but the index arithmetic in your code looks off:
for (int i = 0;i < width;i++)
for (j = 0;j < height;j++)
{
t3 = (i*height + j)*3 ;
t4 = (i*height + j) * 4;
Since images are normally laid out in memory row by row, the index that iterates over the pixels in the row should be the one added without a multiplier, while the other index is multiplied by width (not height). This should be:
for (j = 0; j < height; j++)
for (int i = 0; i < width; i++)
{
t3 = (j * width + i) * 3;
t4 = (j * width + i) * 4;
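Folding that back into the rest of the question's loop, the corrected conversion might look like this (same texture and data buffers as in the question):
int j, k, t3, t4;
for (j = 0; j < height; j++)           // rows
    for (int i = 0; i < width; i++)    // pixels within a row
    {
        t3 = (j * width + i) * 3;      // index into the 3-byte-per-pixel source
        t4 = (j * width + i) * 4;      // index into the 4-byte-per-pixel destination
        for (k = 0; k < 3; k++)
            texture[t4 + k] = data[t3 + k];
        // white pixels become transparent, everything else stays opaque
        texture[t4 + 3] = (data[t3] == 255 && data[t3 + 1] == 255 && data[t3 + 2] == 255) ? 0 : 255;
    }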
I'm trying to convert the following code from the OpenGL 1.5 spec to the OpenGL ES 1.1 spec
(num_x and num_y are passed into the function as arguments)
::glBegin(GL_LINES);
for (int i=-num_x; i<=num_x; i++)
{
glVertex3i(i, 0, -num_y);
glVertex3i(i, 0, num_y);
}
for (int i=-num_y; i<=num_y; i++)
{
glVertex3i(-num_x, 0, i);
glVertex3i( num_x, 0, i);
}
::glEnd();
Here is my corresponding converted code (ignore the inefficiency of my loops; I am trying to get the conversion to work properly first).
I'm trying to build two things here:
A float array of all the x, y, z coordinates needed to draw the grid
An index array of all the vertices needed
Both arrays are then passed to OpenGL, which renders them.
An example of what the arrays should look like:
GLshort indices[] = {3, 0, 1, 2,
3, 3, 4, 5,
3, 6, 7, 8, };
GLfloat vertexs[] = {3.0f, 0.0f, 0.0f,
6.0f, 0.0f, -0.5f ,
0, 0, 0,
6.0f, 0.0f, 0.5f,
3.0f, 0.0f, 0.0f,
0, 0, 0,
3, 0, 0,
0, 0, 0,
0, 6, 0};
int iNumOfVerticies = (num_x + num_y)*4*3;
int iNumOfIndicies = (iNumOfVerticies/3)*4;
GLshort* verticies = new short[iNumOfVerticies];
GLshort* indicies = new short[iNumOfIndicies];
int j = 0;
for(int i=-num_x; j < iNumOfVerticies && i<=num_x; i++,j+=6)
{
verticies[j] = i;
verticies[j+1] = 0;
verticies[j+2] = -num_y;
verticies[j+3] = i;
verticies[j+4] = 0;
verticies[j+5] = num_y;
}
for(int i=-num_y; j < iNumOfVerticies && i<=num_y;i++,j+=6)
{
verticies[j] = i;
verticies[j+1] = 0;
verticies[j+2] = -num_x;
verticies[j+3] = i;
verticies[j+4] = 0;
verticies[j+5] = num_x;
}
I also need to build an array of indices to pass on. I 'borrowed' the array structure from the iPhone 'teapot' example.
In each row, we have the number of indices followed by the indices referenced.
int k = 0;
for(j = 0; j < iNumOfIndicies; j++)
{
if (j%4==0)
{
indicies[j] = 3;
}
else
{
indicies[j] = k++;
}
}
::glEnableClientState(GL_VERTEX_ARRAY);
::glVertexPointer(3 ,GL_FLOAT, 0, verticies);
for(int i = 0; i < iNumOfIndicies;i += indicies[i] + 1)
{
::glDrawElements( GL_LINES, indicies[i], GL_UNSIGNED_SHORT, &indicies[i+1] );
}
delete [] verticies;
delete [] indicies;
I can see several things wrong with your converted code:
1.- Using the wrong type for the variable verticies, it should be:
GLfloat* verticies = new GLfloat[iNumOfVerticies];
2.- Incorrectly filling verticies, second loop should be:
for(int i=-num_y; j < iNumOfVerticies && i<=num_y;i++,j+=6)
{
verticies[j] = -num_x;
verticies[j+1] = 0;
verticies[j+2] = i;
verticies[j+3] = num_x;
verticies[j+4] = 0;
verticies[j+5] = i;
}
3.- Incorrect filling of indicies, I think you should delete the lines:
if (j%4==0)
{
indicies[j] = 3;
}
else
4.- Incorrect use of glDrawElements, replace the loop by this single line:
::glDrawElements( GL_LINES, iNumOfIndicies, GL_UNSIGNED_SHORT, indicies);
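Pulled together, the converted routine might look roughly like this. This is only a sketch: the buffer sizes are recomputed here as (2*num_x+1) + (2*num_y+1) lines with two endpoints each, since the question's (num_x + num_y)*4*3 count leaves out the grid's outermost lines, and the indices are simply sequential:
// Number of grid lines, two endpoints per line, 3 floats per endpoint.
const int iNumOfLines     = (2 * num_x + 1) + (2 * num_y + 1);
const int iNumOfIndicies  = iNumOfLines * 2;          // one index per endpoint
const int iNumOfVerticies = iNumOfIndicies * 3;       // floats in the vertex array

GLfloat*  verticies = new GLfloat[iNumOfVerticies];   // fix 1: float data, not short
GLushort* indicies  = new GLushort[iNumOfIndicies];

int j = 0;
for (int i = -num_x; i <= num_x; i++, j += 6)         // lines running along z
{
    verticies[j]     = (GLfloat)i;      verticies[j + 1] = 0.0f;  verticies[j + 2] = (GLfloat)-num_y;
    verticies[j + 3] = (GLfloat)i;      verticies[j + 4] = 0.0f;  verticies[j + 5] = (GLfloat)num_y;
}
for (int i = -num_y; i <= num_y; i++, j += 6)         // lines running along x (fix 2)
{
    verticies[j]     = (GLfloat)-num_x; verticies[j + 1] = 0.0f;  verticies[j + 2] = (GLfloat)i;
    verticies[j + 3] = (GLfloat)num_x;  verticies[j + 4] = 0.0f;  verticies[j + 5] = (GLfloat)i;
}

for (int k = 0; k < iNumOfIndicies; k++)              // fix 3: plain sequential indices
    indicies[k] = (GLushort)k;

::glEnableClientState(GL_VERTEX_ARRAY);
::glVertexPointer(3, GL_FLOAT, 0, verticies);
::glDrawElements(GL_LINES, iNumOfIndicies, GL_UNSIGNED_SHORT, indicies);   // fix 4: one draw call
::glDisableClientState(GL_VERTEX_ARRAY);

delete[] verticies;
delete[] indicies;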