Texture turns gray in OpenGL when an alpha channel is added manually

int j, k, t3, t4;
for (int i = 0; i < width; i++)
    for (j = 0; j < height; j++)
    {
        t3 = (i*height + j) * 3;
        t4 = (i*height + j) * 4;
        for (k = 0; k < 3; k++)
            texture[t4 + k] = data[t3 + k];
        texture[t4 + 3] = (data[t3 + 1] == 255 && data[t3 + 2] == 255 && data[t3] == 255) ? 0 : 255;
    }
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, texture);
It's a bitmap file, and I loaded it successfully with the original data:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
But when I add the alpha channel manually, the texture loses its color. The picture is attached below.

I can't quite explain the exact symptoms, but the index arithmetic in your code looks off:
for (int i = 0; i < width; i++)
    for (j = 0; j < height; j++)
    {
        t3 = (i*height + j) * 3;
        t4 = (i*height + j) * 4;
Since images are normally laid out in memory row by row, the index that iterates over the pixels in the row should be the one added without a multiplier, while the other index is multiplied by width (not height). This should be:
for (j = 0; j < height; j++)
    for (int i = 0; i < width; i++)
    {
        t3 = (j * width + i) * 3;
        t4 = (j * width + i) * 4;
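For completeness, the whole conversion loop with the corrected indexing would look something like this (a sketch that keeps the white-to-transparent key from the question and assumes tightly packed data and texture buffers):
for (int j = 0; j < height; j++)
    for (int i = 0; i < width; i++)
    {
        int t3 = (j * width + i) * 3;
        int t4 = (j * width + i) * 4;
        for (int k = 0; k < 3; k++)
            texture[t4 + k] = data[t3 + k];
        // pure white source pixels become fully transparent
        texture[t4 + 3] = (data[t3] == 255 && data[t3 + 1] == 255 && data[t3 + 2] == 255) ? 0 : 255;
    }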


Texture object fetching in CUDA

I can find many examples online that use CUDA texture references, but not so many that rely on texture objects. I am trying to understand why my code below always fetches 0 rather than my input texture. Am I missing something, or using a wrong setting? I simplified it as much as I could:
#include <stdio.h>
#include <cstddef>   // std::size_t
#include <cstring>   // memset

__global__ void fetch(cudaTextureObject_t tex, std::size_t width, std::size_t height)
{
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            float u = (i + 0.5f) / width;
            float v = (j + 0.5f) / height;
            auto p = tex2D<uchar4>(tex, u, v);
            printf("i=%d, j=%d -> u=%3.2f, v=%3.2f, r=%d, g=%d, b=%d, a=%d\n", i, j, u, v, p.x, p.y, p.z, p.w);
            // -> always returns p = {0, 0, 0, 0}
        }
    }
}
int main() {
    constexpr std::size_t width = 2;
    constexpr std::size_t height = 2;

    // creating a dummy texture
    uchar4 image[width*height];
    for (std::size_t j = 0; j < height; ++j) {
        for (std::size_t i = 0; i < width; ++i)
            image[j*width + i] = make_uchar4(255*j/height, 255*i/width, 55, 255);
    }

    cudaArray_t cuArray;
    auto channelDesc = cudaCreateChannelDesc<uchar4>();
    cudaMallocArray(&cuArray, &channelDesc, width, height);
    cudaMemcpy2DToArray(cuArray, 0, 0, image, width*sizeof(uchar4), width*sizeof(uchar4), height, cudaMemcpyHostToDevice);

    struct cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = cuArray;

    struct cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeBorder;
    texDesc.addressMode[1] = cudaAddressModeBorder;
    texDesc.filterMode = cudaFilterModeLinear;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);

    fetch<<<1, 1>>>(texObj, width, height);
    cudaDeviceSynchronize();

    cudaDestroyTextureObject(texObj);
    cudaFreeArray(cuArray);
    return 0;
}
In your code you specify the texture description as
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
and the array holding the texture data is defined as
auto channelDesc = cudaCreateChannelDesc<uchar4>();
Quoting the documentation:
Linear Filtering
In this filtering mode, which is only available for floating-point textures ...
You have a uchar4 texture, and linear filtering cannot be used with an integer texture read in element mode. Either switch to a floating-point texture type, use point filtering, or change the read mode to cudaReadModeNormalizedFloat.
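As a concrete illustration (a sketch of two possible fixes, keeping the rest of the setup from the question unchanged):
// Option A: point filtering, so element-type reads of uchar4 stay valid
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode   = cudaReadModeElementType;
// kernel: auto p = tex2D<uchar4>(tex, u, v);

// Option B: keep linear filtering, read the integer data as normalized floats
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode   = cudaReadModeNormalizedFloat;
// kernel: auto p = tex2D<float4>(tex, u, v);  // components are floats in [0, 1]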

How to use the GL_RGB9_E5 format?

I am trying to use the GL_RGB9_E5 format with a 3D texture. To understand how the format is used, I created a simple test, but somehow I am not getting what I expect. The following is the test program:
GLuint texture;
const unsigned int depth = 1;
const unsigned int width = 7;
const unsigned int height = 3;

glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_3D, texture);

const float color[3] = { 0.0f, 0.5f, 0.0f };
const rgb9e5 uColor = float3_to_rgb9e5(color);

GLuint * t1 = (GLuint*)malloc(width*height*depth * sizeof(GLuint));
GLuint * t2 = (GLuint*)malloc(width*height*depth * sizeof(GLuint));

int index = 0;
for (int i = 0; i < depth; i++)
    for (int j = 0; j < width; j++)
        for (int k = 0; k < height; k++)
        {
            t1[index] = uColor.raw;
            index++;
        }

glTexImage3D(GL_TEXTURE_3D, 0,
             GL_RGB9_E5,
             width, height, depth, 0,
             GL_RGB,
             GL_UNSIGNED_INT,
             (void*)t1);

glGetTexImage(GL_TEXTURE_3D, 0, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, t2);

index = 0;
for (int i = 0; i < depth; i++)
    for (int j = 0; j < width; j++)
        for (int k = 0; k < height; k++)
        {
            rgb9e5 t;
            t.raw = t2[index];
            index++;
        }
What I am expecting is that whatever uColor.raw I put into t1, I get the same raw color back in t2.
I have taken the float3_to_rgb9e5 method from the Specification for GL_RGB9_E5; you can find the code at the bottom of that document.
It would be great if someone could give me an example of how to use this format correctly, or point out the correct way of doing it.
GL_RGB,
GL_UNSIGNED_INT,
This means that you are passing 3-channel pixel data, where each channel is a normalized, 32-bit unsigned integer. But that's not what you're actually doing.
You're actually passing 3-channel data packed into a 32-bit unsigned integer, such that the first 5 bits represent a floating-point exponent, followed by 3 sets of 9 bits, each representing the unsigned mantissa of a float. We spell that:
GL_RGB,
GL_UNSIGNED_INT_5_9_9_9_REV
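Applied to the upload in the question, the call would then look like this (a sketch; only the type argument changes, and the glGetTexImage read-back already uses the matching type):
glTexImage3D(GL_TEXTURE_3D, 0,
             GL_RGB9_E5,
             width, height, depth, 0,
             GL_RGB,
             GL_UNSIGNED_INT_5_9_9_9_REV,
             (void*)t1);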

glTexImage2D slicing and alignment issues appearing in window

I'm working on making my own topographic map, and I have been using .hgt files from NASA.
I'm loading the files using
void MapImage::load_map_file(const char* filename) {
    std::ifstream file(filename, std::ios::in | std::ios::binary);
    if (!file) {
        std::cout << "Error opening file!" << std::endl;
    }

    std::vector<short> tempHeight(TOTAL_SIZE);
    unsigned char buffer[2];
    int x, y;

    for (int i = 0; i < TOTAL_SIZE; i++) {
        if (!file.read(reinterpret_cast<char*>(buffer), sizeof(buffer))) {
            std::cout << "Error reading file!" << std::endl;
        }
        tempHeight[i] = (buffer[0] << 8) | buffer[1];
    }

    height = tempHeight;
}
And then I add them to an in-memory bitmap using:
void MapImage::loadTextureImage() {
    img_tex = 0;
    glGenTextures(1, &img_tex);

    int x, y, w, h;
    w = h = SRTM_SIZE;

    unsigned char* img;
    img = (unsigned char *)malloc(3 * w * h);
    memset(img, 0, sizeof(img));

    int g = 0;
    double height_color;

    /*
    for (int i = 0; i < TOTAL_SIZE; i++) {
        height_color = (float)height[i] / 10.0;
        g = (height_color * 255);
        if (g > 255) g = 255;
        img[i * 3 + 2] = (unsigned char)0;
        img[i * 3 + 1] = (unsigned char)g;
        img[i * 3 + 0] = (unsigned char)0;
    }
    */

    for (int i = 0; i < w; i++) {
        for (int j = 0; j < h; ++j) {
            x = i;
            y = (h - 1) - j;
            height_color = (float)height[j + (i * w)] / 10.0;
            g = (height_color * 255);
            if (g > 255) g = 255;
            img[(x + y*w) * 3 + 2] = (unsigned char)0;
            img[(x + y*w) * 3 + 1] = (unsigned char)g;
            img[(x + y*w) * 3] = (unsigned char)0;
        }
    }

    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, img_tex);
    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        GL_RGB,
        w,
        h,
        0,
        GL_RGB,
        GL_UNSIGNED_BYTE,
        img
    );
}
However, this results in an image with the corner sliced off, like this:
Using the commented-out version in loadTextureImage() looks slightly different, but the corner is sliced in the same way.
Does anyone have a hint as to what's going on? I've tried using an image as a texture, loading it with the stbi library, and that works fine, so I'm not sure where it's going wrong.
(The coordinates for the image are N10E099.)
This looks like row misalignment, caused by the 3-wide colour data. Try using the following call just before glTexImage2D:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
This alignment value, which is 4 by default, is used by glTexImage2D and friends whenever texture data is read to be sent to the GPU.
There is no verification that it matches what the data actually looks like, so in cases like yours where a row doesn't end on a 4-byte boundary, the first few bytes of the next row will be skipped, leading to this diagonal distortion.
Texture data transfers in the other direction (from the GPU to client memory) are aligned via glPixelStorei(GL_PACK_ALIGNMENT, 1);.
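In the question's loadTextureImage(), that would look roughly like this (a sketch; only the glPixelStorei line is new):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);   // rows of 3-byte pixels are not 4-byte aligned
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, img_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, img);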

Flip an image vertically

I'm trying to flip an image vertically after retrieving the buffer from OpenGL, but the following code seems to output an incorrect image:
const int width = 100;
const int height = width;
const int components = 3;

unsigned char pixels[width * height * components];
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);

unsigned char flipPixels[width * height * components];
for (int i = 0; i < width; ++i) {
    for (int j = 0; j < height; ++j) {
        for (int k = 0; k < components; ++k) {
            flipPixels[i + j * width + k] = pixels[(height) * (width) - ((j+1) * width) + i + k];
        }
    }
}
I know I could iterate over only half the height and achieve the same result, but I want to implement it by going through the complete height of the image. I can't seem to figure out what's wrong with the code. Any help would be appreciated.
I'm not sure how the image is stored, but your indices i and k are given the same stride, which is suspicious. Maybe you want i * components and j * width * components. After that, to invert vertically, you should only have to change j to (height - j - 1):
flipPixels[(i + j * width) * components + k] = pixels[(i + (height - 1 - j) * width) * components + k];
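Put together, the flip loop would look something like this (a sketch, assuming the same tightly packed RGB layout as in the question):
for (int j = 0; j < height; ++j) {
    for (int i = 0; i < width; ++i) {
        for (int k = 0; k < components; ++k) {
            // row (height - 1 - j) of the source becomes row j of the destination
            flipPixels[(i + j * width) * components + k] =
                pixels[(i + (height - 1 - j) * width) * components + k];
        }
    }
}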
I had the same issue: the pixels returned by OpenGL resulted in an upside-down bitmap, so I flipped them like this. But the bitmap is still flipped left to right...
void Flip(GLubyte* pixels, int pixelbuffersize)
{
    // basically rewrites from bottom up...
    std::vector<GLubyte> flipped_pixels(pixels, pixels + pixelbuffersize);
    auto count = flipped_pixels.size();

    std::reverse(flipped_pixels.begin(), flipped_pixels.end());

    GLubyte* buff = (reinterpret_cast<GLubyte*>(&flipped_pixels[0]));
    const void* pnewdata = (const void*)buff;
    memcpy(pixels, pnewdata, count);
}
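Reversing the entire buffer also reverses the pixel order within each row and the byte order within each pixel, which is why the result comes out mirrored. Swapping whole rows avoids that; here is a sketch of a hypothetical FlipRows helper, assuming tightly packed rows of width * components bytes and the GLubyte type from the GL headers already in use:
#include <algorithm>
#include <vector>

// Flips the image vertically in place by exchanging rows, leaving pixel order intact.
void FlipRows(GLubyte* pixels, int width, int height, int components)
{
    const int rowSize = width * components;
    std::vector<GLubyte> tmp(rowSize);
    for (int top = 0, bottom = height - 1; top < bottom; ++top, --bottom) {
        GLubyte* rowTop = pixels + top * rowSize;
        GLubyte* rowBottom = pixels + bottom * rowSize;
        std::copy(rowTop, rowTop + rowSize, tmp.begin());   // save top row
        std::copy(rowBottom, rowBottom + rowSize, rowTop);  // bottom -> top
        std::copy(tmp.begin(), tmp.end(), rowBottom);       // saved top -> bottom
    }
}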

Having trouble using glDrawPixels

I have to use glDrawPixels to implement a raster algorithm.
Right now I'm only trying to get a simple example of glDrawPixels working, but I'm having an issue.
GLint height, width, size = 0;
GLbyte *image = NULL;
int i, j = 0;

width = 512;
height = 512;
size = width * height;

image = (GLbyte*)malloc(sizeof(GLbyte) * size * 3);

for (i = 0; i < size*3; i = i + width*3) {
    for (j = i; j < width*3; j = j + 3) {
        image[j] = 0xFF;
        image[j+1] = 0x00;
        image[j+2] = 0x00;
    }
}

glDrawPixels(width, height, GL_RGB, GL_BYTE, image);
free(image);
gluSwapBuffers();
Above is the code I'm trying to get to work; from my understanding, it should simply draw a 512x512 red square.
However, what I get is one red row at the bottom and everything else is grey.
Your second for() loop is broken -- you're starting at i, but only going up to width * 3, so it doesn't run at all when i > 0.
Here's a simpler approach:
GLbyte *p = image;
for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
        *p++ = 0xFF;
        *p++ = 0x00;
        *p++ = 0x00;
    }
}
Your loop conditions look off to me. (After the first row, j starts at i, which is already at least width * 3, so the inner loop never executes.) An easier way would be to do something like this:
for (y = 0; y < height; y++)
{
    // Go to the start of the next row
    GLbyte* rowStart = image + (width * 3) * y;
    GLbyte* row = rowStart;
    for (x = 0; x < width; x++)
    {
        row[x * 3] = 0xFF;
        row[(x * 3) + 1] = 0x00;
        row[(x * 3) + 2] = 0x00;
    }
}