I'm working on a sort of ASCII canvas for a game. I assumed it would be more efficient to use a spritesheet of ASCII glyphs in CP437 style to draw the ASCII art. I needed a way to color the background and foreground of the glyphs, so I'm using a fragment shader. Using the shader drops me to 7 fps; not using it, I get about 131.
Am I doing something incorrectly? Is it just too expensive to loop through a vector of strings (and each character in the string), calculate the position of the glyph on the sheet, set the texture position, and then draw the sprite with a shader for each character?
int main() {
    sf::RenderWindow rt(sf::VideoMode(1280, 720), "Demo Game");

    sf::Texture texture;
    texture.loadFromFile("Resources/courier_8x16.png");
    texture.setSmooth(false);
    sf::Sprite sprite(texture);

    sf::Shader shader;
    shader.loadFromFile("cycle.frag", sf::Shader::Fragment);
    shader.setUniform("texture", sf::Shader::CurrentTexture);

    sf::Clock clock;
    sf::Time timeSinceLastUpdate = sf::Time::Zero;

    std::vector<std::string> chars = std::vector<std::string>(50, std::string(100, 'X'));

    while (rt.isOpen())
    {
        processEvents();
        timeSinceLastUpdate += clock.restart();
        while (timeSinceLastUpdate > TimePerFrame)
        {
            timeSinceLastUpdate -= TimePerFrame;
            processEvents();
            update(TimePerFrame);
        }

        //render();
        rt.clear();
        for (int y = 0; y < chars.size(); y++)
        {
            for (int x = 0; x < chars[y].size(); x++)
            {
                //bg and fg colors will be in a 2D vector and used here
                shader.setUniform("foreground", sf::Glsl::Vec4(sf::Color::White));
                shader.setUniform("background", sf::Glsl::Vec4(sf::Color::Black));
                //uses the character's code value and the glyph dimensions
                //to find the location of the appropriate glyph in the sprite sheet
                sprite.setTextureRect(sf::IntRect((chars[y][x] % 16) * 8, (chars[y][x] / 16) * 16, 8, 16));
                sprite.setPosition(x * 8, y * 16);
                rt.draw(sprite, &shader);
            }
        }
        rt.display();
    }
}
Here's the shader code:
//////cycle.frag
uniform vec4 foreground;
uniform vec4 background;
uniform sampler2D texture;

void main()
{
    vec4 pixel = texture2D(texture, gl_TexCoord[0].xy);
    if (pixel.r < .1)
        pixel = background;
    else
        pixel = foreground;
    gl_FragColor = pixel;
}
Here's the sprite sheet: (all the paragraph symbols are just placeholders)
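A minimal sketch, assuming SFML 2.x, of one commonly suggested alternative to drawing each character as its own sprite: batch the whole grid into a single sf::VertexArray of textured quads with per-vertex foreground colors, so the grid is drawn with one draw call and no per-character uniform updates. The background could then be a second layer of untextured, colored quads rather than a shader.

// Sketch: build one quad per cell; the texture rect math matches the sprite version above.
sf::VertexArray grid(sf::Quads, chars.size() * chars[0].size() * 4);
std::size_t i = 0;
for (std::size_t y = 0; y < chars.size(); y++)
{
    for (std::size_t x = 0; x < chars[y].size(); x++)
    {
        unsigned char c = chars[y][x];
        float tx = (c % 16) * 8.f;  // glyph column on the sheet
        float ty = (c / 16) * 16.f; // glyph row on the sheet
        sf::Vertex* quad = &grid[i];
        quad[0] = sf::Vertex(sf::Vector2f(x * 8.f,     y * 16.f),      sf::Color::White, sf::Vector2f(tx,     ty));
        quad[1] = sf::Vertex(sf::Vector2f(x * 8.f + 8, y * 16.f),      sf::Color::White, sf::Vector2f(tx + 8, ty));
        quad[2] = sf::Vertex(sf::Vector2f(x * 8.f + 8, y * 16.f + 16), sf::Color::White, sf::Vector2f(tx + 8, ty + 16));
        quad[3] = sf::Vertex(sf::Vector2f(x * 8.f,     y * 16.f + 16), sf::Color::White, sf::Vector2f(tx,     ty + 16));
        i += 4;
    }
}
// rt.draw(grid, &texture); // one draw call for the whole 100x50 grid

Roughly 5,000 draws with two setUniform calls each per frame is usually what dominates in the loop above; a single batched draw removes that overhead.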
I have a shader I'm attempting to use, and I've come across an issue that I can't solve since my knowledge of GLSL is limited.
I'm using a texture as a mask, and to debug this issue I simply use that texture's pixel color as the gl_FragColor. I'll post some images to show what it looks like and what it should look like.
Image link:
https://imgur.com/EBt2vbL
It seems related to gl_TexCoord[0].xy not holding the proper coordinates for the dissolve texture.
main.cpp
#include "Engine.h"
#include <stdio.h>
#include <iostream>
#include <windows.h>
int main(int argc, char *argv[])
{
try
{
Engine game;
game.Run();
}
catch (std::exception& err)
{
std::cout << "\nException: " << err.what() << std::endl;
}
return 0;
}
Engine.h
#pragma once
#include <SFML/System.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/Window.hpp>
#include <SFML/Audio.hpp>
#include <SFML/Network.hpp>
#include <vector>
#include <iostream>
class Engine
{
public:
Engine();
void Run();
void HandleEvents(sf::Time deltaTime);
void Update(sf::Time deltaTime);
void BuildVertices();
void Draw();
private:
bool running;
bool hasFocus;
bool fullScreen;
sf::RenderWindow mainWindow;
sf::Time deltaTime;
sf::Event event;
sf::Vector2i screenResolution;
sf::Vector2i mousePosition;
sf::VertexArray vertices;
sf::Vertex vertex;
sf::Shader dissolveShader;
sf::Texture dissolveTexture;
sf::RenderStates renderState;
float dissolveValue;
sf::Texture objectSpriteSheetTexture;
};
Engine.cpp
#include "Engine.h"
static const sf::Time TimePerFrame = sf::seconds(1.f / 60.f);
Engine::Engine()
: hasFocus(true)
, fullScreen(false)
, running(false)
, dissolveValue(1.0f)
, vertices(sf::Quads)
{
mainWindow.create(sf::VideoMode(640, 480), "Test", sf::Style::Titlebar);
mainWindow.setPosition(sf::Vector2i(0, 0));
screenResolution.x = 640;
screenResolution.y = 480;
// 512x512 sheet, each sprite is 128x128
if (!objectSpriteSheetTexture.loadFromFile("ObjectSheet.png"))
std::cout << "failed to load ObjectSheet.png" << std::endl;
if (!dissolveTexture.loadFromFile("DissolveTexture.png"))
std::cout << "failed to load DissolveTexture.png" << std::endl;
if (!dissolveShader.loadFromFile("DissolveShader.frag", sf::Shader::Fragment))
{
std::cout << "failed to load DissolveShader.frag" << std::endl;
}
dissolveShader.setUniform("sourceTexture", sf::Shader::CurrentTexture);
dissolveShader.setUniform("dissolveTexture", dissolveTexture);
renderState.shader = &dissolveShader;
renderState.texture = &objectSpriteSheetTexture;
}
void Engine::Run()
{
// main loop
sf::Clock clock;
sf::Time timeSinceLastUpdate = sf::Time::Zero;
sf::Time elapsedTime;
running = true;
while(running)
{
elapsedTime = clock.restart();
timeSinceLastUpdate += elapsedTime;
HandleEvents(TimePerFrame);
while(timeSinceLastUpdate > TimePerFrame)
{
timeSinceLastUpdate -= TimePerFrame;
Update(TimePerFrame);
}
BuildVertices();
Draw();
}
}
void Engine::HandleEvents(sf::Time deltaTime)
{
mousePosition = sf::Mouse::getPosition(mainWindow);
while(mainWindow.pollEvent(event))
{
if(event.type == sf::Event::Closed)
mainWindow.close();
if (event.type == sf::Event::KeyPressed)
{
if (event.key.code == sf::Keyboard::Escape)
{
running = false;
}
}
}
}
void Engine::Update(sf::Time deltaTime)
{
}
void Engine::BuildVertices()
{
vertices.clear();
int frameSize = 128;
sf::Vector2i objectPosition(100, 100);
sf::Vector2i spriteSheetTextureCoordinates(0, 128);
vertex.position.x = objectPosition.x;
vertex.position.y = objectPosition.y;
vertex.texCoords.x = spriteSheetTextureCoordinates.x;
vertex.texCoords.y = spriteSheetTextureCoordinates.y;
vertices.append(vertex);
vertex.position.x = objectPosition.x + frameSize;
vertex.position.y = objectPosition.y;
vertex.texCoords.x = spriteSheetTextureCoordinates.x + frameSize;
vertex.texCoords.y = spriteSheetTextureCoordinates.y;
vertices.append(vertex);
vertex.position.x = objectPosition.x + frameSize;
vertex.position.y = objectPosition.y + frameSize;
vertex.texCoords.x = spriteSheetTextureCoordinates.x + frameSize;
vertex.texCoords.y = spriteSheetTextureCoordinates.y + frameSize;
vertices.append(vertex);
vertex.position.x = objectPosition.x;
vertex.position.y = objectPosition.y + frameSize;
vertex.texCoords.x = spriteSheetTextureCoordinates.x;
vertex.texCoords.y = spriteSheetTextureCoordinates.y + frameSize;
vertices.append(vertex);
}
void Engine::Draw()
{
mainWindow.clear(sf::Color::Black);
dissolveShader.setUniform("dissolveValue", dissolveValue);
mainWindow.draw(vertices, renderState);
mainWindow.display();
}
The vertex shader is the standard pass-through handled by SFML.
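For reference, that pass-through stage is roughly equivalent to the minimal vertex shader shown in the SFML shader tutorial, which is where gl_TexCoord[0] below gets its value:

void main()
{
    // transform the vertex position
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    // transform the texture coordinates
    gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
    // forward the vertex color
    gl_FrontColor = gl_Color;
}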
The fragment shader:
#version 130

// used as the mask to determine if a pixel of the source texture should be drawn, 128x128
uniform sampler2D dissolveTexture;
// the texture of the object I'm drawing, a 128x128 part of a 512x512 sprite sheet
uniform sampler2D sourceTexture;
// set to 1.0 for debug
uniform float dissolveValue;

void main( void )
{
    vec4 sourceColor = texture2D(sourceTexture, gl_TexCoord[0].xy);
    vec4 maskColor = texture2D(dissolveTexture, gl_TexCoord[0].xy);
    if(maskColor.r <= dissolveValue)
    {
        // it would return the source pixel color here once the issue is solved
        // gl_FragColor = sourceColor;
        // debugging, so returning the mask texture's pixel color
        gl_FragColor = maskColor;
    }
    else
    {
        gl_FragColor = sourceColor;
    }
}
I'm probably overlooking something simple, so if someone can point me in the right direction I'd appreciate it. Thanks!
The texture coordinates for the GLSL function texture (formerly texture2D) range from 0.0 to 1.0, where (0.0, 0.0) is generally the bottom-left corner and (1.0, 1.0) the top-right corner of the texture image.
But the SFML library scales the texture coordinates by the size of the current texture (sf::Shader::CurrentTexture), which means the vertex texture coordinates have to be specified in the range of the current texture size, like this:
void Engine::BuildVertices()
{
    vertices.clear();

    int frameSize = 128;
    sf::Vector2i objectPosition(100, 100);
    sf::Vector2i texSize(512, 512);

    vertex.position = sf::Vector2f(objectPosition.x, objectPosition.y);
    vertex.texCoords = sf::Vector2f(0.0f, 0.0f);
    vertices.append(vertex);

    vertex.position = sf::Vector2f(objectPosition.x + frameSize, objectPosition.y);
    vertex.texCoords = sf::Vector2f(texSize.x, 0.0f);
    vertices.append(vertex);

    vertex.position = sf::Vector2f(objectPosition.x + frameSize, objectPosition.y + frameSize);
    vertex.texCoords = sf::Vector2f(texSize.x, texSize.y);
    vertices.append(vertex);

    vertex.position = sf::Vector2f(objectPosition.x, objectPosition.y + frameSize);
    vertex.texCoords = sf::Vector2f(0.0f, texSize.y);
    vertices.append(vertex);
}
You have a mask texture with a size of 128×128, and you have a tiled sprite sheet (4×4 tiles) with a size of 512×512. I recommend adding a texture coordinate offset uniform (texOffset) and a texture scale uniform (texScale) to the fragment shader, which allows you to select a tile of the texture:
#version 130

uniform sampler2D dissolveTexture;
uniform sampler2D sourceTexture;
uniform float dissolveValue;
uniform vec2 texScale;
uniform vec2 texOffset;

void main( void )
{
    vec4 sourceColor = texture2D(sourceTexture, texOffset + texScale * gl_TexCoord[0].xy);
    vec4 maskColor = texture2D(dissolveTexture, gl_TexCoord[0].xy);
    gl_FragColor = mix( sourceColor, maskColor, step(maskColor.r, dissolveValue) );
}
You have to set the uniforms in the Draw function. The scale is the reciprocal of the number of tile rows and columns. The offset is the index of the tile multiplied by the scale factor:
void Engine::Draw()
{
    mainWindow.clear(sf::Color::Black);

    dissolveValue = 0.5f;
    dissolveShader.setUniform("dissolveValue", dissolveValue);

    float scale_x = 1.0f/4.0f;
    float scale_y = 1.0f/4.0f;
    int i_x = 1; // column of tile (from 0 to 3)
    int i_y = 2; // row of tile (from 0 to 3)
    dissolveShader.setUniform("texScale", sf::Glsl::Vec2(scale_x, scale_y));
    dissolveShader.setUniform("texOffset", sf::Glsl::Vec2(i_x*scale_x, i_y*scale_y));

    mainWindow.draw(vertices, renderState);
    mainWindow.display();
}
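Worked through with the values above: scale_x = scale_y = 0.25 and tile index (1, 2) give texOffset = (0.25, 0.5), so gl_TexCoord[0].xy in [0, 1] is remapped to [0.25, 0.5] × [0.5, 0.75] of the sprite sheet, i.e. pixels 128–256 horizontally and 256–384 vertically of the 512×512 texture, while the 128×128 mask is still sampled over its full [0, 1] range.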
New to game development with SFML and really like it.
So far I'm trying to render a circle on a sprite, but somehow I only get a quarter of the circle, as if the window were being used as the drawing surface instead of the sprite. If I make the sprite the same size as the window, I see the full circle.
Here's my code:
#include <SFML/Window.hpp>
#include <SFML/Graphics.hpp>
const char *glsl = R"(
#version 330 core
uniform vec2 u_resolution;
void main() {
vec2 pos = gl_FragCoord.xy / u_resolution;
float radius = 0.4;
float d = smoothstep(radius + 0.01, radius, distance(pos, vec2(0.5)));
gl_FragColor = vec4(d*0.0, d*0.5, d*1.0, 1.0);
}
)";
int main() {
sf::RenderWindow window(sf::VideoMode(800, 600), "GLSL", sf::Style::Default);
sf::Texture tex;
tex.create(800 / 2, 600 /2); // or tex.create(800, 600); for full window surface
sf::Sprite spr(tex);
sf::Shader shader;
shader.loadFromMemory(glsl, sf::Shader::Fragment);
shader.setUniform("u_resolution", sf::Vector2f(800, 600));
while (window.isOpen()) {
sf::Event event;
while (window.pollEvent(event)) if(event.type == sf::Event::Closed) window.close();
window.clear(sf::Color(244,244,244));
window.draw(spr, &shader);
window.display();
}
}
Any help would be appreciated. What I'm looking to achieve is to render the full circle within the sprite's area. Thanks!
You see only a quarter of the circle because you do tex.create(800 / 2, 600 / 2); while your window is sf::VideoMode(800, 600), and the shader divides gl_FragCoord by the full window width and height, so the circle ends up centered on the window rather than the sprite. Create a texture with the same size as your window (tex.create(800, 600);) and I think you'll be good.
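If the goal is instead to keep the quarter-size sprite and fit the whole circle inside it, one option (a sketch of mine, not part of the answer above; the u_spriteOrigin and u_spriteSize uniforms are made up) is to work in the sprite's own pixel space. gl_FragCoord is measured in window pixels from the bottom-left corner, so subtract the sprite's bottom-left corner before dividing:

const char *glslSpriteLocal = R"(
    #version 330 core
    uniform vec2 u_spriteOrigin; // sprite's bottom-left corner in window pixels
    uniform vec2 u_spriteSize;   // sprite size in pixels
    void main() {
        // map the fragment into the sprite's local [0,1] range
        vec2 pos = (gl_FragCoord.xy - u_spriteOrigin) / u_spriteSize;
        float radius = 0.4;
        float d = smoothstep(radius + 0.01, radius, distance(pos, vec2(0.5)));
        gl_FragColor = vec4(d*0.0, d*0.5, d*1.0, 1.0);
    }
)";

// For a 400x300 sprite at window position (0, 0) in an 800x600 window,
// the sprite's bottom-left corner in gl_FragCoord space is (0, 600 - 300):
// shader.setUniform("u_spriteOrigin", sf::Vector2f(0.f, 300.f));
// shader.setUniform("u_spriteSize", sf::Vector2f(400.f, 300.f));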
I'm running into a problem and I don't know what the best practice for it is. I have a background that moves upward; it's actually made of "slices" that move together, as if the screen were split into 4-5 horizontal parts. I need to be able to draw a hole (circle) in the background (see-through) at a specified position, which will change dynamically each frame or so.
Here is how I generate a zone, I don't think there's much of a problem there:
// A 'zone' is simply the 'slice' of ground that moves upward. There's about 4 of
// them visible on screen at the same time, and they are automatically generated by
// a method irrelevant to the situation. Zones are Sprites.
// ---------
void LevelLayer::Zone::generate(LevelLayer *sender) {
// [...]
// Make a background for the zone
Sprite *background = this->generateBackgroundSprite();
background->setPosition(_contentSize.width / 2, _contentSize.height / 2);
this->addChild(background, 0);
}
This is the Zone::generateBackgroundSprite() method:
// generates dynamically a new background texture
Sprite *LevelLayer::Zone::generateBackgroundSprite() {
RenderTexture *rt = RenderTexture::create(_contentSize.width, _contentSize.height);
rt->retain();
Color4B dirtColorByte = Color4B(/*initialize the color with bytes*/);
Color4F dirtColor(dirtColorByte);
rt->beginWithClear(dirtColor.r, dirtColor.g, dirtColor.b, dirtColor.a);
// [Nothing here yet, gotta learn OpenGL m8]
rt->end();
// ++++++++++++++++++++
// I'm just testing clipping node, it works but the FPS get significantly lower.
// If I lock them to 60, they get down to 30, and if I lock them there they get
// to 20 :(
// Also for the test I'm drawing a square since ClippingNode doesn't seem to
// like circles...
DrawNode *square = DrawNode::create();
Point squarePoints[4] = { Point(-20, -20), Point(20, -20), Point(20, 20), Point(-20, 20) };
square->drawPolygon(squarePoints, 4, Color4F::BLACK, 0.0f, Color4F(0, 0, 0, 0));
square->setPosition(0, 0);
// Make a stencil
Node *stencil = Node::create();
stencil->addChild(square);
// Create a clipping node with the prepared stencil
ClippingNode *clippingNode = ClippingNode::create(stencil);
clippingNode->setInverted(true);
clippingNode->addChild(rt);
Sprite *ret = Sprite::create();
ret->addChild(clippingNode);
rt->release();
return ret;
}
So I'm asking you guys, what would you do in such a situation? Is what I am doing a good idea? Would you do it in another more imaginative way?
PS This is a rewrite of a little app I made for iOS (I want to port it to Android), and I was using MutableTextures in the Objective-C version (it was working). I'm just trying to see if there's a better way using RenderTexture, so I can dynamically create background images using OpenGL calls.
EDIT (SOLUTION)
I wrote my own simple fragment shader that "masks" the visible parts of a texture (the background) based on the visible parts of another texture (the mask). I have an array of points that determine where my circles are on the screen, and in the update method I draw them to a RenderTexture. I then take the generated texture and use it as the mask I pass to the shader.
This is my shader:
#ifdef GL_ES
precision mediump float;
#endif
varying vec2 v_texCoord;
uniform sampler2D u_texture;
uniform sampler2D u_alphaMaskTexture;
void main() {
float maskAlpha = texture2D(u_alphaMaskTexture, v_texCoord).a;
float texAlpha = texture2D(u_texture, v_texCoord).a;
float blendAlpha = (1.0 - maskAlpha) * texAlpha; // Show only where mask is not visible
vec3 texColor = texture2D(u_texture, v_texCoord).rgb;
gl_FragColor = vec4(texColor, blendAlpha);
return;
}
init method:
bool HelloWorld::init() {
// [...]
Size visibleSize = Director::getInstance()->getVisibleSize();
// Load and cache the custom shader
this->loadCustomShader();
// 'generateBackgroundSlice()' creates a new RenderTexture and fills it with a
// color, nothing too complicated here so I won't copy-paste it in my edit
m_background = Sprite::createWithTexture(this->generateBackgroundSprite()->getSprite()->getTexture());
m_background->setPosition(visibleSize.width / 2, visibleSize.height / 2);
this->addChild(m_background);
m_background->setShaderProgram(ShaderCache::getInstance()->getProgram(Shader_AlphaMask_frag_key));
GLProgram *shader = m_background->getShaderProgram();
m_alphaMaskTextureUniformLocation = glGetUniformLocation(shader->getProgram(), "u_alphaMaskTexture");
glUniform1i(m_alphaMaskTextureUniformLocation, 1);
m_alphaMaskRender = RenderTexture::create(m_background->getContentSize().width,
m_background->getContentSize().height);
m_alphaMaskRender->retain();
// [...]
}
loadCustomShader method:
void HelloWorld::loadCustomShader() {
// Load the content of the vertex and fragement shader
FileUtils *fileUtils = FileUtils::getInstance();
string vertexSource = ccPositionTextureA8Color_vert;
string fragmentSource = fileUtils->getStringFromFile(
fileUtils->fullPathForFilename("Shader_AlphaMask_frag.fsh"));
// Init a shader and add its attributes
GLProgram *shader = new GLProgram;
shader->initWithByteArrays(vertexSource.c_str(), fragmentSource.c_str());
shader->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_POSITION, GLProgram::VERTEX_ATTRIB_POSITION);
shader->bindAttribLocation(GLProgram::ATTRIBUTE_NAME_TEX_COORD, GLProgram::VERTEX_ATTRIB_TEX_COORDS);
shader->link();
shader->updateUniforms();
ShaderCache::getInstance()->addProgram(shader, Shader_AlphaMask_frag_key);
// Trace OpenGL errors if any
CHECK_GL_ERROR_DEBUG();
}
update method:
void HelloWorld::update(float dt) {
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Create the mask texture from the points in the m_circlePos array
GLProgram *shader = m_background->getShaderProgram();
m_alphaMaskRender->beginWithClear(0, 0, 0, 0); // Begin with transparent mask
for (vector<Point>::iterator it = m_circlePos.begin(); it != m_circlePos.end(); it++) {
// draw a circle on the mask
const float radius = 40;
const int resolution = 20;
Point circlePoints[resolution];
Point center = *it;
center = Director::getInstance()->convertToUI(center); // OpenGL has a weird coordinates system
float angle = 0;
for (int i = 0; i < resolution; i++) {
float x = (radius * cosf(angle)) + center.x;
float y = (radius * sinf(angle)) + center.y;
angle += (2 * M_PI) / resolution;
circlePoints[i] = Point(x, y);
}
DrawNode *circle = DrawNode::create();
circle->retain();
circle->drawPolygon(circlePoints, resolution, Color4F::BLACK, 0.0f, Color4F(0, 0, 0, 0));
circle->setPosition(Point::ZERO);
circle->visit();
circle->release();
}
m_alphaMaskRender->end();
Texture2D *alphaMaskTexture = m_alphaMaskRender->getSprite()->getTexture();
alphaMaskTexture->setAliasTexParameters(); // Disable linear interpolation
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
shader->use();
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, alphaMaskTexture->getName());
glActiveTexture(GL_TEXTURE0);
}
What you might want to look at is framebuffers. I'm not too familiar with the mobile API for OpenGL, but I'm sure you should have access to framebuffers.
The idea is to do a first pass where you render the circles you want cut out of your background into a new framebuffer texture, then use that texture as an alpha map in the pass that renders the background. When you render a circle, set the alpha channel in that texture to 0.0, and to 1.0 everywhere else; when rendering the background, you then set the alpha of each fragment to the alpha sampled from the first pass's texture.
You can think of it as the same idea as a mask, just using another texture.
Hope this helps :)
I am trying to display an NPOT (non-power-of-two) texture in my LWJGL window. The result is this:
The texture is repeated 4 times, upside down, and distorted by horizontal lines. Obviously this is not the intended result. Here is what I feel to be the relevant source code:
Utility method that loads a texture:
// load the image
BufferedImage image = null;
try {
image = ImageIO.read(new File(path));
}
// exit on error
catch (IOException exception) {
Utility.errorExit(exception);
}
// add the image's data to a bytebuffer
ByteBuffer buffer = BufferUtils.createByteBuffer(image.getWidth() * image.getHeight() * 4);
for(int x = 0; x < image.getWidth(); x++) {
for(int y = 0; y < image.getHeight(); y++) {
int pixel = image.getRGB(x, y);
buffer.put((byte) ((pixel >> 16) & 0xFF)); // red
buffer.put((byte) ((pixel >> 8) & 0xFF)); // green
buffer.put((byte) (pixel & 0xFF)); // blue
buffer.put((byte) 0xFF); // alpha
}
}
// flip the buffer
buffer.flip();
// generate and bind the texture
int handle = GL11.glGenTextures();
GL11.glBindTexture(GL31.GL_TEXTURE_RECTANGLE, handle);
//Setup wrap mode
GL11.glTexParameteri(GL31.GL_TEXTURE_RECTANGLE, GL11.GL_TEXTURE_WRAP_S, GL12.GL_CLAMP_TO_EDGE);
GL11.glTexParameteri(GL31.GL_TEXTURE_RECTANGLE, GL11.GL_TEXTURE_WRAP_T, GL12.GL_CLAMP_TO_EDGE);
//Setup texture scaling filtering
GL11.glTexParameteri(GL31.GL_TEXTURE_RECTANGLE, GL11.GL_TEXTURE_MIN_FILTER, GL11.GL_LINEAR);
GL11.glTexParameteri(GL31.GL_TEXTURE_RECTANGLE, GL11.GL_TEXTURE_MAG_FILTER, GL11.GL_LINEAR);
// set the texture data
GL11.glTexImage2D(GL31.GL_TEXTURE_RECTANGLE, 0, GL11.GL_RGBA8, image.getWidth(), image.getHeight(), 0,
GL11.GL_RGBA, GL11.GL_UNSIGNED_BYTE, buffer);
// return the handle
return handle;
Utility method to bind the texture to the sampler:
// set the sampler's texture unit
GL20.glUniform1i(samplerLocation, GL13.GL_TEXTURE0 + textureUnit);
// bind the texture to the texture unit
GL13.glActiveTexture(GL13.GL_TEXTURE0 + textureUnit);
GL11.glBindTexture(GL31.GL_TEXTURE_RECTANGLE, textureID);
Fragment shader:
#version 150
#extension GL_ARB_texture_rectangle : enable
uniform sampler2DRect sampler;
in vec2 vTexture;
out vec4 color;
void main()
{
color = texture2DRect(sampler, vTexture);
}
The last piece of information that I feel would be relevant is what my texture coordinates are:
Bottom Left Point: (0, 0)
Top Left Point: (0, 600)
Top Right Point: (800, 600)
Bottom Right Point (800, 0)
I am guessing I am doing multiple things wrong. Post in the comments section if you feel there is more information I could provide. Thanks!
P.S. The reason I say the texture is manually loaded is that I am used to using Slick-Util for loading textures, but I was not able to use it for this particular texture, as I hear Slick-Util does not support NPOT textures.
You're pushing texels to the buffer in the wrong order.
ByteBuffer buffer = BufferUtils.createByteBuffer(image.getWidth() * image.getHeight() * 4);
for(int x = 0; x < image.getWidth(); x++) {
for(int y = 0; y < image.getHeight(); y++) {
int pixel = image.getRGB(x, y);
buffer.put((byte) ((pixel >> 16) & 0xFF)); // red
buffer.put((byte) ((pixel >> 8) & 0xFF)); // green
buffer.put((byte) (pixel & 0xFF)); // blue
buffer.put((byte) 0xFF); // alpha
}
}
You are iterating over the height in the inner loop, so the data is written column by column. glTexImage2D expects the data in scanline (row-by-row) order, so try swapping your x and y loops.
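In other words, with the rows driving the outer loop:

for (int y = 0; y < image.getHeight(); y++) {      // scanlines (rows) in the outer loop
    for (int x = 0; x < image.getWidth(); x++) {   // columns in the inner loop
        int pixel = image.getRGB(x, y);
        buffer.put((byte) ((pixel >> 16) & 0xFF)); // red
        buffer.put((byte) ((pixel >> 8) & 0xFF));  // green
        buffer.put((byte) (pixel & 0xFF));         // blue
        buffer.put((byte) 0xFF);                   // alpha
    }
}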
I am trying to implement a Gaussian blur with a convolution matrix in my shader.
This is the code I have:
float4 ppPS(float2 uv : TEXCOORD0, uniform sampler2D t1) : COLOR {
//kernel matrix
float3x3 kernel={1*(1/16),2*(1/16),1*(1/16),
2*(1/16),4*(1/16),2*(1/16),
1*(1/16),2*(1/16),1*(1/16)
};
int x,y;
float2 sum = 0;
for (x = -1; x <= 1; x++)
{
for (y = -1; y <= 1; y++)
{
float2 fl;
fl.x = uv.x+x;
fl.y = uv.y+y;
sum += (fl)*(kernel[x+1][y+1]);
}
}
return tex2D(t1, sum);
}
But for some reason, I get a picture that's all one solid color.
Here is the image without the blur:
Here is the image with the so-called blur:
Any idea what I am doing wrong here?
Try changing the float3x3 initializer values to floating-point format (1.0f/16.0f); otherwise the integer division 1/16 truncates to 0, so all the kernel values end up as 0.
//kernel matrix
static const float3x3 kernel={1*(1.0f/16.0f),2*(1.0f/16.0f),1*(1.0f/16.0f),
2*(1.0f/16.0f),4*(1.0f/16.0f),2*(1.0f/16.0f),
1*(1.0f/16.0f),2*(1.0f/16.0f),1*(1.0f/16.0f)
};
After this change you won't see the blank output image.
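Note that even with the weights fixed, the loop above still accumulates weighted texture coordinates into sum and does a single lookup at the end; a conventional 3x3 convolution samples the neighboring texels and sums their weighted colors. A sketch of that (mine, not from the answer; it assumes an extra texelSize uniform holding 1/width and 1/height of the texture):

float4 ppPS(float2 uv : TEXCOORD0, uniform sampler2D t1, uniform float2 texelSize) : COLOR {
    // 3x3 Gaussian kernel, weights sum to 1
    static const float3x3 kernel = {1.0f/16, 2.0f/16, 1.0f/16,
                                    2.0f/16, 4.0f/16, 2.0f/16,
                                    1.0f/16, 2.0f/16, 1.0f/16};
    float4 sum = 0;
    for (int x = -1; x <= 1; x++)
    {
        for (int y = -1; y <= 1; y++)
        {
            // sample the neighboring texel and weight its color
            sum += tex2D(t1, uv + float2(x, y) * texelSize) * kernel[x + 1][y + 1];
        }
    }
    return sum;
}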