Nim and SDL2 trouble with Rect

I have the following Nim + official libsdl2 wrapper code:
import sdl2

discard sdl2.init(INIT_EVERYTHING)

let
  window = createWindow("Tic-Tac-Toe", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 600, 390, SDL_WINDOW_SHOWN)
  renderer = createRenderer(window, -1, Renderer_Accelerated or Renderer_PresentVsync or Renderer_TargetTexture)

proc loadImage(file: string): TexturePtr =
  let loadedImage = loadBMP(file)
  let texture = createTextureFromSurface(renderer, loadedImage)
  freeSurface(loadedImage)
  return texture

proc applySurface(x: cint, y: cint, tex: TexturePtr, rend: RendererPtr) =
  var pos: Rect
  pos.x = x
  pos.y = y
  queryTexture(tex, nil, nil, pos.w, pos.h)
  copy(rend, tex, nil, pos)

let
  background = loadImage("resources/bg.bmp")

clear(renderer)
applySurface(0, 0, background, renderer)
present(renderer)

var
  evt = sdl2.defaultEvent
  runGame = true

while runGame:
  while pollEvent(evt):
    if evt.kind == QuitEvent:
      runGame = false
      break

destroy window
And there is an error during compilation:
source.nim(19, 15) Error: type mismatch: got (TexturePtr, nil, nil, cint, cint)
but expected one of:
sdl2.queryTexture(texture: TexturePtr, format: ptr uint32, access: ptr cint, w: ptr cint, h: ptr cint)
And the same for line 20:
source.nim(20, 7) Error: type mismatch: got (RendererPtr, TexturePtr, nil, Rect)
but expected one of:
system.copy(s: string, first: int)
system.copy(s: string, first: int, last: int)
sdl2.copy(renderer: RendererPtr, texture: TexturePtr, srcrect: ptr Rect, dstrect: ptr Rect)
If I replace pos with nil in copy() and comment out queryTexture(), everything compiles fine. Please help me solve this problem.

Your problem is that the procs require a ptr to the respective data types, not the data itself. For instance a ptr cint is required, but you are passing a plain cint. What you have to do is take the addr of the cint to get a ptr cint. For example:
var w = pos.w
var h = pos.h
queryTexture(tex, nil, nil, w.addr, h.addr)
Note that in order to "take the address" you need a variable of the var type (for details on that see this question). Since pos is a var, pos.w.addr and pos.h.addr should also work. Similarly you have to take pos.addr for the last parameter of copy.
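Putting that together, the fixed applySurface would look something like this (a minimal, untested sketch of the advice above):

proc applySurface(x: cint, y: cint, tex: TexturePtr, rend: RendererPtr) =
  var pos: Rect
  pos.x = x
  pos.y = y
  # queryTexture wants ptr cint for w and h, so pass the fields' addresses
  queryTexture(tex, nil, nil, pos.w.addr, pos.h.addr)
  # copy wants ptr Rect for the destination rectangle
  copy(rend, tex, nil, pos.addr)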

Rendering rapidly-changing arbitrary-sized meshes with Metal and Swift 3

I am trying to render random meshes to an MTKView as fast as the device will allow. Pretty much all the Metal examples I have found show how to draw a piece of geometry for which the buffer size is defined only once (i.e. fixed):
let dataSize = vertexCount * MemoryLayout<VertexWithColor>.size // size of the vertex data in bytes
let vertexBuffer: MTLBuffer = device!.makeBuffer(bytes: verticesWithColorArray, length: dataSize, options: []) // create a new buffer on the GPU
The goal is to eventually generate meshes on the fly given some point cloud input. I've set up drawing to be triggered with a tap as follows:
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    if let touch = touches.first {
        let touchPoint = touch.location(in: view)
        print("...touch \(touchPoint)")
        autoreleasepool {
            delaunayView.setupTriangles()
            delaunayView.renderTriangles()
        }
    }
}
I can get the screen to refresh with new triangles, as long as I don't tap too frequently. However, if I tap too quickly (like say a double tap), the app crashes with the following error:
[CAMetalLayerDrawable texture] should not be called after presenting the drawable.
Performance will obviously be linked to the number of triangles drawn. Besides getting the app to function stably, just as important is the question: how can I best take advantage of the GPU to push as many triangles as possible? (In its current state, the app draws about 30,000 triangles at 3 fps on an iPad Air 2.)
Any pointers/gotchas for speed and frame rate would be most welcome.
The whole project can be found here:
Also, below is the pertinent updated Metal class:
import Metal
import MetalKit
import GameplayKit

protocol MTKViewDelaunayTriangulationDelegate: NSObjectProtocol {
    func fpsUpdate(fps: Int)
}

class MTKViewDelaunayTriangulation: MTKView {
    //var kernelFunction: MTLFunction!
    var pipelineState: MTLComputePipelineState!
    var defaultLibrary: MTLLibrary! = nil
    var commandQueue: MTLCommandQueue! = nil
    var renderPipeline: MTLRenderPipelineState!
    var errorFlag: Bool = false
    var verticesWithColorArray: [VertexWithColor]!
    var vertexCount: Int
    var verticesMemoryByteSize: Int
    let fpsLabel = UILabel(frame: CGRect(x: 0, y: 0, width: 400, height: 20))
    var frameCounter: Int = 0
    var frameStartTime = CFAbsoluteTimeGetCurrent()
    weak var MTKViewDelaunayTriangulationDelegate: MTKViewDelaunayTriangulationDelegate?

    init(frame: CGRect) {
        vertexCount = 100000
        //verticesMemoryByteSize = vertexCount * MemoryLayout<VertexWithColor>.size
        verticesMemoryByteSize = vertexCount * MemoryLayout<VertexWithColor>.stride // apple recommendation
        super.init(frame: frame, device: MTLCreateSystemDefaultDevice())
        setupMetal()
        //setupTriangles()
        //renderTriangles()
    }

    required init(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    /*
    override func draw(_ rect: CGRect) {
        step() // needed to update frame counter
        autoreleasepool {
            setupTriangles()
            renderTriangles()
        }
    }
    */

    func step() {
        frameCounter += 1
        if frameCounter == 100 {
            let frametime = (CFAbsoluteTimeGetCurrent() - frameStartTime) / 100
            MTKViewDelaunayTriangulationDelegate?.fpsUpdate(fps: Int(1 / frametime)) // let the delegate know of the frame update
            print("...frametime: \(Int(1 / frametime))")
            frameStartTime = CFAbsoluteTimeGetCurrent() // reset start time
            frameCounter = 0 // reset counter
        }
    }

    func setupMetal() {
        // Steps required to set up Metal for rendering:
        // 1. Create a MTLDevice
        // 2. Create a command queue
        // 3. Access the custom shader library
        // 4. Compile shaders from the library
        // 5. Create a render pipeline
        // 6. Set buffer size of objects to be drawn
        // 7. Draw to pipeline through a renderCommandEncoder

        // 1. Create a MTLDevice
        guard let device = MTLCreateSystemDefaultDevice() else {
            errorFlag = true
            //particleLabDelegate?.particleLabMetalUnavailable()
            return
        }

        // 2. Create a command queue
        commandQueue = device.makeCommandQueue()

        // 3. Access the custom shader library
        defaultLibrary = device.newDefaultLibrary()

        // 4. Compile shaders from the library
        let fragmentProgram = defaultLibrary.makeFunction(name: "basic_fragment")
        let vertexProgram = defaultLibrary.makeFunction(name: "basic_vertex")

        // 5a. Define render pipeline settings
        let renderPipelineDescriptor = MTLRenderPipelineDescriptor()
        renderPipelineDescriptor.vertexFunction = vertexProgram
        renderPipelineDescriptor.sampleCount = self.sampleCount
        renderPipelineDescriptor.colorAttachments[0].pixelFormat = self.colorPixelFormat
        renderPipelineDescriptor.fragmentFunction = fragmentProgram

        // 5b. Compile renderPipeline with the above renderPipelineDescriptor
        do {
            renderPipeline = try device.makeRenderPipelineState(descriptor: renderPipelineDescriptor)
        } catch let error as NSError {
            print("render pipeline error: " + error.description)
        }

        // initialize counter variables
        frameStartTime = CFAbsoluteTimeGetCurrent()
        frameCounter = 0
    } // end of setupMetal

    /// Generate the set of vertices for our triangulation to use
    func generateVertices(_ size: CGSize, cellSize: CGFloat, variance: CGFloat = 0.75, seed: UInt64 = numericCast(arc4random())) -> [Vertex] {
        // How many cells we're going to have on each axis (pad by 2 cells on each edge)
        let cellsX = (size.width + 4 * cellSize) / cellSize
        let cellsY = (size.height + 4 * cellSize) / cellSize

        // figure out the bleed widths to center the grid
        let bleedX = ((cellsX * cellSize) - size.width) / 2
        let bleedY = ((cellsY * cellSize) - size.height) / 2
        let _variance = cellSize * variance / 4

        var points = [Vertex]()
        let minX = -bleedX
        let maxX = size.width + bleedX
        let minY = -bleedY
        let maxY = size.height + bleedY
        let generator = GKLinearCongruentialRandomSource(seed: seed)

        for i in stride(from: minX, to: maxX, by: cellSize) {
            for j in stride(from: minY, to: maxY, by: cellSize) {
                let x = i + cellSize/2 + CGFloat(generator.nextUniform()) + CGFloat.random(-_variance, _variance)
                let y = j + cellSize/2 + CGFloat(generator.nextUniform()) + CGFloat.random(-_variance, _variance)
                points.append(Vertex(x: Double(x), y: Double(y)))
            }
        }
        return points
    } // end of generateVertices

    func setupTriangles() {
        // generate n random triangles
        verticesWithColorArray = [] // empty out vertex array
        for _ in 0 ... vertexCount {
            //for vertex in vertices {
            let x = Float(Double.random(-1.0, 1.0))
            let y = Float(Double.random(-1.0, 1.0))
            let v = VertexWithColor(x: x, y: y, z: 0.0, r: Float(Double.random()), g: Float(Double.random()), b: Float(Double.random()), a: 0.0)
            verticesWithColorArray.append(v)
        }
    } // end of setupTriangles

    func renderTriangles() {
        // 6. Set buffer size of objects to be drawn
        //let dataSize = vertexCount * MemoryLayout<VertexWithColor>.size // size of the vertex data in bytes
        let dataSize = vertexCount * MemoryLayout<VertexWithColor>.stride // apple recommendation
        let vertexBuffer: MTLBuffer = device!.makeBuffer(bytes: verticesWithColorArray, length: dataSize, options: []) // create a new buffer on the GPU
        let renderPassDescriptor: MTLRenderPassDescriptor? = self.currentRenderPassDescriptor

        // If the renderPassDescriptor is valid, begin the commands to render into its drawable
        if renderPassDescriptor != nil {
            // Create a new command buffer for each tessellation pass
            let commandBuffer: MTLCommandBuffer? = commandQueue.makeCommandBuffer()

            // 7a. Create a renderCommandEncoder for our renderPipeline
            let renderCommandEncoder: MTLRenderCommandEncoder? = commandBuffer?.makeRenderCommandEncoder(descriptor: renderPassDescriptor!)
            renderCommandEncoder?.label = "Render Command Encoder"
            //renderCommandEncoder?.pushDebugGroup("Tessellate and Render")
            renderCommandEncoder?.setRenderPipelineState(renderPipeline!)
            renderCommandEncoder?.setVertexBuffer(vertexBuffer, offset: 0, at: 0)
            // most important below: we tell the GPU to draw a set of triangles based on the vertex buffer. Each triangle consists of three vertices, starting at index 0 inside the vertex buffer, so there are vertexCount/3 triangles in total
            //renderCommandEncoder?.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: vertexCount, instanceCount: vertexCount/3)
            renderCommandEncoder?.drawPrimitives(type: .triangle, vertexStart: 0, vertexCount: vertexCount)
            //renderCommandEncoder?.popDebugGroup()
            renderCommandEncoder?.endEncoding() // finalize renderEncoder setup
            commandBuffer?.present(self.currentDrawable!) // needed to make sure the new texture is presented as soon as the drawing completes

            // 7b. Render to pipeline
            commandBuffer?.commit() // commit and send task to the gpu
        }
    } // end of renderTriangles
} // end of class MTKViewDelaunayTriangulation
You shouldn't be calling setupTriangles() or, especially, renderTriangles() from init(). Nor, as per your comment, from touchesBegan(). In general, you should only attempt to draw when the framework calls your override of draw(_:).
How you update for user events depends on the drawing mode of the MTKView, as explained in the class overview. By default, your draw(_:) method is called periodically. In this mode, you shouldn't have to do anything about drawing in touchesBegan(). Just update your class's internal state about what it should draw. The actual drawing will happen automatically a short time later.
If you've configured the view to redraw after setNeedsDisplay(), then touchesBegan() should update internal state and then call setNeedsDisplay(). It shouldn't attempt to draw immediately. A short time after you return control back to the framework (i.e. return from touchesBegan()), it will call draw(_:) for you.
If you've configured the view to only draw when you explicitly call draw(), then you would do that after updating internal state.
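For example, with the setNeedsDisplay-driven mode, the pieces might look like this (a sketch only, reusing the names from the question; isPaused and enableSetNeedsDisplay are the standard MTKView mode flags):

// during setup: draw only when setNeedsDisplay() is called
delaunayView.isPaused = true
delaunayView.enableSetNeedsDisplay = true

// in the view controller: update state, then request a redraw
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    delaunayView.setupTriangles()   // update internal state only
    delaunayView.setNeedsDisplay()  // the framework calls draw(_:) shortly after
}

// in MTKViewDelaunayTriangulation: all actual drawing happens here
override func draw(_ rect: CGRect) {
    step()
    autoreleasepool {
        renderTriangles()
    }
}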

GDI+ linear gradient strange behavior

I need to create a linear gradient brush to paint a surface with GDI+. On the input, I receive the following parameters:
Viewport rect: x = 0, y = 0, width = 744.09448819f, height = 1052.3622047f
Fill rect: x = 13.040037f, y = 17.478735f, width = 721.3703f, height = 1009.1535f
Gradient start: x = 384.8, y = 611.46
Gradient stop: x = 378.93, y = 474.96
Start color: a = 255, r = 179, g = 221, b = 253 (between 0 and 255)
End color: a = 255, r = 51, g = 102, b = 152 (between 0 and 255)
Matrix to apply: a = 7.7430985, b = 0, c = 0, d = 7.2926249, e = -2439.6639, f = -3446.2263
Wrap mode: clamp
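For reference, both SVG and GDI+ interpret such a matrix as the affine map (x, y) → (a·x + c·y + e, b·x + d·y + f). If my arithmetic is right, that sends the gradient start to roughly (7.743 · 384.8 − 2439.66, 7.293 · 611.46 − 3446.23) ≈ (539.9, 1012.9) and the gradient stop to roughly (494.4, 17.5) in device coordinates, both well inside the viewport.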
These values were extracted from a SVG file. When I open it with my browser, I get the following result:
https://drive.google.com/open?id=0B7C-RwCTA9qaNnFsaGJpenlseGc
Now I try to draw the exact same gradient with GDI+. I am using Embarcadero RAD Studio C++Builder to do that. I wrote the following code:
void DrawGradient()
{
    std::auto_ptr<TBitmap> pBitmap(new TBitmap());
    pBitmap->Width = 744.09448819f;
    pBitmap->Height = 1052.3622047f;
    pBitmap->Canvas->Brush->Color = clWhite;
    pBitmap->Canvas->FillRect(TRect(0, 0, pBitmap->Width, pBitmap->Height));

    Gdiplus::Graphics graphics(pBitmap->Canvas->Handle);
    Gdiplus::LinearGradientBrush brush(
        Gdiplus::PointF(384.8, 611.46),
        Gdiplus::PointF(378.93, 474.96),
        Gdiplus::Color(255, 179, 221, 253),
        Gdiplus::Color(255, 51, 102, 152));

    Gdiplus::Matrix matrix(7.7430985, 0, 0, 7.2926249, -2439.6639, -3446.2263);
    brush.SetTransform(&matrix);

    graphics.FillRectangle(&brush, 13.040037f, 17.478735f, 721.3703f, 1009.1535f);
    pBitmap->SaveToFile(L"test.bmp");
}
However I get this result:
https://drive.google.com/open?id=0B7C-RwCTA9qaU1BhSTJxay16Z00
Putting aside the question of clamping, for which I know GDI+ offers no simple solution (there is no Gdiplus::WrapModeClamp to pass to SetWrapMode(), which would have made this easy for users), shouldn't the resulting drawing at least be closer to the expected result, or am I wrong?
Can somebody explain to me why I get such a different result?
NOTE: I have already tweaked all the parameters; I obtain just several variations of the same wrong result.
NOTE The original SVG that I refer above can be obtained here:
https://drive.google.com/open?id=0B7C-RwCTA9qaYkNRQ3lJNC1fNmM

OpenGL/Libgdx scissors

In libgdx I am trying to stop the rendering of actors outside of a group, as well as within some rectangles inside the group; an example is below:
----------------------
----------------------
--******************--
--******************--
--*****--------*****--
--******************--
--******************--
----------------------
----------------------
The group is defined as * which is where I would like actors drawn, - is where I would like actors clipped.
Using the code below, I have managed to clip any actors outside of the group:
Rectangle scissors = new Rectangle();
Rectangle clipBounds = new Rectangle(getX(), getY(), getWidth(), getHeight());
ScissorStack.calculateScissors(screen.getCamera(), screen.getBatch().getTransformMatrix(), clipBounds, scissors);
ScissorStack.pushScissors(scissors);
super.draw(batch, parentAlpha);
batch.flush();
ScissorStack.popScissors();
How can I also clip actors inside the group (I have tried adding other clip bounds but it only renders sprites which are inside both clipbounds)?
I don't think you can nest an interior "anti-scissor" region inside a scissor region. So you cannot, as far as I know, use the basic Libgdx/OpenGL primitives to accomplish this.
You can accomplish something similar by re-drawing the inner-most "------" region after drawing your actors (the "***" stuff).
Alternatively, you can probably use a Stencil Buffer or Depth Buffer or a custom Fragment Shader. (See https://github.com/mattdesl/lwjgl-basics/wiki/LibGDX-Masking for ideas.)
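For instance, a sketch of that re-draw idea on top of the scissor code from the question (backgroundRegion and the hole coordinates are hypothetical placeholders for whatever sits behind the group):

ScissorStack.calculateScissors(screen.getCamera(), screen.getBatch().getTransformMatrix(), clipBounds, scissors);
ScissorStack.pushScissors(scissors);
super.draw(batch, parentAlpha);
// cover the inner "------" hole by re-drawing whatever belongs behind the group
batch.draw(backgroundRegion, holeX, holeY, holeWidth, holeHeight);
batch.flush();
ScissorStack.popScissors();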
Here is an implementation of my own ScissorGroup:
class ScissorGroup : Group() {
    private lateinit var localToScreenCoordinates: Vector2
    private lateinit var localToScreenSize: Vector2
    private var X: Int = 0
    private var Y: Int = 0
    private var W: Int = 0
    private var H: Int = 0

    override fun draw(batch: Batch?, parentAlpha: Float) {
        if (stage != null) drawScissor(batch, parentAlpha)
    }

    private fun drawScissor(batch: Batch?, parentAlpha: Float) {
        batch?.endbegin {}
        localToScreenCoordinates = localToScreenCoordinates(Vector2())
        localToScreenSize = stage.viewport.project(Vector2(width, height))
        X = localToScreenCoordinates.x.toInt()
        Y = Gdx.graphics.height - localToScreenCoordinates.y.toInt()
        W = localToScreenSize.x.toInt()
        H = localToScreenSize.y.toInt()
        Gdx.gl.glEnable(GL20.GL_SCISSOR_TEST)
        Gdx.gl.glScissor(
            X, Y,
            W - stage.viewport.zeroScreenVector.x.toInt(),
            H - stage.viewport.zeroScreenVector.y.toInt()
        )
        super.draw(batch, parentAlpha)
        batch?.flush()
        Gdx.gl.glDisable(GL20.GL_SCISSOR_TEST)
    }
}

fun Batch.beginend(block: Batch.() -> Unit = {}) {
    begin()
    block()
    end()
}

fun Batch.endbegin(block: Batch.() -> Unit = {}) {
    end()
    block()
    begin()
}

val Viewport.zeroScreenVector: Vector2 get() = project(Vector2(0f, 0f))
How to use it:
val scissor = ScissorGroup()
stage.addActor(scissor)
scissor.debug()
scissor.setBounds(50f, 0f, 50f, 50f)
scissor.addActor(a)

How do I get an event in Raphael's paper coordinates

I would like to get the coordinates of a mouse event in Raphael's paper coordinates. I would like those to be accurate even when I have used setViewBox.
Please see http://jsfiddle.net/CEnBN/
The following creates a 10x10 green box and then zooms way in - with the center of that box at the view's origin.
var paper = Raphael(10, 50, 320, 200);
var rect = paper.rect(0, 0, 10, 10);
rect.attr('fill', 'green');
rect.mousedown(function (event, a, b) {
    $('#here').text([a, b]);
    console.log(event);
});
paper.setViewBox(5, 5, 10, 10);
I would like to receive click coordinates that reflect their position in the box, i.e. they should range over ([5-10], [5-10]).
Note: much later, I have migrated to D3.js, which has generally made me a lot happier.
Edited: simplified by using the clientX/Y of the mouse event, removing the need to get the element offset.
Here is what I came up with. Basically, correct the mouse position to be relative to the paper by using the client rect of the paper and the clientX/Y of the mouse event. Then divide the corrected positions by the client rect's width/height, and factor the results by the original paper dimensions:
var paper = Raphael(10, 50, 320, 200);
var rect = paper.rect(0, 0, 10, 10);
rect.attr('fill', 'green');
rect.mousedown(function (event, a, b) {
    // get bounding rect of the paper
    var bnds = event.target.getBoundingClientRect();
    // adjust mouse x/y
    var mx = event.clientX - bnds.left;
    var my = event.clientY - bnds.top;
    // divide x/y by the bounding w/h to get location %s, and factor by the actual paper w/h
    var fx = mx / bnds.width * rect.attrs.width;
    var fy = my / bnds.height * rect.attrs.height;
    // clean up output
    fx = Number(fx).toPrecision(3);
    fy = Number(fy).toPrecision(3);
    $('#here').text('x: ' + fx + ', y: ' + fy);
});
paper.setViewBox(5, 5, 10, 10);
An updated fiddle link is here:
http://jsfiddle.net/CEnBN/3/
A more compact version of the mousedown function:
rect.mousedown(function (event, a, b) {
    var bnds = event.target.getBoundingClientRect();
    var fx = (event.clientX - bnds.left) / bnds.width * rect.attrs.width;
    var fy = (event.clientY - bnds.top) / bnds.height * rect.attrs.height;
    $('#here').text('x: ' + fx + ', y: ' + fy);
});
You need to offset the result, something like this:
var posx, posy;
var paper = Raphael("canvas", 320, 200);
var rect = paper.rect(0, 0, 10, 10);
rect.attr('fill', 'green');
rect.mousedown(function (e, a, b) {
    posx = e.pageX - $(document).scrollLeft() - $('#canvas').offset().left;
    posy = e.pageY - $(document).scrollTop() - $('#canvas').offset().top;
    $('#here').text([posx, posy]);
    console.log(e);
});
paper.setViewBox(5, 5, 10, 10);
paper.setViewBox(5, 5, 10, 10);
I added an element for Raphael.js to target; have a look at this update to your jsfiddle.
The answer by gthmb is very good, but is missing one detail: the position of the rectangle on the paper. That version only works if the rectangle is at position (0,0). To also support the situation where it is translated, add the position of the rectangle to the result:
function mouseEvent_handler(event) {
    var bnds = event.target.getBoundingClientRect();
    var bbox = this.getBBox();
    var fx = (event.clientX - bnds.left) / bnds.width * rect.attrs.width + bbox.x;
    var fy = (event.clientY - bnds.top) / bnds.height * rect.attrs.height + bbox.y;
    $('#here').text('x: ' + fx + ', y: ' + fy);
}
Here the modified version of the fiddle: http://jsfiddle.net/zJu8b/1/

How do I remove (or apply) transparency on a gdk-pixbuf?

I have a C++ program in which a GdkPixbuf is created. I want to output it as an image, so I call gdk_pixbuf_save_to_stream(pixbuf, stream, type, NULL, &err, NULL). This works fine when type is png or tiff, but with jpeg or bmp it just produces a black square. The original pixbuf consists of black-on-transparent (and gdk_pixbuf_get_has_alpha returns true), so I'm guessing that the problem is with the alpha mask.
GdkPixbuf has a function to add an alpha channel, but I can't see one that removes it again, or (which might be as good) to invert it.
Is there a simple way to get the jpeg and bmp formats to work properly?
(I should say that I'm very new to proper programming like this.)
JPEG doesn't have any notion of an alpha channel, or transparency at all. The alpha channel is stripped during the conversion to JPEG. BMP has the same restriction.
Since transparency is important to you, your program should stick to generating PNGs.
As for the question you've posed in the title, removing an alpha channel can be done manually. The trick is understanding how the data in a GdkPixbuf is stored. When you have an RGB pixbuf with an alpha channel (also called RGBA), the pixels are stored as 32-bit values: 4 bytes, one byte per color, with the fourth being the alpha channel. RGB pixbufs are stored as 24-bit values, one byte per color.
So, if you create a temporary byte buffer and copy over the first three bytes of each RGBA pixel and drop the fourth, that temporary buffer is then pure RGB. To diagram it a little:
[R][G][B][A][R][G][B][A]... => [R][G][B][R][G][B]...
Note that you have to pack the temporary buffer; there's no spare byte between the [B] byte and the next [R] byte.
You then create a new GdkPixbuf by handing it this RGB buffer, and you've removed the alpha channel.
See gdk_pixbuf_get_pixels() to access the RGBA buffer and gdk_pixbuf_new_from_data() to create the RGB pixbuf. See here for more discussion on how the packed data is stored inside a GdkPixbuf.
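For illustration, here is a minimal C sketch of that copy (the question is C++, but GdkPixbuf is a C API; this assumes an 8-bit-per-sample RGBA pixbuf, as described above, and accounts for the row stride):

#include <gdk-pixbuf/gdk-pixbuf.h>

/* Build a packed RGB copy of an RGBA pixbuf by dropping every fourth byte. */
static GdkPixbuf *strip_alpha(GdkPixbuf *rgba)
{
    int width = gdk_pixbuf_get_width(rgba);
    int height = gdk_pixbuf_get_height(rgba);
    int src_stride = gdk_pixbuf_get_rowstride(rgba);
    const guchar *src = gdk_pixbuf_get_pixels(rgba);

    int dst_stride = width * 3; /* packed: no spare byte between pixels */
    guchar *dst = g_malloc(dst_stride * height);

    for (int y = 0; y < height; y++) {
        const guchar *s = src + y * src_stride;
        guchar *d = dst + y * dst_stride;
        for (int x = 0; x < width; x++) {
            d[0] = s[0]; /* R */
            d[1] = s[1]; /* G */
            d[2] = s[2]; /* B */
            s += 4;      /* skip A */
            d += 3;
        }
    }

    /* The destroy callback frees the buffer when the pixbuf is finalized. */
    return gdk_pixbuf_new_from_data(dst, GDK_COLORSPACE_RGB, FALSE, 8,
                                    width, height, dst_stride,
                                    (GdkPixbufDestroyNotify) g_free, NULL);
}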
Here is a (rather inefficient and ugly) Vala application that removes transparency from an image and saves it in the format specified. NOTE: There is a small bug in the Vala binding for gdk_pixbuf_new_from_data() that causes corruption of the resulting image. I'm going to fix that soon, but this is mainly for demonstration purposes for now (besides, the question was about C++):
public static int main (string[] args) {
    if (args.length < 4) {
        print ("Usage: %s SOURCE DESTINATION FORMAT\n", args[0]);
        return -1;
    }

    var src_path = args[1];
    var destination_path = args[2];
    var dest_type = args[3];
    var pixbuf = new Gdk.Pixbuf.from_file_at_scale (src_path, 48, 48, false);

    // Remove alpha channel
    if (pixbuf.get_has_alpha () && pixbuf.get_n_channels () == 4 && pixbuf.get_bits_per_sample () == 8) {
        var width = pixbuf.get_width ();
        var height = pixbuf.get_height ();
        var rowstride = pixbuf.get_rowstride ();
        unowned uint8[] orig_pixels = pixbuf.get_pixels ();
        var pixels = new uint8[rowstride * height];

        for (var i = 0; i < height; i++) {
            for (var j = 0, k = 0; j < width * 4; j += 4, k += 3) {
                var orig_index = rowstride * i + j;
                var index = rowstride * i + k;

                if (orig_pixels[orig_index] == 0 &&
                    orig_pixels[orig_index + 1] == 0 &&
                    orig_pixels[orig_index + 2] == 0 &&
                    orig_pixels[orig_index + 3] == 0) {
                    pixels[index] = 0xFF;
                    pixels[index + 1] = 0xFF;
                    pixels[index + 2] = 0xFF;
                } else {
                    pixels[index] = orig_pixels[orig_index];
                    pixels[index + 1] = orig_pixels[orig_index + 1];
                    pixels[index + 2] = orig_pixels[orig_index + 2];
                }
            }
        }

        pixbuf = new Gdk.Pixbuf.from_data (pixels,
                                           pixbuf.get_colorspace (),
                                           false,
                                           8,
                                           width,
                                           height,
                                           rowstride,
                                           null);
    }

    pixbuf.save (destination_path, dest_type);
    return 0;
}