OSG render scene into image - opengl

I am trying to render an OSG scene into an image in my Qt program, following the SnapImageDrawCallback example (https://www.mail-archive.com/osg-users@lists.openscenegraph.org/msg45360.html).
class SnapImageDrawCallback : public osg::CameraNode::DrawCallback {
public:
    SnapImageDrawCallback()
    {
        _snapImageOnNextFrame = false;
    }

    void setFileName(const std::string& filename) { _filename = filename; }
    const std::string& getFileName() const { return _filename; }

    void setSnapImageOnNextFrame(bool flag) { _snapImageOnNextFrame = flag; }
    bool getSnapImageOnNextFrame() const { return _snapImageOnNextFrame; }

    virtual void operator () (const osg::CameraNode& camera) const
    {
        if (!_snapImageOnNextFrame) return;

        int x, y, width, height;
        x = camera.getViewport()->x();
        y = camera.getViewport()->y();
        width = camera.getViewport()->width();
        height = camera.getViewport()->height();

        osg::ref_ptr<osg::Image> image = new osg::Image;
        image->readPixels(x, y, width, height, GL_RGB, GL_UNSIGNED_BYTE);

        if (osgDB::writeImageFile(*image, _filename))
        {
            std::cout << "Saved screen image to `" << _filename << "`" << std::endl;
        }

        _snapImageOnNextFrame = false;
    }

protected:
    std::string _filename;
    mutable bool _snapImageOnNextFrame;
};
I set this as the final draw callback on the osgViewer::Viewer's camera, but I get a blank image, along with the warning "Warning: detected OpenGL error 'invalid operation' at start of State::apply()" when image->readPixels is invoked. My osgViewer::Viewer is embedded in a QQuickFramebufferObject. Can anyone give some suggestions?

I'm not sure I can give you the right pointer; you should provide more details about your setup and what you're after.
As a general note, if you're trying to render with OSG into a QtQuick item, the best approach is to have OSG render to an FBO in a separate shared GL context, and copy the FBO contents back to the QtQuick item.
I tested this approach some time ago; see the code here:
https://github.com/rickyviking/qmlosg
Another similar project here: https://github.com/podsvirov/osgqtquick
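For orientation, here is a minimal sketch of how that approach can map onto Qt's QQuickFramebufferObject API (this is my assumption of the general shape, not code from either project; viewer setup, sizing, and thread-affinity details are omitted):

// Sketch: assumes m_viewer is an osgViewer::Viewer already configured
// with an embedded graphics window matching the item's size.
class OsgRenderer : public QQuickFramebufferObject::Renderer
{
public:
    QOpenGLFramebufferObject* createFramebufferObject(const QSize& size) override
    {
        QOpenGLFramebufferObjectFormat format;
        format.setAttachment(QOpenGLFramebufferObject::CombinedDepthStencil);
        return new QOpenGLFramebufferObject(size, format);
    }
    void render() override
    {
        m_viewer->frame(); // Qt has bound the item's FBO; OSG draws into it
        update();          // schedule the next frame
    }
private:
    osg::ref_ptr<osgViewer::Viewer> m_viewer;
};

class OsgItem : public QQuickFramebufferObject
{
    Q_OBJECT
public:
    Renderer* createRenderer() const override { return new OsgRenderer; }
};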

You can use a PBO (pixel buffer object) to read the pixels back through a buffer instead of straight into client memory:
// Create a PBO and read the framebuffer into it, then map it to copy
// the pixels into the osg::Image.
ext->glGenBuffers(1, &pbo);
ext->glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, pbo);
ext->glBufferData(GL_PIXEL_PACK_BUFFER_ARB, _width*_height*4, 0, GL_STREAM_READ);

// With a pack buffer bound, the last argument is an offset into the PBO.
glReadPixels(0, 0, _width, _height, _pixelFormat, _type, 0);

GLubyte* src = (GLubyte*)ext->glMapBuffer(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB);
if (src)
{
    memcpy(image->data(), src, _width*_height*4);
    ext->glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
}
ext->glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
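To tie this back to the question, a minimal sketch of wrapping the PBO readback in a final draw callback might look like the following (assuming OSG 3.4+, where the buffer-object entry points live on osg::GLExtensions; the PBO is recreated on every snapshot for brevity, and the output file name is a placeholder):

class PboSnapImageCallback : public osg::Camera::DrawCallback
{
public:
    virtual void operator () (osg::RenderInfo& renderInfo) const
    {
        osg::State& state = *renderInfo.getState();
        osg::GLExtensions* ext = state.get<osg::GLExtensions>();
        const osg::Viewport* vp = renderInfo.getCurrentCamera()->getViewport();
        int width  = static_cast<int>(vp->width());
        int height = static_cast<int>(vp->height());

        osg::ref_ptr<osg::Image> image = new osg::Image;
        image->allocateImage(width, height, 1, GL_RGBA, GL_UNSIGNED_BYTE);

        // Read the framebuffer through a transient PBO.
        GLuint pbo = 0;
        ext->glGenBuffers(1, &pbo);
        ext->glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, pbo);
        ext->glBufferData(GL_PIXEL_PACK_BUFFER_ARB, width*height*4, 0, GL_STREAM_READ);
        glReadPixels(static_cast<int>(vp->x()), static_cast<int>(vp->y()),
                     width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);

        GLubyte* src = (GLubyte*)ext->glMapBuffer(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB);
        if (src)
        {
            memcpy(image->data(), src, width*height*4);
            ext->glUnmapBuffer(GL_PIXEL_PACK_BUFFER_ARB);
        }
        ext->glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0);
        ext->glDeleteBuffers(1, &pbo);

        osgDB::writeImageFile(*image, "snapshot.png"); // placeholder file name
    }
};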

Related

How to properly do Context Sharing with GLFW?

What I'm trying to do is make it so that, if I replace the window I'm rendering to with a new window, my GL objects keep working; that could happen because the user switches screens, or switches from fullscreen to windowed, or for any number of other reasons.
My code so far looks like this:
"Context.h"
struct window_deleter {
    void operator()(GLFWwindow * window) const;
};

class context {
    std::unique_ptr<GLFWwindow, window_deleter> window;
public:
    context(int width, int height, const char * s, GLFWmonitor * monitor, GLFWwindow * old_window, bool borderless);
    GLFWwindow * get_window() const;
    void make_current() const;
};
"Context.cpp"
context::context(int width, int height, const char * s, GLFWmonitor * monitor, GLFWwindow * old_window, bool borderless) {
    if (!glfwInit()) throw std::runtime_error("Unable to Initialize GLFW");
    if (borderless) glfwWindowHint(GLFW_DECORATED, 0);
    else glfwWindowHint(GLFW_DECORATED, 1);
    window.reset(glfwCreateWindow(width, height, s, monitor, old_window));
    if (!window) throw std::runtime_error("Unable to Create Window");
    make_current();
}

GLFWwindow * context::get_window() const {
    return window.get();
}

void context::make_current() const {
    glfwMakeContextCurrent(window.get());
}
"WindowManager.h"
#include "Context.h"
class window_style;
/* window_style is basically a really fancy "enum class", and I don't
* believe its implementation or interface are relevant to this project.
* I'll add it if knowing how it works is super critical.
*/
class window_manager {
context c_context;
uint32_t c_width, c_height;
std::string c_title;
window_style c_style;
std::function<bool()> close_test;
std::function<void()> poll_task;
public:
static GLFWmonitor * get_monitor(window_style style);
window_manager(uint32_t width, uint32_t height, std::string const& title, window_style style);
context & get_context();
const context & get_context() const;
bool resize(uint32_t width, uint32_t height, std::string const& title, window_style style);
std::function<bool()> get_default_close_test();
void set_close_test(std::function<bool()> const& test);
std::function<void()> get_default_poll_task();
void set_poll_task(std::function<void()> const& task);
void poll_loop();
};
"WindowManager.cpp"
GLFWmonitor * window_manager::get_monitor(window_style style) {
    if (style.type != window_style::style_type::fullscreen) return nullptr;
    if (!glfwInit()) throw std::runtime_error("Unable to initialize GLFW");
    int count;
    GLFWmonitor ** monitors = glfwGetMonitors(&count);
    if (style.monitor_number >= uint32_t(count)) throw invalid_monitor_exception{};
    return monitors[style.monitor_number];
}

std::function<bool()> window_manager::get_default_close_test() {
    return [&] { return glfwWindowShouldClose(c_context.get_window()) != 0; };
}

window_manager::window_manager(uint32_t width, uint32_t height, std::string const& title, window_style style) :
    c_context(int(width), int(height), title.c_str(), get_monitor(style), nullptr, style.type == window_style::style_type::borderless),
    c_width(width), c_height(height), c_title(title), c_style(style), close_test(get_default_close_test()), poll_task(get_default_poll_task()) {
}

context & window_manager::get_context() {
    return c_context;
}

const context & window_manager::get_context() const {
    return c_context;
}

bool window_manager::resize(uint32_t width, uint32_t height, std::string const& title, window_style style) {
    if (width == c_width && height == c_height && title == c_title && style == c_style) return false;
    c_width = width;
    c_height = height;
    c_title = title;
    c_style = style;
    c_context = context(int(width), int(height), title.c_str(), get_monitor(style), get_context().get_window(), style.type == window_style::style_type::borderless);
    return true;
}

void window_manager::set_close_test(std::function<bool()> const& test) {
    close_test = test;
}

std::function<void()> window_manager::get_default_poll_task() {
    return [&] { glfwSwapBuffers(c_context.get_window()); };
}

void window_manager::set_poll_task(std::function<void()> const& task) {
    poll_task = task;
}

void window_manager::poll_loop() {
    while (!close_test()) {
        glfwPollEvents();
        poll_task();
    }
}
"Main.cpp"
int main() {
    try {
        glfwInit();
        const GLFWvidmode * vid_mode = glfwGetVideoMode(glfwGetPrimaryMonitor());
        gl_backend::window_manager window(vid_mode->width, vid_mode->height, "First test of the window manager", gl_backend::window_style::fullscreen(0));
        glfwSetKeyCallback(window.get_context().get_window(), [](GLFWwindow * window, int, int, int, int) { glfwSetWindowShouldClose(window, 1); });
        glbinding::Binding::initialize();
        //Anything with a "glresource" prefix is basically just a std::shared_ptr<GLuint>
        //with some extra deletion code added.
        glresource::vertex_array vao;
        glresource::buffer square;
        float data[] = {
            -.5f, -.5f,
             .5f, -.5f,
             .5f,  .5f,
            -.5f,  .5f
        };
        gl::glBindVertexArray(*vao);
        gl::glBindBuffer(gl::GL_ARRAY_BUFFER, *square);
        gl::glBufferData(gl::GL_ARRAY_BUFFER, sizeof(data), data, gl::GL_STATIC_DRAW);
        gl::glEnableVertexAttribArray(0);
        gl::glVertexAttribPointer(0, 2, gl::GL_FLOAT, false, 2 * sizeof(float), nullptr);
        std::string vert_src =
            "#version 430\n"
            "layout(location = 0) in vec2 vertices;"
            "void main() {"
            "gl_Position = vec4(vertices, 0, 1);"
            "}";
        std::string frag_src =
            "#version 430\n"
            "uniform vec4 square_color;"
            "out vec4 fragment_color;"
            "void main() {"
            "fragment_color = square_color;"
            "}";
        glresource::shader vert(gl::GL_VERTEX_SHADER, vert_src);
        glresource::shader frag(gl::GL_FRAGMENT_SHADER, frag_src);
        glresource::program program({ vert, frag });
        window.set_poll_task([&] {
            gl::glUseProgram(*program);
            gl::glBindVertexArray(*vao);
            glm::vec4 color{ (glm::sin(float(glfwGetTime())) + 1) / 2, 0.f, 0.5f, 1.f };
            gl::glUniform4fv(gl::glGetUniformLocation(*program, "square_color"), 1, glm::value_ptr(color));
            gl::glDrawArrays(gl::GL_QUADS, 0, 4);
            glfwSwapBuffers(window.get_context().get_window());
        });
        window.poll_loop();
        window.resize(vid_mode->width, vid_mode->height, "Second test of the window manager", gl_backend::window_style::fullscreen(1));
        glfwSetKeyCallback(window.get_context().get_window(), [](GLFWwindow * window, int, int, int, int) { glfwSetWindowShouldClose(window, 1); });
        window.poll_loop();
    }
    catch (std::exception const& e) {
        std::cerr << e.what() << std::endl;
        std::ofstream error_log("error.log");
        error_log << e.what() << std::endl;
        system("pause");
    }
    return 0;
}
So the current version of the code is supposed to do the following:
1. Display a fullscreen window on the primary monitor.
2. On this monitor, display a "square" (rectangle, really....) that over time transitions between magenta and blue, while the background transitions between magenta and a green-ish color.
3. When the user presses a key, create a new fullscreen window on the second monitor using the first window's context to feed into GLFW's window creation, and destroy the original window (in that order).
4. Display the same rectangle on this second window.
5. Continue to transition the background periodically.
6. When the user presses a key again, destroy the second window and exit the program.
Of these steps, step 4 doesn't work at all, and step 3 partially works: the window does get created, but it doesn't display by default, and the user has to call it up via the taskbar. All the other steps work as expected, including the transitioning background on both windows.
So my assumption is that something is going wrong with respect to the object sharing between contexts; specifically, it doesn't appear that the second context I'm creating is receiving the objects created by the first context. Is there an obvious logic error I'm making? Should I be doing something else to ensure that context sharing works as intended? Is it possible that there's just a bug in GLFW?
So my assumption is that something is going wrong with respect to the object sharing between contexts; specifically, it doesn't appear that the second context I'm creating is receiving the objects created by the first context. Is there an obvious logic error I'm making?
Yes, your premise is just wrong. Shared OpenGL contexts do not share the whole state, just the "big" objects which actually hold user-specific data (VBOs, textures, shaders and programs, renderbuffers, and so on), and not the ones which only reference them: state containers like VAOs and FBOs are never shared.
Should I be doing something else to ensure that context sharing works as intended?
Well, if you really want to go that route, you have to re-build all those state containers, and also restore the global state (all those glEnables, the depth buffer setting, blending state, tons of other things) of your original context.
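As a concrete sketch (my code, not the questioner's): after making the new context current, the shared VBO square from Main.cpp is still valid, but the VAO must be rebuilt there with the same attribute layout:

// Assumed: the new context is current; `square` is the shared glresource::buffer.
gl::GLuint new_vao;
gl::glGenVertexArrays(1, &new_vao);
gl::glBindVertexArray(new_vao);
gl::glBindBuffer(gl::GL_ARRAY_BUFFER, *square); // shared object, carries over
gl::glEnableVertexAttribArray(0);
gl::glVertexAttribPointer(0, 2, gl::GL_FLOAT, false, 2 * sizeof(float), nullptr);
// Global state (blending, depth settings, glEnables, ...) must also be re-applied.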
However, I find your whole concept doubtful here. You do not need to destroy a window when going from fullscreen to windowed, or to a different monitor on the same GPU, and GLFW directly supports that via glfwSetWindowMonitor().
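For example, a sketch of switching an existing window with glfwSetWindowMonitor() (available since GLFW 3.2; the positions and sizes are placeholders):

// Fullscreen on a chosen monitor, keeping the window and its GL context:
GLFWmonitor * monitor = glfwGetPrimaryMonitor();
const GLFWvidmode * mode = glfwGetVideoMode(monitor);
glfwSetWindowMonitor(window, monitor, 0, 0, mode->width, mode->height, mode->refreshRate);
// ...and back to windowed mode with an 800x600 client area at (100, 100):
glfwSetWindowMonitor(window, nullptr, 100, 100, 800, 600, GLFW_DONT_CARE);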
And even if you do re-create a window, this does not imply that you have to re-create the GL context. There might be some restrictions imposed by GLFW's API in that regard, but the underlying concepts are separate. You can basically make the old context current in the new window and be done with it. GLFW just inseparably links window and context together, which is kind of an unfortunate abstraction.
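On platforms that expose the two concepts separately this is explicit; for instance, with raw WGL on Windows (illustrative only; the device-context variables are hypothetical, and GLFW does not expose this):

HGLRC ctx = wglCreateContext(old_window_dc); // context created once
// ... after the replacement window (and its HDC) exists:
wglMakeCurrent(new_window_dc, ctx);          // same GL context, new drawable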
However, the only scenario I could imagine where re-creating the window would be necessary is one where different screens are driven by different GPUs; but GL context sharing won't work across different GL implementations, so even in that scenario you would have to rebuild the whole context state.

Qt video frames from camera corrupted

EDIT: The first answer solved my problem. Apart from that I had to set the ASI_BANDWIDTH_OVERLOAD value to 0.
I am programming a Linux application in C++/Qt 5.7 to track stars in my telescope. I use a camera (ZWO ASI 120MM with the accompanying SDK v0.3) and grab its frames in a while loop in a separate thread. These are then emitted to a QOpenGLWidget to be displayed. I have the following problem: when the mouse is inside the QOpenGLWidget area, the displayed frames get corrupted, especially when the mouse is moved. The problem is worst when I use an exposure time of 50 ms and disappears for lower exposure times. When I feed the pipeline with alternating images from disk, the problem disappears. I assume that this is some sort of thread-synchronization problem between the camera thread and the main thread, but I couldn't solve it. The same problem appears in the openastro software. Here are parts of the code:
MainWindow:
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent){
    mutex = new QMutex;
    camThread = new QThread(this);
    camera = new Camera(nullptr, mutex);
    display = new GLViewer(this, mutex);
    setCentralWidget(display);
    cameraHandle = camera->getHandle();
    connect(camThread, SIGNAL(started()), camera, SLOT(connect()));
    connect(camera, SIGNAL(exposureCompleted(const QImage)), display, SLOT(showImage(const QImage)), Qt::BlockingQueuedConnection);
    camera->moveToThread(camThread);
    camThread->start();
}
The routine that grabs the frames:
void Camera::captureFrame(){
    while( cameraIsReady && capturing ){
        mutex->lock();
        error = ASIGetVideoData(camID, buffer, bufferSize, int(exposure*2*1e-3)+500);
        if(error == ASI_SUCCESS){
            frame = QImage(buffer,width,height,QImage::Format_Indexed8).convertToFormat(QImage::Format_RGB32); //Indexed8 is for 8bit
            mutex->unlock();
            emit exposureCompleted(frame);
        }
        else {
            cameraStream << "timeout" << endl;
            mutex->unlock();
        }
    }
}
The slot that receives the image:
bool GLViewer::showImage(const QImage image)
{
    mutex->lock();
    mOrigImage = image;
    mRenderQtImg = mOrigImage;
    recalculatePosition();
    updateScene();
    mutex->unlock();
    return true;
}
And the GL function that sets the image:
void GLViewer::renderImage()
{
    makeCurrent();
    glClear(GL_COLOR_BUFFER_BIT);
    if (!mRenderQtImg.isNull())
    {
        glLoadIdentity();
        glPushMatrix();
        {
            if (mResizedImg.width() <= 0)
            {
                if (mRenderWidth == mRenderQtImg.width() && mRenderHeight == mRenderQtImg.height())
                    mResizedImg = mRenderQtImg;
                else
                    mResizedImg = mRenderQtImg.scaled(QSize(mRenderWidth, mRenderHeight),
                                                      Qt::IgnoreAspectRatio,
                                                      Qt::SmoothTransformation);
            }
            glRasterPos2i(mRenderPosX, mRenderPosY);
            glPixelZoom(1, -1);
            glDrawPixels(mResizedImg.width(), mResizedImg.height(), GL_RGBA, GL_UNSIGNED_BYTE, mResizedImg.bits());
        }
        glPopMatrix();
        glFlush();
    }
}
I stole this code from here: https://github.com/Myzhar/QtOpenCVViewerGl
And lastly, here is how my problem looks:
This looks awful.
The image producer should produce new images and emit them through a signal. Since QImage is implicitly shared, it will automatically recycle frames to avoid new allocations. Only when the producer thread out-runs the display thread will image copies be made.
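To illustrate the implicit sharing (generic Qt behavior, not project code):

QImage a{640, 480, QImage::Format_RGB888};
QImage b = a; // shallow copy: both objects share one pixel buffer
b.bits();     // non-const access detaches: b copies the data, a is untouched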
Instead of using an explicit loop in the Camera object, you can run the capture using a zero-duration timer and have the event loop invoke it. That way the camera object can process events, e.g. timers or cross-thread slot invocations.
There's no need for explicit mutexes, nor for a blocking connection: Qt's event loop provides cross-thread synchronization. Finally, the QtOpenCVViewerGl project performs image scaling on the CPU and is really an example of how not to do it. You can get image scaling for free by drawing the image on a textured quad; that's also an outdated technique from the fixed-pipeline days, but it works just fine.
The ASICamera class would look roughly as follows:
// https://github.com/KubaO/stackoverflown/tree/master/questions/asi-astro-cam-39968889
#include <QtOpenGL>
#include <QOpenGLFunctions_2_0>
#include "ASICamera2.h"
class ASICamera : public QObject {
    Q_OBJECT
    ASI_ERROR_CODE m_error;
    ASI_CAMERA_INFO m_info;
    QImage m_frame{640, 480, QImage::Format_RGB888};
    QTimer m_timer{this};
    int m_exposure_ms = 0;
    inline int id() const { return m_info.CameraID; }
    void capture() {
        m_error = ASIGetVideoData(id(), m_frame.bits(), m_frame.byteCount(),
                                  m_exposure_ms*2 + 500);
        if (m_error == ASI_SUCCESS)
            emit newFrame(m_frame);
        else
            qDebug() << "capture error" << m_error;
    }
public:
    explicit ASICamera(QObject * parent = nullptr) : QObject{parent} {
        connect(&m_timer, &QTimer::timeout, this, &ASICamera::capture);
    }
    ASI_ERROR_CODE error() const { return m_error; }
    bool open(int index) {
        m_error = ASIGetCameraProperty(&m_info, index);
        if (m_error != ASI_SUCCESS)
            return false;
        m_error = ASIOpenCamera(id());
        if (m_error != ASI_SUCCESS)
            return false;
        m_error = ASIInitCamera(id());
        if (m_error != ASI_SUCCESS)
            return false;
        m_error = ASISetROIFormat(id(), m_frame.width(), m_frame.height(), 1, ASI_IMG_RGB24);
        if (m_error != ASI_SUCCESS)
            return false;
        return true;
    }
    bool close() {
        m_error = ASICloseCamera(id());
        return m_error == ASI_SUCCESS;
    }
    Q_SIGNAL void newFrame(const QImage &);
    QImage frame() const { return m_frame; }
    Q_SLOT bool start() {
        m_error = ASIStartVideoCapture(id());
        if (m_error == ASI_SUCCESS)
            m_timer.start(0);
        return m_error == ASI_SUCCESS;
    }
    Q_SLOT bool stop() {
        m_timer.stop(); // stop polling before stopping the capture
        m_error = ASIStopVideoCapture(id());
        return m_error == ASI_SUCCESS;
    }
    ~ASICamera() {
        stop();
        close();
    }
};
Since I'm using a dummy ASI API implementation, the above is sufficient. Code for a real ASI camera would need to set appropriate controls, such as exposure.
The OpenGL viewer is also fairly simple:
class GLViewer : public QOpenGLWidget, protected QOpenGLFunctions_2_0 {
    Q_OBJECT
    QImage m_image;
    void ck() {
        for(GLenum err; (err = glGetError()) != GL_NO_ERROR;) qDebug() << "gl error" << err;
    }
    void initializeGL() override {
        initializeOpenGLFunctions();
        glClearColor(0.2f, 0.2f, 0.25f, 1.f);
    }
    void resizeGL(int width, int height) override {
        glViewport(0, 0, width, height);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho(0, width, height, 0, 0, 1);
        glMatrixMode(GL_MODELVIEW);
        update();
    }
    // From http://stackoverflow.com/a/8774580/1329652
    void paintGL() override {
        auto scaled = m_image.size().scaled(this->size(), Qt::KeepAspectRatio);
        GLuint texID;
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glGenTextures(1, &texID);
        glEnable(GL_TEXTURE_RECTANGLE);
        glBindTexture(GL_TEXTURE_RECTANGLE, texID);
        glTexImage2D(GL_TEXTURE_RECTANGLE, 0, GL_RGB, m_image.width(), m_image.height(), 0,
                     GL_RGB, GL_UNSIGNED_BYTE, m_image.constBits());
        glBegin(GL_QUADS);
        glTexCoord2f(0, 0);                              glVertex2f(0, 0);
        glTexCoord2f(m_image.width(), 0);                glVertex2f(scaled.width(), 0);
        glTexCoord2f(m_image.width(), m_image.height()); glVertex2f(scaled.width(), scaled.height());
        glTexCoord2f(0, m_image.height());               glVertex2f(0, scaled.height());
        glEnd();
        glDisable(GL_TEXTURE_RECTANGLE);
        glDeleteTextures(1, &texID);
        ck();
    }
public:
    GLViewer(QWidget * parent = nullptr) : QOpenGLWidget{parent} {}
    void setImage(const QImage & image) {
        Q_ASSERT(image.format() == QImage::Format_RGB888);
        m_image = image;
        update();
    }
};
Finally, we hook the camera and the viewer together. Since the camera initialization may take some time, we perform it in the camera's thread.
The UI should emit signals that control the camera, e.g. to open it, start/stop acquisition, etc., and have slots that provide feedback from the camera (e.g. state changes). A free-standing function would take the two objects and hook them together, using functors as appropriate to adapt the UI to a particular camera. If adapter code would be extensive, you'd use a helper QObject for that, but usually a function should suffice (as it does below).
class Thread : public QThread { public: ~Thread() { quit(); wait(); } };

// See http://stackoverflow.com/q/21646467/1329652
template <typename F>
static void postToThread(F && fun, QObject * obj = qApp) {
    QObject src;
    QObject::connect(&src, &QObject::destroyed, obj, std::forward<F>(fun),
                     Qt::QueuedConnection);
}

int main(int argc, char ** argv) {
    QApplication app{argc, argv};
    GLViewer viewer;
    viewer.setMinimumSize(200, 200);
    ASICamera camera;
    Thread thread;
    QObject::connect(&camera, &ASICamera::newFrame, &viewer, &GLViewer::setImage);
    QObject::connect(&thread, &QThread::destroyed, [&]{ camera.moveToThread(app.thread()); });
    camera.moveToThread(&thread);
    thread.start();
    postToThread([&]{
        camera.open(0);
        camera.start();
    }, &camera);
    viewer.show();
    return app.exec();
}
#include "main.moc"
The GitHub project includes a very basic ASI camera API test harness and is complete: you can run it and see the test video rendered in real time.

Texture not being drawn on Screen

I am trying to follow a slightly outdated tutorial on making a Tile Engine.
The problem is that the texture I am trying to draw on screen doesn't show up; I just get a black screen.
I've taken the most relevant parts of Engine.cpp:
bool Engine::Init()
{
    LoadTextures();
    window = new sf::RenderWindow(sf::VideoMode(800,600,32), "RPG");
    if(!window)
        return false;
    return true;
}

void Engine::LoadTextures()
{
    sf::Texture sprite;
    sprite.loadFromFile("C:\\Users\\Vipar\\Pictures\\sprite1.png");
    textureManager.AddTexture(sprite);
    testTile = new Tile(textureManager.GetTexture(0));
}

void Engine::RenderFrame()
{
    window->clear();
    testTile->Draw(0,0,window);
    window->display();
}

void Engine::MainLoop()
{
    //Loop until our window is closed
    while(window->isOpen())
    {
        ProcessInput();
        Update();
        RenderFrame();
    }
}

void Engine::Go()
{
    if(!Init())
        throw "Could not initialize Engine";
    MainLoop();
}
And here is the TextureManager.cpp
#include "TextureManager.h"
#include <vector>
#include <SFML\Graphics.hpp>
TextureManager::TextureManager()
{
}
TextureManager::~TextureManager()
{
}
void TextureManager::AddTexture(sf::Texture& texture)
{
textureList.push_back(texture);
}
sf::Texture& TextureManager::GetTexture(int index)
{
return textureList[index];
}
In the tutorial itself the Image type was used, but there was no Draw() method for Image, so I used Texture instead. Why won't the texture render on the screen?
The problem seems to be in:
void Engine::LoadTextures()
{
    sf::Texture sprite;
    sprite.loadFromFile("C:\\Users\\Vipar\\Pictures\\sprite1.png");
    textureManager.AddTexture(sprite);
    testTile = new Tile(textureManager.GetTexture(0));
}
You are creating a local sf::Texture and passing that to TextureManager::AddTexture. It's probably going out of scope at the end of the function, and is then no longer valid when you try to draw it. You can fix this by using a smart pointer:
void Engine::LoadTextures()
{
    // sf::Texture has no file-name constructor: create it, then load.
    auto sprite = std::make_shared<sf::Texture>();
    sprite->loadFromFile("C:\\Users\\Vipar\\Pictures\\sprite1.png");
    textureManager.AddTexture(sprite);
    testTile = new Tile(textureManager.GetTexture(0));
}
And changing TextureManager to use it:
void TextureManager::AddTexture(std::shared_ptr<sf::Texture> texture)
{
    textureList.push_back(texture);
}

sf::Texture& TextureManager::GetTexture(int index)
{
    return *textureList[index];
}
You'll also have to change textureList to be a std::vector<std::shared_ptr<sf::Texture>>, of course.
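For completeness, the adjusted TextureManager.h might then look roughly like this (a sketch; the original header wasn't posted):

#include <memory>
#include <vector>
#include <SFML\Graphics.hpp>

class TextureManager
{
public:
    TextureManager();
    ~TextureManager();
    void AddTexture(std::shared_ptr<sf::Texture> texture);
    sf::Texture& GetTexture(int index);
private:
    std::vector<std::shared_ptr<sf::Texture>> textureList;
};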

in c++ gtk::drawarea: how to refresh image

I am using gtkmm for a GUI in C++.
I have a Gtk::DrawingArea on which I have an image (filename):
class MyArea : public Gtk::DrawingArea
{
public:
    MyArea(string filename)
    {
        m_image = Gdk::Pixbuf::create_from_file(filename.c_str());
    }

    virtual bool on_draw(const Cairo::RefPtr<Cairo::Context>& cr)
    {
        if (!m_image)
            return false;

        Gtk::Allocation allocation = get_allocation();
        const int width = allocation.get_width();
        const int height = allocation.get_height();

        // Draw the image in the middle of the drawing area, or (if the image is
        // larger than the drawing area) draw the middle part of the image.
        Gdk::Cairo::set_source_pixbuf(cr, m_image, (width - m_image->get_width())/2, (height - m_image->get_height())/2);
        cr->paint();
        return true;
    }

    Glib::RefPtr<Gdk::Pixbuf> m_image;
};
I would like to have a function that changes the image (to filename2), but I can't find out how.
Could someone help me with this, please? Thank you.
As a member of your class:
void change_image(string filename2)
{
    m_image = Gdk::Pixbuf::create_from_file(filename2.c_str());
    queue_draw(); // invalidate the widget so on_draw() runs again with the new pixbuf
}
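Hypothetical usage, wired to a button (the file names are placeholders):

MyArea area("image1.png");
Gtk::Button button("Next image");
button.signal_clicked().connect([&area] {
    area.change_image("image2.png"); // repaints via queue_draw()
});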

DirectX background image

I tried to follow a tutorial from my 3D DirectX book with some modifications. The problem I encountered is that I want to draw an image but can't get it working. The example works with a camera since it's based on a small game, but I just want to load the image without any fancy camera transformations (draw the image as it is, without resizing, plus alpha blending).
This is my code which should contain the relevant parts.
.h
class Screen
{
private:
    IDirect3DTexture9* m_BGImage;
    ID3DXSprite* m_Sprite;
    IDirect3DDevice9* m_Device;
public:
    Screen();
    ~Screen();
    void setDevice(IDirect3DDevice9* device);
    void setBGImage(std::string path);
    void Draw();
    void onLostDevice();
    void onResetDevice();
    void Clean();
};
.cpp
Screen::Screen() {}

Screen::~Screen()
{
    Clean();
}

void Screen::setDevice(IDirect3DDevice9* device)
{
    m_Device = device;
    D3DXCreateSprite(m_Device, &m_Sprite);
}

void Screen::setBGImage(std::string path)
{
    D3DXCreateTextureFromFileA(m_Device, path.c_str(), &m_BGImage);
}

void Screen::Draw()
{
    // This is (so I believe) what causes the problem. If I use
    // D3DXSPRITE_OBJECTSPACE|D3DXSPRITE_DONOTMODIFY_RENDERSTATE like described
    // in the example from my book, I only get a black screen.
    m_Sprite->Begin(D3DXSPRITE_DONOTMODIFY_RENDERSTATE);
    m_Sprite->Draw(m_BGImage, 0, &D3DXVECTOR3(256.0f, 256.0f, 0.0f), 0, D3DCOLOR_XRGB(255, 255, 255));
    m_Sprite->Flush();
    m_Sprite->End();
}

void Screen::Clean()
{
    ReleaseCOM(m_Sprite);
    ReleaseCOM(m_BGImage);
}

void Screen::onLostDevice()
{
    m_Sprite->OnLostDevice();
}

void Screen::onResetDevice()
{
    m_Sprite->OnResetDevice();
    m_Device->SetRenderState(D3DRS_ALPHAREF, 10);
    m_Device->SetRenderState(D3DRS_ALPHAFUNC, D3DCMP_GREATER);
    m_Device->SetTextureStageState(0, D3DTSS_ALPHAARG1, D3DTA_TEXTURE);
    m_Device->SetTextureStageState(0, D3DTSS_ALPHAOP, D3DTOP_SELECTARG1);
    m_Device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
    m_Device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
    m_Device->SetTextureStageState(0, D3DTSS_TEXTURETRANSFORMFLAGS, D3DTTFF_COUNT2);
}
Edit: Almost forgot:
#define ReleaseCOM(x) { if(x){ x->Release(); x = 0; } }
You should use D3DXSPRITE_ALPHABLEND instead of D3DXSPRITE_DONOTMODIFY_RENDERSTATE when calling Begin. Specifying D3DXSPRITE_ALPHABLEND does not mean you must use alpha blending: if you don't call Screen::onResetDevice(), the alpha blend won't take effect, so D3DXSPRITE_ALPHABLEND here is just a parameter that makes Begin work. If you don't want to specify any flags when calling Begin, you can pass 0:
m_Sprite->Begin(0);
m_Sprite->Draw(m_BGImage, 0, &D3DXVECTOR3(256.0f, 256.0f, 0.0f), 0, D3DCOLOR_XRGB(255, 255, 255));
m_Sprite->Flush();
m_Sprite->End();
reference: http://msdn.microsoft.com/en-us/library/windows/desktop/bb205466(v=vs.85).aspx