Cocos2d-x 3.3 animation implementation - C++

I am new to cocos2d-x development. I have covered most of the basics of cocos2d-x (version 3.3) on Android using C++, and I can see there are lots of updates on cocos2dx.org.
I am currently developing a Tetris game for Android with cocos2d-x 3.3, and I want to know the best way to achieve polished animations such as ticking bombs, bomb explosions, a spinning score display, and balloons that pop up and disappear while the score increments. I have been wondering how to implement animation in cocos2d-x on Android using C++. We also need the game to support multiple screen sizes. I have searched for all of these points but could not find much information on Google.
I found the Sonar Systems tutorials very good for the beginner level and really appreciate them; I have watched all the videos Sonar Systems shares on YouTube and learned a lot there. Now I want to learn advanced animation in cocos2d-x on Android.
Any help will be appreciated.
Thank you

Here is my AppMacros.h file:
#ifndef __APPMACROS_H__
#define __APPMACROS_H__
#include "cocos2d.h"
#define DESIGN_RESOLUTION_480X320 0
#define DESIGN_RESOLUTION_480X800 1
#define DESIGN_RESOLUTION_1024X768 2
#define DESIGN_RESOLUTION_1280X800 3
#define DESIGN_RESOLUTION_2048X1536 4
/* If you want to switch design resolution, change next line */
#define TARGET_DESIGN_RESOLUTION_SIZE DESIGN_RESOLUTION_1280X800
typedef struct tagResource
{
    cocos2d::CCSize size;
    char directory[100];
} Resource;
static Resource smallResource = { CCSizeMake(480, 320), "iphone" };
static Resource mysmallResource = { CCSizeMake(800, 480), "iphone" };
static Resource mediumResource = { CCSizeMake(1024, 768), "ipad" };
static Resource myResource = { CCSizeMake(1280, 800), "ipad" };
static Resource largeResource = { CCSizeMake(2048, 1536), "ipadhd" };
#if (TARGET_DESIGN_RESOLUTION_SIZE == DESIGN_RESOLUTION_480X320)
static cocos2d::CCSize designResolutionSize = cocos2d::CCSizeMake(480, 320);
#elif (TARGET_DESIGN_RESOLUTION_SIZE == DESIGN_RESOLUTION_480X800)
static cocos2d::CCSize designResolutionSize = cocos2d::CCSizeMake(800, 480);
#elif (TARGET_DESIGN_RESOLUTION_SIZE == DESIGN_RESOLUTION_1024X768)
static cocos2d::CCSize designResolutionSize = cocos2d::CCSizeMake(1024, 768);
#elif (TARGET_DESIGN_RESOLUTION_SIZE == DESIGN_RESOLUTION_2048X1536)
static cocos2d::CCSize designResolutionSize = cocos2d::CCSizeMake(2048, 1536);
#elif (TARGET_DESIGN_RESOLUTION_SIZE == DESIGN_RESOLUTION_1280X800)
static cocos2d::CCSize designResolutionSize = cocos2d::CCSizeMake(1280, 800);
#else
#error unknown target design resolution!
#endif
// The font size 24 is designed for small resolution, so we should change it to fit for current design resolution
#define TITLE_FONT_SIZE (cocos2d::CCEGLView::sharedOpenGLView()->getDesignResolutionSize().width / myResource.size.width * 24)
#endif /* __APPMACROS_H__ */
Now I can use these macros in my AppDelegate, so my AppDelegate looks like this:
#include "AppDelegate.h"
#include "MenuLayer.h"
#include "AppMacros.h"
#include "SimpleAudioEngine.h"
#include "cocos2d.h"
USING_NS_CC;
AppDelegate::AppDelegate() {
}
AppDelegate::~AppDelegate() {
}
void AppDelegate::initGLContextAttrs()
{
    // set OpenGL context attributes; currently only six can be set:
    // red, green, blue, alpha, depth, stencil
    GLContextAttrs glContextAttrs = {8, 8, 8, 8, 24, 8};
    GLView::setGLContextAttrs(glContextAttrs);
}
bool AppDelegate::applicationDidFinishLaunching() {
    // initialize director
    auto director = Director::getInstance();
    auto glview = director->getOpenGLView();
    if (!glview) {
        glview = GLViewImpl::create("My Game");
        director->setOpenGLView(glview);
    }
    CCLOG("%s", "applicationDidFinishLaunching");
    // legacy 2.x equivalents, kept for reference:
    // CCDirector* pDirector = CCDirector::sharedDirector();
    // CCEGLView* pEGLView = CCEGLView::sharedOpenGLView();
    director->setOpenGLView(glview);
    // Set the design resolution
    glview->setDesignResolutionSize(designResolutionSize.width,
                                    designResolutionSize.height, kResolutionFixedWidth);
    // CCSize frameSize = pEGLView->getFrameSize();
    CCSize frameSize = director->getVisibleSize();
    // if the frame's width is larger than the width of the 1280x800 resource, select the large resource
    if (frameSize.width > myResource.size.width) {
        director->setContentScaleFactor(
            largeResource.size.width / designResolutionSize.width);
    }
    // if the frame's width is larger than the width of the medium resource, select the 1280x800 resource
    else if (frameSize.width > mediumResource.size.width) {
        director->setContentScaleFactor(
            myResource.size.width / designResolutionSize.width);
    } else if (frameSize.width > mysmallResource.size.width) {
        director->setContentScaleFactor(
            designResolutionSize.width / mediumResource.size.width);
    } else if (frameSize.width > smallResource.size.width) {
        director->setContentScaleFactor(
            designResolutionSize.width / mediumResource.size.width);
    }
    // otherwise select the small resource
    else {
        director->setContentScaleFactor(
            designResolutionSize.width / smallResource.size.width);
    }
    // turn off the FPS display
    director->setDisplayStats(false);
    // set FPS. the default value is 1.0/60 if you don't call this
    director->setAnimationInterval(1.0 / 60);
    CCScene *pScene = MenuLayer::scene();
    director->runWithScene(pScene);
    return true;
}
// This function will be called when the app becomes inactive. It is also invoked when a phone call comes in.
void AppDelegate::applicationDidEnterBackground() {
    CCDirector::sharedDirector()->stopAnimation();
    // if you use SimpleAudioEngine, it must be paused
    CocosDenshion::SimpleAudioEngine::sharedEngine()->pauseBackgroundMusic();
}
// this function will be called when the app is active again
void AppDelegate::applicationWillEnterForeground() {
    CCDirector::sharedDirector()->startAnimation();
    // if you use SimpleAudioEngine, it must be resumed here
    CocosDenshion::SimpleAudioEngine::sharedEngine()->resumeBackgroundMusic();
}
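As a side note on the multi-screen part of the question: the directory field in the Resource struct above is typically used to point cocos2d-x at the matching asset folder via the file search paths. The following is only a hypothetical sketch of how that could be added inside applicationDidFinishLaunching(); it assumes the iphone/ipad/ipadhd folders named in AppMacros.h actually exist under Resources/:
// pick the resource folder that matches the chosen content scale factor
std::vector<std::string> searchPaths;
if (frameSize.width > myResource.size.width) {
    searchPaths.push_back(largeResource.directory);   // "ipadhd"
} else if (frameSize.width > mysmallResource.size.width) {
    searchPaths.push_back(mediumResource.directory);  // "ipad"
} else {
    searchPaths.push_back(smallResource.directory);   // "iphone"
}
FileUtils::getInstance()->setSearchPaths(searchPaths);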

I am also new to cocos2d-x 3.3. I am using a particle effect (.plist) for the blast (explosion) animation.
First you need a .plist file describing the effect. There is an online editor for making .plist particle files; you can check it out
here: http://www.effecthub.com/particle2dx
Now you can use these lines of code:
// create the particle system from the .plist and give it a texture
CCParticleSystemQuad *system = CCParticleSystemQuad::create("explodeEffect.plist");
system->setTexture(CCTextureCache::sharedTextureCache()->addImage("yourplayer.png"));
system->setDuration(0.05);
system->setScale(3.0f);
// spawn the effect at the player's position and remove it automatically when finished
system->setPosition(yourplayer->getPosition().x, yourplayer->getPosition().y);
this->addChild(system, 1);
system->setAutoRemoveOnFinish(true);
I hope this helps.
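For the other effects mentioned in the question (ticking bomb, popping balloon with a score increment, spinning score), the usual cocos2d-x 3.x approach is to compose actions rather than particles. This is only a rough sketch of that idea; bomb, balloon, scoreLabel and addScore() are placeholder names for your own sprites, label and scoring helper:
// ticking bomb: pulse the sprite until you trigger the explosion effect above
auto tick = RepeatForever::create(Sequence::create(
    ScaleTo::create(0.25f, 1.1f),
    ScaleTo::create(0.25f, 1.0f),
    nullptr));
bomb->runAction(tick);

// popping balloon: scale up and fade out at the same time, bump the score, then remove the node
auto pop = Sequence::create(
    Spawn::create(ScaleTo::create(0.2f, 1.5f), FadeOut::create(0.2f), nullptr),
    CallFunc::create([this]() { addScore(10); }),   // hypothetical score helper
    RemoveSelf::create(),
    nullptr);
balloon->runAction(pop);

// spinning score display
scoreLabel->runAction(RepeatForever::create(RotateBy::create(1.0f, 360.0f)));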

Related

How to Render To Texture in DirectX12 & C++? What is the process?

I have been trying to figure out how to render the entire scene to a texture in DX12. I know how to do this in OpenGL, but I'm having trouble figuring it out in DirectX 12. Plus, there aren't many resources online on how it's done.
(Currently we have a 3D model rendering in the scene with a texture applied.)
Would anyone be able to point me towards some resources that I can use to learn render targets and rendering to a texture in DX12? Or any good websites?
Any help is much appreciated.
Kind regards,
Charlie
OpenGL is more like Direct3D 11, where Direct3D 12 and Vulkan are more alike in terms of design/usage and level of graphics knowledge needed to use them effectively. As such, you may find it easier to start with Direct3D 11 before jumping into Direct3D 12 rendering. The concepts and HLSL programming are all very similar between 11 & 12, so it can be a good place to start.
The biggest thing to know about DirectX 12 is that it makes the application (i.e. the programmer) responsible for many aspects that were handled by the Direct3D 11 Runtime: CPU/GPU synchronization, memory management, resource scheduling, etc. DirectX 12 is intended to give the experienced graphics programmer more control, and therefore the ability to achieve higher levels of CPU-side performance for the same complexity of rendering. This additional control and responsibility, however, can be overwhelming for someone new to graphics or DirectX. It's much easier in DX12 to write something that 'works on my machine' but won't run, or even crashes, on other people's machines.
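As one concrete illustration of the kind of bookkeeping DX12 hands to the application, here is a minimal sketch of CPU/GPU synchronization with a fence. The member names (m_commandQueue, m_fence, m_fenceValue, m_fenceEvent) and the ThrowIfFailed helper are assumptions in the style of the official samples, not code from any specific one:
// Ask the GPU to signal the fence when the queue reaches this point,
// then block the CPU until the GPU has actually gotten there.
const UINT64 fenceToWaitFor = ++m_fenceValue;
ThrowIfFailed(m_commandQueue->Signal(m_fence.Get(), fenceToWaitFor));
if (m_fence->GetCompletedValue() < fenceToWaitFor)
{
    ThrowIfFailed(m_fence->SetEventOnCompletion(fenceToWaitFor, m_fenceEvent));
    WaitForSingleObjectEx(m_fenceEvent, INFINITE, FALSE);
}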
With all that said, some good resources for starting with Direct3D 12:
There is a new 'landing page' for DirectX here with many useful links and resources for DirectX 12 development: https://devblogs.microsoft.com/directx/landing-page/
Official DirectX 12 samples written by the DirectX graphics team are at DirectX-Graphics-Samples.
Public samples written by the Xbox Advanced Technology Group are at Xbox-ATG-Samples. In particular, see the IntroGraphics samples which offer many basic samples in both DX11 & DX12 form.
The DirectX Tool Kit is an open-source C++ library that provides helpers for getting going with Direct3D development. There are both DirectX 11 and DirectX 12 versions. If you learn the DX 11 version first, it's pretty simple to move over to DX 12 from there as it handles a number of the 'house-keeping' tasks for you as you learn the new API.
As for the question of 'rendering-to-texture' in DirectX 12, there are some specific samples to look at:
SimpleMSAA does render-to-texture.
This HDR rendering tutorial for DirectX Tool Kit for DX12 does render-to-texture.
The second one uses this helper class: h / cpp.
class RenderTexture
{
public:
    RenderTexture(DXGI_FORMAT format) noexcept;

    void SetDevice(_In_ ID3D12Device* device, D3D12_CPU_DESCRIPTOR_HANDLE srvDescriptor, D3D12_CPU_DESCRIPTOR_HANDLE rtvDescriptor);

    void SizeResources(size_t width, size_t height);

    void ReleaseDevice() noexcept;

    void TransitionTo(_In_ ID3D12GraphicsCommandList* commandList, D3D12_RESOURCE_STATES afterState);

    void BeginScene(_In_ ID3D12GraphicsCommandList* commandList)
    {
        TransitionTo(commandList, D3D12_RESOURCE_STATE_RENDER_TARGET);
    }

    void EndScene(_In_ ID3D12GraphicsCommandList* commandList)
    {
        TransitionTo(commandList, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
    }

    void SetClearColor(DirectX::FXMVECTOR color)
    {
        DirectX::XMStoreFloat4(reinterpret_cast<DirectX::XMFLOAT4*>(m_clearColor), color);
    }

    ID3D12Resource* GetResource() const noexcept { return m_resource.Get(); }
    D3D12_RESOURCE_STATES GetCurrentState() const noexcept { return m_state; }

    void SetWindow(const RECT& rect);

    DXGI_FORMAT GetFormat() const noexcept { return m_format; }

private:
    Microsoft::WRL::ComPtr<ID3D12Device>   m_device;
    Microsoft::WRL::ComPtr<ID3D12Resource> m_resource;
    D3D12_RESOURCE_STATES                  m_state;
    D3D12_CPU_DESCRIPTOR_HANDLE            m_srvDescriptor;
    D3D12_CPU_DESCRIPTOR_HANDLE            m_rtvDescriptor;
    float                                  m_clearColor[4];
    DXGI_FORMAT                            m_format;
    size_t                                 m_width;
    size_t                                 m_height;
};
RenderTexture::RenderTexture(DXGI_FORMAT format) noexcept :
    m_state(D3D12_RESOURCE_STATE_COMMON),
    m_srvDescriptor{},
    m_rtvDescriptor{},
    m_clearColor{},
    m_format(format),
    m_width(0),
    m_height(0)
{
}
void RenderTexture::SetDevice(_In_ ID3D12Device* device, D3D12_CPU_DESCRIPTOR_HANDLE srvDescriptor, D3D12_CPU_DESCRIPTOR_HANDLE rtvDescriptor)
{
    if (device == m_device.Get()
        && srvDescriptor.ptr == m_srvDescriptor.ptr
        && rtvDescriptor.ptr == m_rtvDescriptor.ptr)
        return;

    if (m_device)
    {
        ReleaseDevice();
    }

    {
        D3D12_FEATURE_DATA_FORMAT_SUPPORT formatSupport = { m_format, D3D12_FORMAT_SUPPORT1_NONE, D3D12_FORMAT_SUPPORT2_NONE };
        if (FAILED(device->CheckFeatureSupport(D3D12_FEATURE_FORMAT_SUPPORT, &formatSupport, sizeof(formatSupport))))
        {
            throw std::runtime_error("CheckFeatureSupport");
        }

        UINT required = D3D12_FORMAT_SUPPORT1_TEXTURE2D | D3D12_FORMAT_SUPPORT1_RENDER_TARGET;
        if ((formatSupport.Support1 & required) != required)
        {
#ifdef _DEBUG
            char buff[128] = {};
            sprintf_s(buff, "RenderTexture: Device does not support the requested format (%u)!\n", m_format);
            OutputDebugStringA(buff);
#endif
            throw std::runtime_error("RenderTexture");
        }
    }

    if (!srvDescriptor.ptr || !rtvDescriptor.ptr)
    {
        throw std::runtime_error("Invalid descriptors");
    }

    m_device = device;
    m_srvDescriptor = srvDescriptor;
    m_rtvDescriptor = rtvDescriptor;
}
void RenderTexture::SizeResources(size_t width, size_t height)
{
    if (width == m_width && height == m_height)
        return;

    if (width > UINT32_MAX || height > UINT32_MAX)
    {
        throw std::out_of_range("Invalid width/height");
    }

    if (!m_device)
        return;

    m_width = m_height = 0;

    auto heapProperties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_DEFAULT);

    D3D12_RESOURCE_DESC desc = CD3DX12_RESOURCE_DESC::Tex2D(m_format,
        static_cast<UINT64>(width),
        static_cast<UINT>(height),
        1, 1, 1, 0, D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET);

    D3D12_CLEAR_VALUE clearValue = { m_format, {} };
    memcpy(clearValue.Color, m_clearColor, sizeof(clearValue.Color));

    m_state = D3D12_RESOURCE_STATE_RENDER_TARGET;

    // Create a render target
    ThrowIfFailed(
        m_device->CreateCommittedResource(&heapProperties, D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES,
            &desc,
            m_state, &clearValue,
            IID_GRAPHICS_PPV_ARGS(m_resource.ReleaseAndGetAddressOf()))
    );

    SetDebugObjectName(m_resource.Get(), L"RenderTexture RT");

    // Create RTV.
    m_device->CreateRenderTargetView(m_resource.Get(), nullptr, m_rtvDescriptor);

    // Create SRV.
    m_device->CreateShaderResourceView(m_resource.Get(), nullptr, m_srvDescriptor);

    m_width = width;
    m_height = height;
}
void RenderTexture::ReleaseDevice() noexcept
{
    m_resource.Reset();
    m_device.Reset();

    m_state = D3D12_RESOURCE_STATE_COMMON;
    m_width = m_height = 0;

    m_srvDescriptor.ptr = m_rtvDescriptor.ptr = 0;
}

void RenderTexture::TransitionTo(_In_ ID3D12GraphicsCommandList* commandList, D3D12_RESOURCE_STATES afterState)
{
    TransitionResource(commandList, m_resource.Get(), m_state, afterState);
    m_state = afterState;
}

void RenderTexture::SetWindow(const RECT& output)
{
    // Determine the render target size in pixels.
    auto width = size_t(std::max<LONG>(output.right - output.left, 1));
    auto height = size_t(std::max<LONG>(output.bottom - output.top, 1));

    SizeResources(width, height);
}
You'd use it like this:
// Setup
m_scene = std::make_unique<DX::RenderTexture>( /* format that matches your resource and your Pipeline State Objects you will use to render */ );
m_scene->SetClearColor( /* color value you use to clear */ );

m_scene->SetDevice(m_device,
    /* CPU descriptor handle for your scene as a SRV texture */,
    /* CPU descriptor handle for your scene as a RTV texture */);

m_scene->SetWindow( /* provide viewport size for your render texture */ );

// Reset command list and allocator.
// Transition the backbuffer target into the correct state to allow for
// Clear the render texture
CD3DX12_CPU_DESCRIPTOR_HANDLE rtvDescriptor(
    /* CPU descriptor handle for your scene as a RTV texture */
    static_cast<INT>(m_backBufferIndex), m_rtvDescriptorSize);
CD3DX12_CPU_DESCRIPTOR_HANDLE dsvDescriptor(m_dsvDescriptorHeap->GetCPUDescriptorHandleForHeapStart());

m_commandList->OMSetRenderTargets(1, &rtvDescriptor, FALSE, &dsvDescriptor);
m_commandList->ClearRenderTargetView(rtvDescriptor, /* clear color */, 0, nullptr);
m_commandList->ClearDepthStencilView(dsvDescriptor, D3D12_CLEAR_FLAG_DEPTH, 1.0f, 0, 0, nullptr);

// Set the viewport and scissor rect.
D3D12_VIEWPORT viewport = { 0.0f, 0.0f, /* width/height of your render texture */, D3D12_MIN_DEPTH, D3D12_MAX_DEPTH };
D3D12_RECT scissorRect = { 0, 0, /* width/height of your render texture */ };
m_commandList->RSSetViewports(1, &viewport);
m_commandList->RSSetScissorRects(1, &scissorRect);

// Tell helper we are starting the render
m_scene->BeginScene(m_commandList);

/* Do rendering to m_commandList */

m_scene->EndScene(m_commandList);
Here we've scheduled the transition to render target resource state, populated all the draw calls, and then inserted a barrier back to the pixel shader resource state. At that point, you can use the descriptor handle to your render-texture's SRV to render. As with all things DirectX 12, nothing happens until you actually close the command-list and submit it for execution.
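To actually consume the render texture in that final pass, its SRV is bound like any other texture. A rough sketch in plain D3D12 follows; the root parameter index 0, the m_srvDescriptorHeap member, and srvGpuHandle are placeholders for whatever your root signature and descriptor heap layout really use:
// Bind the heap that holds the render-texture SRV and point the root signature at it.
ID3D12DescriptorHeap* heaps[] = { m_srvDescriptorHeap.Get() };
m_commandList->SetDescriptorHeaps(_countof(heaps), heaps);
m_commandList->SetGraphicsRootDescriptorTable(0, srvGpuHandle);

// Draw a full-screen triangle (the vertex shader generates positions from SV_VertexID).
m_commandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
m_commandList->DrawInstanced(3, 1, 0, 0);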

Make Windows MFC Game Loop Faster

I am creating a billiards game and am having major problems with tunneling at high speeds. I figured using linear interpolation for animations would help quite a bit, but the problem persists. To see this, I drew a circle at the previous few positions an object has been. At the highest velocity the ball can travel, the path looks like this:
Surely, these increments of advancement are much too large even after using linear interpolation.
At each frame, every object's location is updated based on the amount of time since the window was last drawn. I noticed that the average time for the window to be redrawn is somewhere between 70 and 80ms. I would really like this game to work at 60 fps, so this is about 4 or 5 times longer than what I am looking for.
Is there a way to change how often the window is redrawn? Here is how I am currently redrawing the screen:
#include "pch.h"
#include "framework.h"
#include "ChildView.h"
#include "DoubleBufferDC.h"
const int FrameDuration = 16;
void CChildView::OnPaint()
{
    CPaintDC paintDC(this);       // device context for painting
    CDoubleBufferDC dc(&paintDC); // device context for painting
    Graphics graphics(dc.m_hDC);  // Create GDI+ graphics context

    mGame.OnDraw(&graphics);

    if (mFirstDraw)
    {
        mFirstDraw = false;
        SetTimer(1, FrameDuration, nullptr);

        LARGE_INTEGER time, freq;
        QueryPerformanceCounter(&time);
        QueryPerformanceFrequency(&freq);

        mLastTime = time.QuadPart;
        mTimeFreq = double(freq.QuadPart);
    }

    LARGE_INTEGER time;
    QueryPerformanceCounter(&time);
    long long diff = time.QuadPart - mLastTime;
    double elapsed = double(diff) / mTimeFreq;
    mLastTime = time.QuadPart;

    mGame.Update(elapsed);
}

void CChildView::OnTimer(UINT_PTR nIDEvent)
{
    RedrawWindow(NULL, NULL, RDW_UPDATENOW);
    Invalidate();

    CWnd::OnTimer(nIDEvent);
}
EDIT: Upon request, here is how the actual drawing is done:
void CGame::OnDraw(Gdiplus::Graphics* graphics)
{
    // Draw the background
    graphics->DrawImage(mBackground.get(), 0, 0,
        mBackground->GetWidth(), mBackground->GetHeight());

    mTable->Draw(graphics);

    Pen pen(Color(500, 128, 0), 1);
    Pen penW(Color(1000, 1000, 1000), 1);

    this->mCue->Draw(graphics);

    for (shared_ptr<CBall> ball : this->mBalls)
    {
        ball->Draw(graphics);
    }
    for (shared_ptr<CBall> ball : this->mSunkenSolidBalls)
    {
        ball->Draw(graphics);
    }
    for (shared_ptr<CBall> ball : this->mSunkenStripedBalls)
    {
        ball->Draw(graphics);
    }

    this->mPowerBar->Draw(graphics);
}
Game::OnDraw will call Draw on all of the game items, which draw on the graphics object they receive as an argument.
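As a side note on the tunneling described above: one common mitigation, independent of how often the window repaints, is to sub-step the simulation so that no single physics step is ever larger than a fixed maximum. This is only a sketch, assuming CGame::Update can safely be called repeatedly with small time steps; Advance and MaxStep are hypothetical names:
// Advance the simulation in capped steps so a slow frame (e.g. 80 ms)
// becomes several small updates instead of one large one.
const double MaxStep = 1.0 / 120.0;   // assumed cap per physics step

void CGame::Advance(double elapsed)
{
    while (elapsed > 0.0)
    {
        double step = (elapsed > MaxStep) ? MaxStep : elapsed;
        Update(step);     // the existing per-frame update, now called per sub-step
        elapsed -= step;
    }
}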

Cocos 2dx game increasing in memory every time a scene transition occurs

I am making a cocos2d-x game, but the memory usage increases with every level transition. For debugging purposes I am calling the same scene again and again on a touch event. Each level is generated by changing the parameters of the following code. Initially I thought the memory was increasing because there were more objects in the higher levels, but even when calling the same level again, the memory occupied keeps increasing.
#include "GameScene.h"
#include "MainMenuScene.h"
#include "GameOverScene.h"
#include "Levels.h"
#define COCOS2D_DEBUG 1
USING_NS_CC;
float theta = 0;
int r = 0;
int levelNo = 0;
int controlable = 0;   // flag to check whether the user can control the ball or not
int rMax = 0;          // max radius of circle
float objectTime;      // stores the inverse of speed
int secondCount = 0;   // second-hand value in the timer
int minuteCount = 0;   // minute-hand value in the timer
float obstacleSpeed = 0;
Label *timer;

GameScene::~GameScene()
{
    rotationPoint->removeAllChildrenWithCleanup(true);
    obstacleRotationPoint->removeAllChildrenWithCleanup(true);
    this->removeAllChildrenWithCleanup(true);
}
Scene* GameScene::createScene(int level)
{
    // 'scene' is an autorelease object
    auto scene = Scene::create();
    controlable = 0;
    r = 0;
    theta = 0;

    // 'layer' is an autorelease object
    levelNo = level;
    rMax = levels[levelNo].ringCount * 15;           // setting various parameters
    obstacleSpeed = levels[levelNo].obstacleSpeed;
    objectTime = 1.0 / levels[levelNo].speed;
    secondCount = 0;
    minuteCount = 0;

    auto layer = GameScene::create();

    // add layer as a child to scene
    scene->addChild(layer);

    // return the scene
    return scene;
}
// on "init" you need to initialize your instance
bool GameScene::init()
{
//////////////////////////////
// 1. super init first
if ( !Layer::init() )
{
return false;
}
controlable=0;
distance=rMax;
visibleSize = Director::getInstance()->getVisibleSize();
Vec2 origin = Director::getInstance()->getVisibleOrigin();
#if COMPILE_FOR_MOBILE == 1
auto listener = EventListenerTouchOneByOne::create();
listener->setSwallowTouches(true);
listener->onTouchBegan = CC_CALLBACK_2(GameScene::onTouchBegan, this);
_eventDispatcher->addEventListenerWithSceneGraphPriority(listener, this);
#endif
goal = DrawNode::create();
goal->drawDot(Vec2(visibleSize.width/2 + origin.x, visibleSize.height/2 + origin.y), 5, Color4F(100,0,0,1));
this->addChild(goal,1); // drawing the goal
rotationPoint = Node::create();
rotationPoint->setPosition(visibleSize.width/2 + origin.x, visibleSize.height/2 + origin.y);
this->addChild(rotationPoint, 2);
//Setting the exit button
auto exitLabel = Label::createWithTTF("Exit","fonts/Marker Felt.ttf",10);
exitButtonWidth=exitLabel->getContentSize().width;
exitButtonHeight=exitLabel->getContentSize().height;
exitLabel->setPosition(Point(visibleSize.width-exitButtonWidth,visibleSize.height-exitButtonHeight));
this->addChild(exitLabel);
//setting the clock
timer = Label::createWithTTF("00:00","fonts/Marker Felt.ttf",10);
timer->setPosition(Point(timer->getContentSize().width,visibleSize.height-timer->getContentSize().height));
this->schedule(schedule_selector(GameScene::updateClock),1.0f); //scedule to call upDateClock function every 1.0 sec
this->addChild(timer);
obstacleRotationPoint = Node::create();
obstacleRotationPoint->setPosition(visibleSize.width/2 + origin.x, visibleSize.height/2 + origin.y);
this->addChild(obstacleRotationPoint, 3);
float theta=0;
snake[0] = DrawNode::create();
snake[0]->drawDot(Vec2(0,0),3,Color4F(100,110,0,1));
theta+=2*M_PI/150;
//this->addChild(snake[0],2);
rotationPoint->addChild(snake[0]);
// fixedPoint->addChild(snake[0]);
//loop to draw the concentric circles
for(r=15;r<=rMax;r+=15)
{
for(theta=0;theta<=2*M_PI;theta+=2*M_PI/r){
pathNode = DrawNode::create();
pathNode->drawDot(Vec2(r*cos(theta)+origin.x+visibleSize.width/2,r*sin(theta)+origin.y+visibleSize.height/2),1,Color4F(0,0,10,1));
//pathNode->autorelease();
this->addChild(pathNode,1);
//this->removeChild(pathNode);
}
}
controlable=0;
this->scheduleUpdate();
return true;
}
bool GameScene::onTouchBegan(cocos2d::Touch *touch, cocos2d::Event *event)
{
    // check if exit button region was clicked
    _eventDispatcher->removeAllEventListeners();
    auto scene = GameScene::createScene(levelNo);
    Director::getInstance()->replaceScene(scene);
    return true;
}

// function updates every frame
void GameScene::update(float dt) {
}
The last part of the init function, where I add pathNode, is what increases my memory requirement every time I transition to the scene. I believe I am releasing everything in my destructor.
First of all, I don't recommend using global variables, which you have at the top of your file (especially the timer label). You should keep everything in the class.
Second, you should check whether the destructor is called in the first place.
Third, you can also try using a "Loading" screen between two levels and cleaning up all unused textures like this:
setOnExitCallback([&](){
    Director::getInstance()->getTextureCache()->removeUnusedTextures();
});
Fourth, I'd recommend not recreating the GameScene at all, but instead creating functions like restartLevel() and loadLevel() that remove the unnecessary stuff and load the new level, as sketched below.
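A minimal sketch of that idea, only to illustrate the shape of it: restartLevel and loadLevel are hypothetical member functions, and mPathNodes is an assumed cocos2d::Vector<DrawNode*> member that keeps track of the DrawNodes created per level:
void GameScene::loadLevel(int level)
{
    levelNo = level;
    // create the per-level DrawNodes (as in init()) and remember them
    for (int radius = 15; radius <= levels[level].ringCount * 15; radius += 15) {
        auto node = DrawNode::create();
        // ... drawDot() calls as in init() ...
        this->addChild(node, 1);
        mPathNodes.pushBack(node);
    }
}

void GameScene::restartLevel()
{
    // remove only the per-level nodes instead of rebuilding the whole scene
    for (auto node : mPathNodes) {
        node->removeFromParentAndCleanup(true);
    }
    mPathNodes.clear();
    loadLevel(levelNo);
}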

Sequential off-screen rendering / screen capture without windowing system using OpenSceneGraph

I am currently working on an off-screen renderer so that I can do Mutual Information registration for real-world scenes. I use OpenSceneGraph to cope with the large data and automatic loading. I am having trouble getting a framebuffer capture within a sequential, single-threaded program.
Well, I have this class (header):
#include <osg/ref_ptr>
#include <osg/Array>
#include <osg/ImageUtils>
#include <osgGA/StateSetManipulator>
#include <osgViewer/Viewer>
#include <osg/GraphicsContext>
#include <osg/Texture2D>
#include <osg/FrameBufferObject>
#include <osgDB/WriteFile>
#include <osg/Referenced>
#include <osg/Vec3>
#include <osg/Image>
#include <osg/State>
#include <string>
#include <chrono>
#include <thread>
#include <assert.h>
#include "ImagingPrimitives.h"
class BoundRenderScene {
public:
    BoundRenderScene();
    virtual ~BoundRenderScene();

    void NextFrame(void);

    inline OpenThreads::Mutex* GetMutexObject(void) { return &_mutex; }

    inline osg::Image* GetFrame(void)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
        return _frame.get();
    }

    inline void GetFrame(osg::Image* img)
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
        if(_frame.valid() && (img!=NULL) && img->valid())
        {
            glReadBuffer(GL_BACK);
            img->readPixels(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY(), GL_RGB,GL_UNSIGNED_BYTE);
            uint w = img->s(), h = img->t(), d = img->r(), c = uint(img->getPixelSizeInBits()/8);
            /*
             * bare testing write op
             * osgDB::writeImageFile(const_cast<const osg::Image&>(*img), "/tmp/testimg.png");
             */
        }
    }

    inline void SetCameraConfiguration(CameraConfiguration* configuration) { _camera_configuration = configuration; }

    inline void SetCameraMatrix(osg::Matrixd camera_matrix) { _camera_matrix = camera_matrix; }

    inline void SetScene(osg::Node* scene) { _scene = scene; }

    inline void Initialize(void) {
        if(!_initialized)
            _init();
        else
            _re_init();
    }

protected:
    osgViewer::Viewer _viewer;
    osg::Matrixd _camera_matrix;
    osg::ref_ptr<osg::Texture2D> _tex;
    osg::ref_ptr<osg::FrameBufferObject> _fbo;
    mutable osg::ref_ptr<osg::Image> _frame;
    osg::ref_ptr<osg::Node> _scene;
    osg::ref_ptr<osg::GraphicsContext::Traits> _traits;
    osg::ref_ptr<osg::GraphicsContext> _gc;
    CameraConfiguration* _camera_configuration;
    SnapshotCallback* cb;
    std::string _filepath;

private:
    void _init(void);
    void _re_init(void);

    bool _initialized;

    mutable OpenThreads::Mutex _mutex;

    osg::Matrixd pre_transform;
    osg::Matrixd transformation;
};
Also, because many off-screen rendering and screen capture examples work with post-/final-draw callbacks, I copied the callback structure from the "osgdistortion" example, but added the mutex for synchronisation:
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
    inline SnapshotCallback(OpenThreads::Mutex* mtx_obj, std::string filepath, int width, int height) : _filepath(filepath), _output_to_file(false), _mutex(mtx_obj)
    {
        _image = new osg::Image();
        _image->allocateImage(width, height, 1, GL_RGB, GL_UNSIGNED_BYTE);
        if(filepath!="")
            _output_to_file = true;
    }

    inline virtual void operator() (osg::RenderInfo& renderInfo) const
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
        osg::Camera* camera = renderInfo.getCurrentCamera();
        osg::Viewport* viewport = camera ? camera->getViewport() : 0;
        if(viewport && _image.valid())
        {
            glReadBuffer(GL_BACK);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*_image, _filepath);
            }
        }
    }

    inline virtual void operator() (const osg::Camera& camera) const
    {
        OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
        osg::Viewport* viewport = camera.getViewport();
        if(viewport && _image.valid())
        {
            glReadBuffer(GL_BACK);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*_image, _filepath);
            }
        }
    }

    std::string _filepath;
    bool _output_to_file;
    mutable OpenThreads::Mutex* _mutex;
    mutable osg::ref_ptr<osg::Image> _image;
};
I initialize and render the scene as follows:
#include "BoundRenderScene.h"
void BoundRenderScene::_init(void)
{
    if(_camera!=NULL)
        _viewer.setDone(true);

    _traits->x = 0;
    _traits->y = 0;
    _traits->width = _camera_configuration->GetSX();
    _traits->height = _camera_configuration->GetSY();
    _traits->red = 8;
    _traits->green = 8;
    _traits->blue = 8;
    _traits->alpha = 0;
    _traits->depth = 24;
    _traits->windowDecoration = false;
    _traits->pbuffer = true;
    _traits->doubleBuffer = true;
    _traits->sharedContext = 0x0;

    if(_gc.get()!=NULL)
    {
        bool release_success = _gc->releaseContext();
        if(!release_success)
            std::cerr << "Error releasing Graphics Context.";
    }

    _gc = osg::GraphicsContext::createGraphicsContext(_traits.get());
    _viewer.getCamera()->setGraphicsContext(_gc.get());
    _viewer.setThreadingModel(osgViewer::Viewer::SingleThreaded);
    _viewer.setUpThreading();
    _viewer.realize();

    _frame->allocateImage(_camera_configuration->GetSX(), _camera_configuration->GetSY(), 1, GL_RGB, GL_UNSIGNED_BYTE);

    _viewer.getCamera()->getOrCreateStateSet();
    _viewer.getCamera()->setRenderTargetImplementation(osg::Camera::PIXEL_BUFFER);

    cb = new SnapshotCallback(&_mutex,_filepath, _camera_configuration->GetSX(), _camera_configuration->GetSY());
    //_viewer.getCamera()->setPostDrawCallback( cb );

    // Clear colour "black" for representing "no information" => background elimination in natural image, pls.
    _viewer.getCamera()->setClearColor(osg::Vec4f(0.25f, 0.25f, 0.25f, 1.0f));
    _viewer.getCamera()->setClearMask(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
    _viewer.getCamera()->setDrawBuffer(GL_BACK);
    _viewer.getCamera()->setReadBuffer(GL_BACK);
    _viewer.getCamera()->setViewport(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY());
    _viewer.getCamera()->setProjectionMatrix(osg::Matrixd::perspective(osg::RadiansToDegrees(_camera_configuration->GetFoV()), _camera_configuration->GetAspectRatio(), 0.1, 150.0));

    // looking in geo-coord system
    _viewer.getCamera()->setViewMatrix(osg::Matrixd::lookAt(osg::Vec3d(0.0, 0.0, -1.0), osg::Vec3d(0.0, 0.0, 1.0), osg::Vec3d(0.0, 1.0, 0.0)));

    _viewer.getCamera()->attach(osg::Camera::COLOR_BUFFER, _frame.get());
    _viewer.getCamera()->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);

    _tex->setTextureSize(_camera_configuration->GetSX(), _camera_configuration->GetSY());
    _tex->setInternalFormat(GL_RGB);
    _tex->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
    _tex->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
    _tex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
    _tex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
    _tex->setResizeNonPowerOfTwoHint(false);
    _tex->setImage(0,_frame.get());

    _fbo->setAttachment(osg::Camera::COLOR_BUFFER, osg::FrameBufferAttachment(_tex.get()));

    _viewer.setDone(false);
    _viewer.setSceneData(_scene.get());
    _viewer.setCameraManipulator(0x0);
}
void BoundRenderScene::NextFrame(void)
{
    OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
    if(_frame.valid() && !_viewer.done())
    {
        osg::Matrixd inverse_cam = osg::Matrixd::inverse(_camera_matrix);
        transformation = inverse_cam * pre_transform;
        _viewer.getCamera()->setViewMatrix(transformation);
        _viewer.updateTraversal();
        _viewer.frame();
    }
    else
        std::cout << "Viewer or Camera invalid." << std::endl;
}
The main workflow looks like this (simplified):
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;

/*
 * setting initial parameters
 * fill poses with camera positions to render, for registration
 */

renderer._init();

for(uint i = 0; i < poses.size(); i++)
{
    renderer.SetCameraMatrix(poses.at(i));
    renderer.NextImage();
    sleep(0.04); // to get the 25fps frame limit
    osg::Image* reg_image = renderer.GetImage();
    /*
     * Do further processing
     */
}
Now comes the crux: the OpenSceneGraph example "osgprenderer" (included in OSG) does off-screen rendering using an osg::Camera::DrawCallback, like my SnapshotCallback. Unfortunately, the operator() function in my case never gets called in my scene graph, so that way of screen capture doesn't work for me. It's also rather inconvenient, as the rest of the Mutual Information procedure is a rather sequential pipeline.
Other wrappers (https://github.com/xarray/osgRecipes/blob/master/integrations/osgberkelium/osgberkelium.cpp) use methods similar to my "void GetFrame(osg::Image* img)" method, where the image is actively read using "readPixels". That is very convenient for my workflow, but the method always returns a blank image. It doesn't crash, but it doesn't do its job either.
The method that does work is "osg::Image* GetFrame(void)", which returns the bound/attached FBO image. It is similar to the "osgdistortion" example. It works for rendering one or two images, but after some time, rendering and processing get out of sync and the application crashes as follows:
[---FIRST FRAME---]
GraphicsCostEstimator::calibrate(..)
cull_draw() 0x1998ca0
ShaderComposer::~ShaderComposer() 0x35a4d40
Renderer::compile()
OpenGL extension 'GL_ARB_vertex_buffer_object' is supported.
OpenGL extension 'GL_EXT_secondary_color' is supported.
OpenGL extension 'GL_EXT_fog_coord' is supported.
OpenGL extension '' is not supported.
OpenGL extension 'GL_EXT_packed_depth_stencil' is supported.
Setting up osg::Camera::FRAME_BUFFER_OBJECT
end cull_draw() 0x1998ca0
[processing]
[ SECOND FRAME ]
cull_draw() 0x1998ca0
OpenGL extension 'GL_ARB_fragment_program' is supported.
OpenGL extension 'GL_ARB_vertex_program' is supported.
OpenGL extension 'GL_ARB_shader_objects' is supported.
OpenGL extension 'GL_ARB_vertex_shader' is supported.
OpenGL extension 'GL_ARB_fragment_shader' is supported.
OpenGL extension 'GL_ARB_shading_language_100' is supported.
OpenGL extension 'GL_EXT_geometry_shader4' is supported.
OpenGL extension 'GL_EXT_gpu_shader4' is supported.
OpenGL extension 'GL_ARB_tessellation_shader' is supported.
OpenGL extension 'GL_ARB_uniform_buffer_object' is supported.
OpenGL extension 'GL_ARB_get_program_binary' is supported.
OpenGL extension 'GL_ARB_gpu_shader_fp64' is supported.
OpenGL extension 'GL_ARB_shader_atomic_counters' is supported.
glVersion=4.5, isGlslSupported=YES, glslLanguageVersion=4.5
Warning: detected OpenGL error 'invalid operation' at end of SceneView::draw()
end cull_draw() 0x1998ca0
[-FROM 3rd FRAME ONWARDS-]
[workload, matrix setup]
[_viewer.frame()]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[next frame]
[BREAKING]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[more work]
Segmentation fault (core dumped)
So, the question is:
I had a look into the source files from osg for the Viewer-related classes, but I was not able to determine where the error
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
comes from. Any idea where to start looking for it?
For sequential rendering and screen capture, which method is the best to use within OSG?
How can I obtain the mutex of the normal osg::Viewer, so as to sync the renderer with the rest of my pipeline? (The renderer is single-threaded.)
Any other suggestions from experienced OpenSceneGraph off-screen renderers and screen-capture users?
As deeper research showed, releasing the graphics context in the class destructor freed the OpenGL pipeline, BUT it also deallocated the stateset-bound textures of the loaded scene/model, even though the model itself was not discarded (as described in the question, it is re-used in the following passes). So, in further render passes, the render pipeline tried to access OSG assets which had already been released along with the GL context.
In code, it changed from:
BoundRenderScene::~BoundRenderScene() {
    // TODO Auto-generated destructor stub
    _viewer.setDone(true);
    _viewer.setReleaseContextAtEndOfFrameHint(true);
    _gc->releaseContext();
#ifdef DEBUG
    std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
to:
BoundRenderScene::~BoundRenderScene() {
    // TODO Auto-generated destructor stub
    _viewer.setDone(true);
    _viewer.setReleaseContextAtEndOfFrameHint(true);
#ifdef DEBUG
    std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
This resolved the OpenSceneGraph-internal error messages. Now, in order to solve the frame capture problem itself, I implemented the callback from osgprenderer:
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
    inline SnapshotCallback(std::string filepath) : _filepath(filepath), _output_to_file(false), _image(NULL)
    {
        if(filepath!="")
            _output_to_file = true;
        _image = new osg::Image();
    }

    inline virtual void operator() (osg::RenderInfo& renderInfo) const
    {
        osg::Camera* camera = renderInfo.getCurrentCamera();
        osg::Viewport* viewport = camera ? camera->getViewport() : 0;
        if(viewport)
        {
            glReadBuffer(camera->getDrawBuffer());
            _image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
            }
        }
    }

    inline virtual void operator() (const osg::Camera& camera) const
    {
        osg::Viewport* viewport = camera.getViewport();
        if(viewport)
        {
            glReadBuffer(camera.getDrawBuffer());
            _image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
            _image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
            if(_output_to_file)
            {
                osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
            }
        }
    }

    inline osg::Image* GetImage(void)
    {
        return reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL));
    }

protected:
    std::string _filepath;
    bool _output_to_file;
    mutable osg::ref_ptr<osg::Image> _image;
};
Now, with the cloned buffer instead of the actual image buffer (idea taken over from osgscreencapture example), I do get the real image without memory errors.
For double-buffered rendering, though, I have to somehow render the scene twice so that the right buffer contains the objects' images, but for my use case this is currently less of an issue (I/O-bound rendering, not operation-bound).
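For completeness, wiring the callback up could look roughly like this; it is an assumed sketch based on the commented-out setPostDrawCallback line in _init() (osg::Camera also offers setFinalDrawCallback, which fires after the whole frame has been drawn):
// in BoundRenderScene::_init(), after the camera has been configured:
cb = new SnapshotCallback(_filepath);
_viewer.getCamera()->setFinalDrawCallback(cb);

// later, once per pose:
_viewer.frame();                                   // render one frame
osg::ref_ptr<osg::Image> capture = cb->GetImage(); // deep-copied capture of that frame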
So, the main function looks as follows:
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;

/*
 * setting initial parameters
 * fill poses with camera positions to render, for registration
 */

renderer._init();

for(uint i = 0; i < poses.size(); i++)
{
    renderer.SetCameraMatrix(poses.at(i));
    renderer.NextImage();
    renderer.NextImage();
    osg::Image* reg_image = renderer.GetImage();
    /*
     * Do further processing
     */
}

SFML 2 drawing does not work when connected with OGRE

I am currently working on connecting OGRE and SFML.
SFML should be used for 2D drawing, network stuff and input.
OGRE is for the 3D graphics.
Currently the whole thing is on Linux.
What works: Connecting OGRE and SFML. First I create an SFML render window, then I grab the handle of this window and give it to the OGRE render window while creating it. I can use the SFML events now. I did not test the network stuff, but I am sure that will work too.
What does not work: Drawing in the SFML window.
Case 1: SFML and OGRE are not connected. OGRE does not have the SFML window handle and has its own window. SFML still can't draw in its own window! The main loop executes a maximum of 3 times and then just stops. Nothing more happens. A few seconds later (about 20 or so) I get a memory access violation and the program ends.
Case 2: SFML and OGRE are connected. A similar thing happens: the main loop executes exactly 53 times, nothing gets drawn, and then the program stops with the terminal message "aborted" (actually it's "Abgebrochen", because it's in German).
The strange behaviour also happens when I let SFML draw into a sf::RenderTexture instead of the sfml_window.
Here is my code:
#include <SFML/Graphics.hpp>
#include <SFML/Window.hpp>
#include <SFML/System.hpp>
#include <iostream>
#include <OGRE/Ogre.h>
#include <vector>
#include <stdio.h>
int main(int argc, char * argv[])
{
    if(argc == 1)
        return -1;

    // start with "1" and you get 1 window, start with "0" and you get two
    bool together = atoi(argv[1]);

    // create the SFML window
    sf::RenderWindow sfml_window(sf::VideoMode(800, 600), "test");
    sf::WindowHandle sfml_system_handle = sfml_window.getSystemHandle();
    sfml_window.setVerticalSyncEnabled(true);
    std::cout<<sfml_system_handle<<std::endl;

    // init ogre
    Ogre::Root * ogre_root = new Ogre::Root("", "", "");

    std::vector<Ogre::String> plugins;
    plugins.push_back("/usr/lib/x86_64-linux-gnu/OGRE-1.8.0/RenderSystem_GL");

    for(auto p : plugins)
    {
        ogre_root->loadPlugin(p);
    }

    const Ogre::RenderSystemList& render_systems = ogre_root->getAvailableRenderers();
    if(render_systems.size() == 0)
    {
        std::cerr<<"no rendersystem found"<<std::endl;
        return -1;
    }

    Ogre::RenderSystem * render_system = render_systems[0];
    ogre_root->setRenderSystem(render_system);
    ogre_root->initialise( false, "", "");

    // create the ogre window
    Ogre::RenderWindow * ogre_window= NULL;
    {
        Ogre::NameValuePairList parameters;
        parameters["FSAA"] = "0";
        parameters["vsync"] = "true";

        // if started with 1, connect the windows
        if(together) parameters["externalWindowHandle"] = std::to_string(sfml_system_handle);

        ogre_window = ogre_root->createRenderWindow("ogre window", 800, 600, false, &parameters);
    }

    // ogre stuff
    Ogre::SceneManager * scene = ogre_root->createSceneManager(Ogre::ST_GENERIC, "Scene");
    Ogre::SceneNode * root_node = scene->getRootSceneNode();
    Ogre::Camera * cam = scene->createCamera("Cam");
    Ogre::SceneNode * cam_node = root_node->createChildSceneNode("cam_node");
    cam_node->attachObject(cam);

    Ogre::Viewport * vp = ogre_window->addViewport(cam);
    vp->setAutoUpdated(false);
    vp->setBackgroundColour(Ogre::ColourValue(0, 1, 1));

    ogre_window->setAutoUpdated(false);
    ogre_root->clearEventTimes();

    // sfml image loading
    sf::Texture ring;
    std::cout<<"ring loading: "<<ring.loadFromFile("ring.png")<<std::endl;
    sf::Sprite sprite;
    sprite.setTexture(ring);

    // main loop
    int counter = 0;
    while(!ogre_window->isClosed() && sfml_window.isOpen())
    {
        std::cout<<counter<<std::endl;
        counter++;

        std::cout<<"a"<<std::endl;

        // sfml events
        sf::Event event;
        while(sfml_window.pollEvent(event))
        {
            if(event.type == sf::Event::Closed)
            {
                sfml_window.close();
            }
        }

        std::cout<<"b"<<std::endl;
        std::cout<<"c"<<std::endl;

        ogre_root->renderOneFrame();

        std::cout<<"d"<<std::endl;

        // here the strange behaviour happens
        // if this line (draw) isn't present, everything works
        sfml_window.pushGLStates();
        sfml_window.draw(sprite);
        sfml_window.popGLStates();

        vp->update();

        std::cout<<"e"<<std::endl;

        sfml_window.display();

        // only needs to be done for separated windows
        // sfml display updates otherwise, both use double buffering
        if(!together) ogre_window->update(true);
    }

    return 0;
}
Help would be really appreciated.
EDIT: I added the pushGLStates() and popGLStates() calls; I forgot those earlier!
Not an answer really, but too long for a comment:
ogre_window = ogre_root->createRenderWindow("ogre window", 800, 600, false, &parameters);
Are you sure that it's okay to pass, via &parameters, the address of an object that you destroy on the very next line?