OpenVG draw circle - C++

My hardware has two layers. I draw the background on layer 0.
On layer 1 I draw a circle like this (a colored circle; the rest of the area should be transparent).
My code is:
// Use the image as a pattern fill
vgPaintPattern(maskPaint, maskImage);
vgSetPaint(maskPaint, VG_FILL_PATH);

// Align the pattern image over the arc (paint-to-user transform)
vgSeti(VG_MATRIX_MODE, VG_MATRIX_FILL_PAINT_TO_USER);
vgLoadIdentity();
vgTranslate(-HALO_W / 2, -HALO_W / 2);

// Position the path on the drawing surface
vgSeti(VG_MATRIX_MODE, VG_MATRIX_PATH_USER_TO_SURFACE);
vgLoadIdentity();
vgTranslate(pointer_x, floater_y);

vgSeti(VG_FILL_RULE, VG_NON_ZERO);

// Build a pie-slice arc and fill it
path = vgCreatePath(VG_PATH_FORMAT_STANDARD, VG_PATH_DATATYPE_F, 1.0f, 0.0f, 0, 0,
                    VG_PATH_CAPABILITY_ALL);
vguArc(path, 0.0f, 0.0f, HALO_W, HALO_W, startAngle, angleExtent, VGU_ARC_PIE);
vgRemovePathCapabilities(path, VG_PATH_CAPABILITY_APPEND_FROM | VG_PATH_CAPABILITY_APPEND_TO |
                               VG_PATH_CAPABILITY_MODIFY |
                               VG_PATH_CAPABILITY_TRANSFORM_FROM |
                               VG_PATH_CAPABILITY_TRANSFORM_TO |
                               VG_PATH_CAPABILITY_INTERPOLATE_FROM |
                               VG_PATH_CAPABILITY_INTERPOLATE_TO);
vgDrawPath(path, VG_FILL_PATH);
vgDestroyPath(path);

// Detach the paint and the pattern image
vgSetPaint(VG_INVALID_HANDLE, VG_FILL_PATH);
vgPaintPattern(maskPaint, VG_INVALID_HANDLE);
But the result is this:
The center part is black, not transparent. What am I doing wrong?

Related

glRotatef rotates the camera instead of the object

I need to create a virtual trackball with C++. I have made all the calculations and I can find the rotation angle and axis values. My object rotates as I intended with every mouse drag, but the problem is that after every rotation it goes back to its initial position.
So I figured out that I need to get the current modelview matrix, multiply it by the rotation matrix, and then load the result back into OpenGL.
I have tried that, but unfortunately glRotatef rotates my camera instead of the object. Here is my function to draw the scene:
//--- Drawing code ---------------------------------------
/** Drawing code for one frame. */
void drawGLScene ()
{
    // Real time in seconds.
    GLfloat t = frameStat->frameStart( width, height );

    // Clear the frame buffer and the depth buffer
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );

    // Set current model-view transform:
    glLoadIdentity();
    // Looking from "camera" to the "center" point (y-axis defines the "up" vector)
    gluLookAt( 0.0f, 0.0f, 10.0f,
               center[ 0 ], center[ 1 ], center[ 2 ],
               0.0f, 1.0f, 0.0f );

    if (dragging)
        glLoadMatrixf(matModelView.array());

    glScalef( zoom, zoom, zoom );

#ifdef USE_VBO
    // scene rendering using VBO buffers:
    scene.render();
#else
    // client-side vertex arrays (cube):
    glVertexPointer( 3, GL_FLOAT, 6 * sizeof(GLfloat), vert );     // specify vertex data array
    glColorPointer( 3, GL_FLOAT, 6 * sizeof(GLfloat), vert + 3 );  // specify vertex color array
    glDrawElements( GL_QUADS, sizeof(ind) / sizeof(GLubyte), GL_UNSIGNED_BYTE, ind );
#endif

    frameStat->swapBuffers( drawStat );  // SDL_GL_SwapWindow() and frame statistics
}
//--- Event handling -------------------------------------
/** Function to release/destroy our resources and restore the old desktop. */
void Quit ( int returnCode )
{
    scene.deleteBuffers();

    if ( glcontext )
        SDL_GL_DeleteContext( glcontext );

    // Clean up the window ..
    SDL_Quit();
    // .. and exit appropriately
    exit( returnCode );
}
And here are my mouse handling functions; I am excluding the release function since it is trivial.
//--------------------------------------------------------
// Mouse handling:
void handleMouseMove ( SDL_Event &ev )
{
    if ( ev.button.button == SDL_BUTTON_LEFT && dragging )
    {
        rotation.set(MI_IDENTITY);
        rotation.rotate(5, 0.0f, 1.0f, 0.0f);
        matModelView = matModelView * rotation;
    }
}

void handleMousePress ( SDL_Event &ev )
{
    if ( ev.button.button == SDL_BUTTON_LEFT )
    {
        dragging = true;

        glMatrixMode(GL_MODELVIEW);
        glGetFloatv(GL_MODELVIEW_MATRIX, (GLfloat *)matModelView.array());

        rotation.set(MI_IDENTITY);
    }
}
matModelView and rotation are 4x4 matrices. I have not included my trackball calculations in this code; here I just expect the object to rotate by 5 degrees about the y axis (as in the code above) for as long as the mouse is being dragged.
Maybe it is something simple, but I am stuck at this point. Any guidance or code samples would be great.
Try changing:
matModelView = matModelView * rotation;
To this:
matModelView = rotation * matModelView;
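Matrix multiplication order decides where the new rotation is composed: under OpenGL's column-vector convention, M1 * M2 applies M2 to vertices first, so matModelView * rotation adds the increment before the existing modelview transform, while rotation * matModelView adds it after (a row-major matrix class flips which ordering means which). One way to sidestep the ambiguity is to keep the camera out of the accumulated matrix entirely and only accumulate the object rotation. Below is a minimal sketch of that idea using OpenGL's own matrix stack; the trackball array and the helper names are hypothetical, not taken from the code above.

#include <GL/gl.h>
#include <GL/glu.h>

GLfloat trackball[16];   // accumulated object rotation, initialized to identity

void applyDrag( float angleDeg, float ax, float ay, float az )
{
    glMatrixMode( GL_MODELVIEW );
    glPushMatrix();
    glLoadIdentity();
    glRotatef( angleDeg, ax, ay, az );   // incremental drag rotation
    glMultMatrixf( trackball );          // compose with the previous rotation
    glGetFloatv( GL_MODELVIEW_MATRIX, trackball );
    glPopMatrix();
}

void drawObject()
{
    glLoadIdentity();
    gluLookAt( 0.0f, 0.0f, 10.0f,  0.0f, 0.0f, 0.0f,  0.0f, 1.0f, 0.0f );  // camera stays fixed
    glMultMatrixf( trackball );          // rotate only the object
    // ... draw geometry ...
}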

DirectX 11 LINELIST_TOPOLOGY: incomplete 2D rectangle at the top-left coordinate

I'm doing something with DirectX 11 and got to drawing a rectangle outline (empty, not filled). It seemed simple at first (line-list topology, 8 indices), but when I get it on the screen I see that my rectangle is incomplete at the top-left coordinate: there is a pixel of the background color there. The code is not complicated at all; the vertices are in 2D space:
SIMPLEVERTEX gvFrameVertices[4] =
{
    { XMFLOAT3( 0.0f,  0.0f, 1.0f ), XMFLOAT2( 0.0f, 0.0f ) },
    { XMFLOAT3( 1.0f,  0.0f, 1.0f ), XMFLOAT2( 1.0f, 0.0f ) },
    { XMFLOAT3( 1.0f, -1.0f, 1.0f ), XMFLOAT2( 1.0f, 1.0f ) },
    { XMFLOAT3( 0.0f, -1.0f, 1.0f ), XMFLOAT2( 0.0f, 1.0f ) }
};
indices:
WORD gvRectangularIndices[8] = { 0, 1, 1, 2, 2, 3, 3, 0 };
The pixel shader just returns the color given in the constant buffer:
float4 PS_PANEL(PS_INPUT input) : SV_Target
{
    return fontcolor;
}
Function code itself:
VOID rectangle(INT _left, INT _top, INT _width, INT _height, XMFLOAT4 _color)
{
    XMMATRIX scale;
    XMMATRIX translate;
    XMMATRIX world;

    scale     = XMMatrixScaling( _width, _height, 1.0f );
    translate = XMMatrixTranslation( _left, gvHeight - _top, 1.0f );
    world     = scale * translate;

    gvConstantBufferData.world = XMMatrixTranspose( world );
    gvConstantBufferData.index = 1.0f;
    gvConstantBufferData.color = _color;

    gvContext->PSSetShader( gvPanelPixelshader, NULL, 0 );
    gvContext->UpdateSubresource( gvConstantBuffer, 0, NULL, &gvConstantBufferData, 0, 0 );

    gvContext->IASetIndexBuffer( gvLinelistIndexBuffer, DXGI_FORMAT_R16_UINT, 0 );
    gvContext->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_LINELIST );
    gvContext->DrawIndexed( 8, 0, 0 );

    gvContext->IASetIndexBuffer( gvTriangleslistIndexBuffer, DXGI_FORMAT_R16_UINT, 0 );
    gvContext->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
};
gvHeight - _top: I'm using an orthographic matrix for the projection, so the coordinate origin is at the bottom-left; that is why I need to subtract to get the proper Y coordinate.
gvOrthographicProjection = XMMatrixOrthographicOffCenterLH( 0.0f, gvWidth, 0.0f, gvHeight, 0.01f, 100.0f );
Do you have any idea what could cause this one-pixel gap in the rectangle in my case, or do I need to supply more code? (I don't really want to paste all of the initialization, because it is very plain and simple, written at a C++ and DirectX amateur level. :)
Thank you :)

Rendering visually perfect squares in OpenGL?

In OpenGL's fixed pipeline, by default, specifying vertex coordinates using glVertex3f is equivalent to specifying a location between -1.0 and +1.0 in screen space. Therefore, given a set of 4 perfectly adjacent screen-space vertices using GL_TRIANGLE_STRIP (or even GL_QUADS), and unless your window is already perfectly square, you will always render a rectangle instead of a perfect square...
Knowing the width, height and aspect ratio of a window, is there some way to correct this?
I have tried multiplying the vertex coordinates by the aspect ratio, which unfortunately seemed to achieve the same visual effect.
Here's the full source code I'm currently using:
#include "main.h"
#pragma comment(lib, "glut32.lib")
int g_width = 800;
int g_height = 600;
int g_aspectRatio = double(g_width) / double(g_height);
bool g_bInitialized = false;
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(0, 0);
glutInitWindowSize(g_width, g_height);
glutCreateWindow("OpenGL Test App");
glutDisplayFunc(onRender);
glutReshapeFunc(onSize);
glutIdleFunc(onRender);
glutMainLoop();
return 0;
}
void onInit()
{
glFrontFace(GL_CW);
}
void onRender()
{
if(!g_bInitialized)
onInit();
static float angle = 0.0f;
const float p = 0.5f * g_aspectRatio;
glLoadIdentity();
gluLookAt(
0.0f, 0.0f, 10.0f,
0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f
);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glScalef(1, -1, 1); // Flip the Y-axis
glRotatef(angle, 0.0f, 1.0f, 0.0f);
glBegin(GL_TRIANGLE_STRIP);
{
glColor4f(1.0, 0.0, 0.0, 1.0); // Red
glVertex3f(-p, -p, 0.0); // Top-Left
glColor4f(0.0, 1.0, 0.0, 1.0); // Green
glVertex3f(p, -p, 0.0); // Top-Right
glColor4f(0.0, 0.0, 1.0, 1.0); // Blue
glVertex3f(-p, p, 0.0); // Bottom-Left
glColor4f(1.0, 1.0, 0.0, 1.0); // Yellow
glVertex3f(p, p, 0.0); // Bottom-Left
}
glEnd();
angle += 0.6f;
glutSwapBuffers();
}
void onSize(int w, int h)
{
g_width = max(w, 1);
g_height = max(h, 1);
g_aspectRatio = double(g_width) / double(g_height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glViewport(0, 0, w, h);
gluPerspective(45, g_aspectRatio, 1, 1000);
glMatrixMode(GL_MODELVIEW);
}
EDIT:
This has been solved... In the above code, I had defined g_aspectRatio as an int instead of a floating-point value. Therefore, its value was always 1...
In my (old) experience, that's just why you have an aspect ratio argument to gluPerspective().
The manual page says:
In general, the aspect ratio in gluPerspective should match
the aspect ratio of the associated viewport. For example, aspect = 2.0
means the viewer's angle of view is twice as wide in x as it is in y.
If the viewport is twice as wide as it is tall, it displays the image
without distortion.
Check your g_aspectRatio value.
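For reference, here is a sketch of the question's onSize() with the aspect ratio stored as a floating-point value; as the edit above notes, declaring g_aspectRatio as an int truncates it to 1, so gluPerspective always received an aspect of 1.0.

double g_aspectRatio = 1.0;   // double, not int

void onSize(int w, int h)
{
    g_width  = max(w, 1);
    g_height = max(h, 1);
    g_aspectRatio = double(g_width) / double(g_height);

    glViewport(0, 0, g_width, g_height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45.0, g_aspectRatio, 1.0, 1000.0);
    glMatrixMode(GL_MODELVIEW);
}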
by default, specifying vertex coordinates using glVertex3f is equivalent to specifying a location between -1.0 and +1.0 in screen space
Wrong. Coordinates passed to OpenGL through glVertex or a glVertexPointer vertex array are in model space. The transformation to screen space happens by transforming into view space with the modelview matrix and from view space into clip space with the projection matrix. Then clipping is applied and the perspective divide is performed to reach normalized device coordinates.
Hence the value range for glVertex can be whatever you like it to be. By applying the right projection matrix you can make your view space span [-aspect, aspect] × [-1, 1] if you like.
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-aspect, aspect, -1, 1, -1, 1);
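Here aspect is simply the window's width divided by its height; below is a small sketch of a reshape callback that keeps it up to date (assuming a GLUT-style setup like the one in the question).

void reshape(int w, int h)
{
    const double aspect = double(w) / double(h > 0 ? h : 1);

    glViewport(0, 0, w, h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(-aspect, aspect, -1.0, 1.0, -1.0, 1.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

With this projection, a quad spanning, say, [-0.5, 0.5] in both x and y stays square on screen regardless of the window's shape.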

glutBitmapString shows nothing

I'm trying to show the FPS on the screen with the freeglut function glutBitmapString, but it shows nothing. Here is my code. Can anyone figure out where the problem is?
void PrintFPS()
{
    frame++;
    time = glutGet(GLUT_ELAPSED_TIME);

    if (time - timebase > 100) {
        cout << "FPS:\t" << frame * 1000.0 / (time - timebase) << endl;

        char out[30];   // stack buffer instead of a leaked new[]
        sprintf(out, "FPS:%4.2f", frame * 1000.0f / (time - timebase));

        glColor3f(1.0f, 1.0f, 1.0f);
        glRasterPos2f(20, 20);
        glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24, (unsigned char*)out);

        timebase = time;
        frame = 0;
    }
}
void RenderScene(void)
{
    // Clear the window with current clearing color
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);

    GLfloat vRed[]    = { 1.0f, 0.0f, 0.0f, 0.5f };
    GLfloat vYellow[] = { 1.0f, 1.0f, 0.0f, 1.0f };

    shaderManager.UseStockShader(GLT_SHADER_IDENTITY, vYellow);
    //triangleBatch.Draw();
    squareBatch.Draw();

    PrintFPS();
    glutSwapBuffers();
}
It is supposed to show the FPS at the top left of the screen.
The position that's provided by glRasterPos is treated just like a vertex, and transformed by the current model-view and projection matrices. In your example, you specify the text to be positioned at (20, 20), which I'm guessing is supposed to be screen (viewport, really) coordinates.
If it's the case that you're rendering 3D geometry, particularly with a perspective projection, your text may be clipped out. However, there are (at least) two simple solutions (presented in order of code simplicity):
use one of the glWindowPos functions instead of glRasterPos. These functions bypass the model-view and projection transformations entirely (see the sketch after the code below).
use glMatrixMode, glPushMatrix, and glPopMatrix to temporarily switch to window coordinates for rendering:
// Switch to window coordinates to render
glMatrixMode( GL_MODELVIEW );
glPushMatrix();
glLoadIdentity();

glMatrixMode( GL_PROJECTION );
glPushMatrix();
glLoadIdentity();
gluOrtho2D( 0, windowWidth, 0, windowHeight );

glRasterPos2i( 20, 20 ); // or wherever in window coordinates
glutBitmapString( ... );

glPopMatrix();

glMatrixMode( GL_MODELVIEW );
glPopMatrix();
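For the first option, here is a minimal sketch (glWindowPos2i is core since OpenGL 1.4; older contexts expose it through the ARB_window_pos extension), reusing the out buffer from the question's PrintFPS():

glColor3f( 1.0f, 1.0f, 1.0f );
glWindowPos2i( 20, 20 );   // window coordinates, unaffected by model-view/projection
glutBitmapString( GLUT_BITMAP_TIMES_ROMAN_24, (unsigned char*)out );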

Direct3D 11 (C++): Updating texture coordinates in a constant buffer?

I'm trying to make a rather basic 2D Engine with Direct3D.
I made a LoadImage() function which stores all of the rather static state of the image in an object (shaders, vertex buffers, samplers, etc.).
I am planning to do the positioning of the vertices with matrices in constant buffers.
However, I would also like to have a DrawImage() function, which would have a parameter to tell what part of the texture should be drawn (clipped), so I would have to update the texture coordinates.
Since the vertex buffer is already pre-defined, I wondered if there is a way to update the texture coordinates via a constant buffer sent to the vertex shader.
I hope my question is clear enough; if you have any doubts, look at the code below.
bool GameManager::GMLoadImage(Image* pImage, const char* pkcFilePath, ImageDesc* pImDesc)
{
    pImage = new Image();
    ID3D11ShaderResourceView* pColorMap = (pImage)->GetpColorMap();

    /// CREATE SHADER RESOURCE VIEW (from file) ///
    HRESULT result = D3DX11CreateShaderResourceViewFromFileA(m_pDevice,
                                                             pkcFilePath,
                                                             0,
                                                             0,
                                                             &pColorMap,
                                                             0);
    if (FAILED(result)) {
        MessageBoxA(NULL, "Error loading ShaderResourceView from file", "Error", MB_OK);
        return false;
    }

    /// RECEIVE TEXTURE DESC ///
    ID3D11Resource* pColorTex;
    pColorMap->GetResource(&pColorTex);
    ((ID3D11Texture2D*)pColorTex)->GetDesc(&((pImage)->GetColorTexDesc()));
    pColorTex->Release();

    /// CREATE VERTEX BUFFER ///
    D3D11_TEXTURE2D_DESC colorTexDesc = pImage->GetColorTexDesc();

    float halfWidth  = static_cast<float>(colorTexDesc.Width) / 2.0f;
    float halfHeight = static_cast<float>(colorTexDesc.Height) / 2.0f;

    Vertex.PosTex vertices[] =
    {
        { XMFLOAT3(  halfWidth,  halfHeight, 1.0f ), XMFLOAT2( 1.0f, 0.0f ) },
        { XMFLOAT3(  halfWidth, -halfHeight, 1.0f ), XMFLOAT2( 1.0f, 1.0f ) },
        { XMFLOAT3( -halfWidth, -halfHeight, 1.0f ), XMFLOAT2( 0.0f, 1.0f ) },
        { XMFLOAT3( -halfWidth, -halfHeight, 1.0f ), XMFLOAT2( 0.0f, 1.0f ) },
        { XMFLOAT3( -halfWidth,  halfHeight, 1.0f ), XMFLOAT2( 0.0f, 0.0f ) },
        { XMFLOAT3(  halfWidth,  halfHeight, 1.0f ), XMFLOAT2( 1.0f, 0.0f ) }
    };

    D3D11_BUFFER_DESC vertexDesc;
    ZeroMemory(&vertexDesc, sizeof(vertexDesc));
    vertexDesc.Usage     = D3D11_USAGE_DEFAULT;
    vertexDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
    vertexDesc.ByteWidth = sizeof(Vertex.PosTex) * 6;

    D3D11_SUBRESOURCE_DATA resourceData;
    resourceData.pSysMem = vertices;

    ID3D11Buffer* pVBuffer = pImage->GetpVertexBuffer();
    result = m_pDevice->CreateBuffer(&vertexDesc, &resourceData, &pVBuffer);
    if (FAILED(result))
    {
        MessageBoxA(NULL, "Error Creating VBuffer", "Error", MB_OK);
        return false;
    }

    /// SET POINTER TO IMAGEDESC
    ImageDesc* pThisImDesc = pImage->GetpImageDesc();
    pThisImDesc = pImDesc;

    return true;
}
bool GameManager::GMDrawImage(Image* pImage, const CLIPRECT& rkClip)
{
    ImageDesc* thisImDesc = pImage->GetpImageDesc();
    if ( thisImDesc != m_pImDesc ) {
        m_pImDesc = thisImDesc;
        m_pContext->IASetInputLayout(m_pImDesc->pInputLayout);
        m_pContext->IASetPrimitiveTopology(m_pImDesc->Topology);
        m_pContext->VSSetShader(m_pImDesc->pSolidColorVS, 0, 0);
        m_pContext->PSSetShader(m_pImDesc->pSolidColorPS, 0, 0);
        m_pContext->PSSetSamplers(0, 1, &m_pImDesc->pSampler);
        m_pContext->OMSetBlendState(m_pImDesc->pBlendState, NULL, 0xFFFFFFFF);
    }

    UINT stride = m_pImDesc->VertexSize;
    UINT offset = 0;

    ID3D11Buffer* pVBuffer = pImage->GetpVertexBuffer();
    ID3D11ShaderResourceView* pColorMap = pImage->GetpColorMap();

    m_pContext->IASetVertexBuffers(0, 1, &pVBuffer, &stride, &offset);
    m_pContext->PSSetShaderResources(0, 1, &pColorMap);

    //set constant buffers?
    m_pContext->Draw(6, 0);

    return true;   // missing in the original; the function is declared bool
}
Yes, as long as your texture coordinates are hardcoded to 0.0 through 1.0 in your vertex buffer, you can use a texture transformation matrix. It's a 3x3 matrix that will transform your 2D texture coordinates.
For instance, if you want to use the bottom-right quadrant of your texture (assuming top-left is origin), you could use the following matrix:
0.5 0.0 0.0
0.0 0.5 0.0
0.5 0.5 1.0
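For example, multiplying the row vector (u, v, 1) by this matrix maps (u, v) = (0, 0) to (0.5, 0.5) and (1, 1) to (1, 1), which is exactly the bottom-right quadrant.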
Then, in the vertex shader, you multiply the texture coordinates by that matrix like so:
float3 coord = float3(In.texCoord, 1.0);
coord = mul(coord, textureTransform);
Out.texCoord = coord.xy / coord.z;
In.texCoord and Out.texCoord are the float2 input and output texture coordinates, respectively (note that vector-matrix multiplication in HLSL is done with mul(), not with the * operator).
The division by Z is optional if you are only doing affine transformations (translations, scaling, rotations and skews) so feel free to remove it if not needed.
To generalize the matrix:
Sx 0.0 0.0
0.0 Sy 0.0
Tx Ty 1.0
Where (Tx, Ty) is the position of the clip area and (Sx, Sy) is its size, both in texture coordinates.
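On the C++ side, one way to get this matrix to the vertex shader is to add a field for it to the constant buffer, next to the world matrix. Below is a rough sketch using DirectXMath; MakeTexTransform(), cbData and the texTransform field are hypothetical names, the clip rectangle is assumed to be given in pixels, and a full float4x4 (with the 3x3 in its upper-left corner) is used because HLSL pads float3x3 constant-buffer fields in a way plain C++ arrays do not match.

#include <DirectXMath.h>
using namespace DirectX;

// Builds the Sx/Sy/Tx/Ty matrix above for a clip rectangle given in pixels,
// normalized by the texture size.
XMMATRIX MakeTexTransform( float clipX, float clipY, float clipW, float clipH,
                           float texW, float texH )
{
    const float sx = clipW / texW;   // size of the clip area in texture coordinates
    const float sy = clipH / texH;
    const float tx = clipX / texW;   // position of the clip area in texture coordinates
    const float ty = clipY / texH;

    return XMMatrixSet( sx,   0.0f, 0.0f, 0.0f,
                        0.0f, sy,   0.0f, 0.0f,
                        tx,   ty,   1.0f, 0.0f,
                        0.0f, 0.0f, 0.0f, 1.0f );
}

// In GMDrawImage(), where the "//set constant buffers?" comment is, copy the
// matrix into your constant-buffer struct and upload it with UpdateSubresource();
// transpose first if your shaders use HLSL's default column-major matrix packing:
//   cbData.texTransform = XMMatrixTranspose(
//       MakeTexTransform( clipX, clipY, clipW, clipH, texW, texH ) );

In the shader, declare the field as a float4x4 in the constant buffer and cast it with (float3x3)textureTransform before the multiply, so the vertex shader snippet above still applies.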