Calculating velocity with a consistent speed in a basic C++ engine

What I'm trying to achieve is a sprite moving towards another sprite in a 2D environment. I started with the basic Mx = Ax - Bx approach, but I noticed that the closer the sprite gets to the target, the more it slows down. So I tried to create a percentage/ratio based on the velocity, so that each of x and y gets its share of a speed allowance. However, it's acting very strangely and only seems to work when Mx and My are positive.
Here's the code extract:
ballX = ball->GetX();
ballY = ball->GetY();
targX = target->GetX();
targY = target->GetY();
ballVx = (targX - ballX);
ballVy = (targY - ballY);
percentComp = (100 / (ballVx + ballVy));
ballVx = (ballVx * percentComp)/10000;
ballVy = (ballVy * percentComp)/10000;
The /10000 is there to slow the sprite's movement.

Assuming you want the sprite to move at a constant speed, you can do a linear fade on both the X and Y position, like this:
#include <stdio.h>

int main(int, char **)
{
    float startX = 10.0f, startY = 20.0f;
    float endX = 35.0f, endY = -2.5f;
    int numSteps = 20;
    for (int i = 0; i < numSteps; i++)
    {
        float percentDone = ((float)i) / (numSteps - 1);
        float curX = (startX * (1.0f - percentDone)) + (endX * percentDone);
        float curY = (startY * (1.0f - percentDone)) + (endY * percentDone);
        printf("Step %i: percentDone=%f curX=%f curY=%f\n", i, percentDone, curX, curY);
    }
    return 0;
}

Thanks for the responses. I got it working now by normalising the vector instead of the whole percentage thing; here's what I have now:
ballX = ball->GetX();
ballY = ball->GetY();
targX = target->GetX();
targY = target->GetY();
ballVx = (targX - ballX);
ballVy = (targY - ballY);
vectLength = sqrt((ballVx*ballVx) + (ballVy*ballVy));
ballVx = (ballVx / vectLength)/10;
ballVy = (ballVy / vectLength)/10;
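
For reference, here is a small self-contained sketch of the same normalise-and-scale idea, with an explicit speed and a guard for the frame where the ball reaches the target (the Vec2 struct, speed, and dt are illustrative additions, not part of the engine above):

#include <cmath>

struct Vec2 { float x, y; };

// Returns the per-frame displacement that moves at `speed` units per second
// towards `target`, clamped so the ball never overshoots in a single step.
Vec2 stepTowards(Vec2 ball, Vec2 target, float speed, float dt)
{
    float dx = target.x - ball.x;
    float dy = target.y - ball.y;
    float dist = std::sqrt(dx * dx + dy * dy);
    float maxStep = speed * dt;
    if (dist <= maxStep || dist == 0.0f)
        return { dx, dy };               // close enough: snap to the target
    float scale = maxStep / dist;        // normalise, then scale to maxStep
    return { dx * scale, dy * scale };
}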


Interpreting the Visual Studio profiler: is this subtraction slow? Can I make all this any faster?

I'm using the Visual Studio profiler for the first time and I'm trying to interpret the results. Looking at the percentages on the left, I found this subtraction's time cost a bit strange:
Other parts of the code contain more complex expressions, like:
Even a simple multiplication seems way faster than the subtraction:
Other multiplications take way longer and I really don't get why, like this:
So I guess my question is whether there is anything weird going on here.
Complex expressions take longer than that subtraction, and some expressions take way longer than other, similar ones. I ran the profiler several times and the distribution of the percentages is always like this. Am I just interpreting this wrong?
Update:
I was asked to give the profile for the whole function, so here it is, even though it's a bit big. I ran the function inside a for loop for 1 minute and got 50k samples. The function contains a double loop. I include the code as text first for ease, followed by the profiling screenshots. Note that the code in the text is slightly updated.
for (int i = 0; i < NUMBER_OF_CONTOUR_POINTS; i++) {
    vec4 contourPointV(contour3DPoints[i], 1);
    float phi = angles[i];
    float xW = pose[0][0] * contourPointV.x + pose[1][0] * contourPointV.y + contourPointV.z * pose[2][0] + pose[3][0];
    float yW = pose[0][1] * contourPointV.x + pose[1][1] * contourPointV.y + contourPointV.z * pose[2][1] + pose[3][1];
    float zW = pose[0][2] * contourPointV.x + pose[1][2] * contourPointV.y + contourPointV.z * pose[2][2] + pose[3][2];
    float x = -G_FU_STRICT * xW / zW;
    float y = -G_FV_STRICT * yW / zW;
    x = (x + 1) * G_WIDTHo2;
    y = (y + 1) * G_HEIGHTo2;
    y = G_HEIGHT - y;
    phi -= extraTheta;
    if (phi < 0) phi += CV_PI2;
    int indexForTable = phi * oneKoverPI;
    //vec2 ray(cos(phi), sin(phi));
    vec2 ray(cos_pre[indexForTable], sin_pre[indexForTable]);
    vec2 ray2(-ray.x, -ray.y);
    float outerStepX = ray.x * step;
    float outerStepY = ray.y * step;
    cv::Point2f outerPoint(x + outerStepX, y + outerStepY);
    cv::Point2f innerPoint(x - outerStepX, y - outerStepY);
    cv::Point2f contourPointCV(x, y);
    cv::Point2f contourPointCVcopy(x, y);
    bool cut = false;
    if (!isInView(outerPoint.x, outerPoint.y) || !isInView(innerPoint.x, innerPoint.y)) {
        cut = true;
    }
    bool outside2 = true; bool outside1 = true;
    if (cut) {
        outside2 = myClipLine(contourPointCV.x, contourPointCV.y, outerPoint.x, outerPoint.y, G_WIDTH - 1, G_HEIGHT - 1);
        outside1 = myClipLine(contourPointCVcopy.x, contourPointCVcopy.y, innerPoint.x, innerPoint.y, G_WIDTH - 1, G_HEIGHT - 1);
    }
    myIterator innerRayMine(contourPointCVcopy, innerPoint);
    myIterator outerRayMine(contourPointCV, outerPoint);
    if (!outside1) {
        innerRayMine.end = true;
        innerRayMine.prob = true;
    }
    if (!outside2) {
        outerRayMine.end = true;
        innerRayMine.prob = true;
    }
    vec2 normal = -ray;
    float dfdxTerm = -normal.x;
    float dfdyTerm = normal.y;
    vec3 point3D = vec3(xW, yW, zW);
    cv::Point contourPoint((int)x, (int)y);
    float Xc = point3D.x; float Xc2 = Xc * Xc; float Yc = point3D.y; float Yc2 = Yc * Yc; float Zc = point3D.z; float Zc2 = Zc * Zc;
    float XcYc = Xc * Yc; float dfdxFu = dfdxTerm * G_FU; float dfdyFv = dfdyTerm * G_FU; float overZc2 = 1 / Zc2; float overZc = 1 / Zc;
    pixelJacobi[0] = (dfdyFv * (Yc2 + Zc2) + dfdxFu * XcYc) * overZc2;
    pixelJacobi[1] = (-dfdxFu * (Xc2 + Zc2) - dfdyFv * XcYc) * overZc2;
    pixelJacobi[2] = (-dfdyFv * Xc + dfdxFu * Yc) * overZc;
    pixelJacobi[3] = -dfdxFu * overZc;
    pixelJacobi[4] = -dfdyFv * overZc;
    pixelJacobi[5] = (dfdyFv * Yc + dfdxFu * Xc) * overZc2;
    float commonFirstTermsSum = 0;
    float commonFirstTermsSquaredSum = 0;
    int test = 0;
    while (!innerRayMine.end) {
        test++;
        cv::Point xy = innerRayMine.pos(); innerRayMine++;
        int x = xy.x;
        int y = xy.y;
        float dx = x - contourPoint.x;
        float dy = y - contourPoint.y;
        vec2 dxdy(dx, dy);
        float raw = -glm::dot(dxdy, normal);
        float heavisideTerm = heaviside_pre[(int)raw * 100 + 1000];
        float deltaTerm = delta_pre[(int)raw * 100 + 1000];
        const Vec3b rgb = ante[y * 640 + x];
        int red = rgb[0]; int green = rgb[1]; int blue = rgb[2];
        red = red >> 3; red = red << 10; green = green >> 3; green = green << 5; blue = blue >> 3;
        int colorIndex = red + green + blue;
        pF = pFPointer[colorIndex];
        pB = pBPointer[colorIndex];
        float denAsMul = 1 / (pF + pB + 0.000001);
        pF = pF * denAsMul;
        float pfMinusPb = 2 * pF - 1;
        float denominator = heavisideTerm * pfMinusPb + pB + 0.000001;
        float commonFirstTerm = -pfMinusPb / denominator * deltaTerm;
        commonFirstTermsSum += commonFirstTerm;
        commonFirstTermsSquaredSum += commonFirstTerm * commonFirstTerm;
    }
}
Visual Studio profiles by sampling: it interrupts execution often and records the value of the instruction pointer; it then maps it to the source and calculates the frequency of hitting that line.
There are a few issues with that: it's not always possible to figure out which line produced a specific assembly instruction in the optimized code.
One trick I use is to move the code of interest into a separate function and declare it with __declspec(noinline).
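A minimal sketch of that trick (hotSubtraction is a made-up name; the attribute is MSVC-specific):

// Prevent inlining so the profiler's samples for this code stay attributed
// to this function rather than being folded into the caller's lines.
__declspec(noinline) float hotSubtraction(float a, float b)
{
    return a - b;
}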
In your example, are you sure the subtraction was performed as many times as the multiplication? I would be more puzzled by the difference between the subsequent multiplications (0.39% and 0.53%).
Update:
I believe that the following lines:
float phi = angles[i];
and
phi -= extraTheta;
were merged in the generated assembly, and the time spent loading angles[i] was attributed to that subtraction line.

Why isn't horizontal advance enough to properly format a glyph?

I am making a simple text renderer with Vulkan and I am using FreeType to format my text.
I read the FreeType tutorial and came up with the following function:
void Scribe::CreateSingleLineGeometry(const string &text, ScGeomMetaInfo &info,
                                      ScGeomtry &geometry)
{
    float texture_length = info.texture_len;
    float h_offset = info.h_offset;
    float char_l = info.char_len;
    float v_anchor = info.v_offset;

    auto &vertices = geometry.first;
    auto &indices = geometry.second;

    auto length = max(ft_face->bbox.xMax - ft_face->bbox.xMin,
                      ft_face->bbox.yMax - ft_face->bbox.yMin);

    for(auto &c: text)
    {
        auto glyph_index = FT_Get_Char_Index(ft_face, c);
        auto error = FT_Load_Glyph(ft_face, glyph_index, FT_LOAD_NO_HINTING);

        auto metrics = ft_face->glyph->metrics;
        float g_height = metrics.height;
        float g_bearing = metrics.horiBearingY;

        float correction = 1.0 - (g_height) / float(length) -
                           float(metrics.horiBearingY - g_height) / float(length);
        correction *= char_l;
        float bearingX = char_l * float(metrics.horiBearingX) / float(length);

        // Insert the vertex positions and uvs into the returned geometry
        float h_coords[] = {h_offset + bearingX, h_offset + bearingX + char_l};
        float v_coords[] = {v_anchor + correction, v_anchor + correction + char_l};

        auto glyph_data = glyph_map[glyph_index];
        float tex_h_coords[] = {glyph_data.lt_uv.x, glyph_data.rb_uv.x};
        float tex_v_coords[] = {glyph_data.lt_uv.y, glyph_data.rb_uv.y};

        for(int x=0; x<2; x++) {
            for(int y=0; y<2; y++) {
                vertices.insert(end(vertices), {h_coords[x], v_coords[y], 0});
                vertices.insert(end(vertices), {tex_h_coords[x], tex_v_coords[y]});
            }
        }

        // Setup the indices of the current quad
        // There's 4 vertices in a quad, so we offset each index by (quad_size * num_quads)
        uint delta = 4 * info.c_num++;
        indices.insert(end(indices), {0+delta, 3+delta, 1+delta, 0+delta, 3+delta, 2+delta});

        h_offset += char_l * float(metrics.horiAdvance) / float(length) + 0.03;
    }
}
In particular I want to emphasize the line:
h_offset += char_l * float(metrics.horiAdvance) / float(length) + 0.03;
That 0.03 at the end of the line doesn't come from anywhere; I inserted it there to make things look good.
This is the result with that extra offset:
Which I think looks pretty good. However, if I were to remove the extra offset:
h_offset += char_l * float(metrics.horiAdvance) / float(length);
I get something that doesn't look right at all. Why isn't the advance enough to correctly format the font?
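
For comparison, the FreeType tutorial's layout loop keeps everything in one unit system: once a face has been given a size (e.g. with FT_Set_Pixel_Sizes), metrics.horiAdvance is a 26.6 fixed-point pixel value, while ft_face->bbox is in unscaled font units, so dividing one by the other mixes units and may account for part of the discrepancy. A rough sketch of the conventional advance accumulation (AdvancePen is a made-up name, not from the question):

#include <string>
#include <ft2build.h>
#include FT_FREETYPE_H

float AdvancePen(FT_Face face, const std::string &text)
{
    float pen_x = 0.0f;
    for (char c : text)
    {
        FT_UInt glyph_index = FT_Get_Char_Index(face, (FT_ULong)c);
        if (FT_Load_Glyph(face, glyph_index, FT_LOAD_NO_HINTING))
            continue;                                   // skip glyphs that fail to load
        // With a scaled face, metrics.horiAdvance is in 26.6 fixed point
        // (1/64 of a pixel), whereas face->bbox is in unscaled font units.
        pen_x += face->glyph->metrics.horiAdvance / 64.0f;
    }
    return pen_x;
}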

Perlin Noise getting wrong values in Y axis (C++)

Issue
I'm trying to implement the Perlin Noise algorithm in 2D with a single octave on a 16x16 grid. I'm using this as heightmap data for a terrain; however, it only seems to work along one axis. Whenever the sample point moves to a new Y section in the Perlin Noise grid, the gradient is very different from what I expect (for example, it often flips from 0.98 to -0.97, which is a very sudden change).
This image shows the staggered terrain in the z direction (which is the y axis in the 2D Perlin Noise grid).
Code
I've put the code that calculates which sample point to use at the end since it's quite long and I believe it's not where the issue is, but essentially I scale down the terrain to match the Perlin Noise grid (16x16) and then sample through all the points.
Gradient At Point
So the code that calculates out the gradient at a sample point is the following:
// Find the gradient at a certain sample point
float PerlinNoise::gradientAt(Vector2 point)
{
    // Decimal part of float
    float relativeX = point.x - (int)point.x;
    float relativeY = point.y - (int)point.y;
    Vector2 relativePoint = Vector2(relativeX, relativeY);

    vector<float> weights(4);
    // Find the weights of the 4 surrounding points
    weights = surroundingWeights(point);

    float fadeX = fadeFunction(relativePoint.x);
    float fadeY = fadeFunction(relativePoint.y);

    float lerpA = MathUtils::lerp(weights[0], weights[1], fadeX);
    float lerpB = MathUtils::lerp(weights[2], weights[3], fadeX);
    float lerpC = MathUtils::lerp(lerpA, lerpB, fadeY);

    return lerpC;
}
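fadeFunction isn't shown in the question; for reference, the usual choice from Ken Perlin's improved noise is the quintic curve, which a sketch might implement as:

// Ken Perlin's improved-noise fade curve: 6t^5 - 15t^4 + 10t^3.
// Its first and second derivatives are zero at t = 0 and t = 1,
// which keeps the interpolated noise smooth across cell boundaries.
float fadeFunction(float t)
{
    return t * t * t * (t * (t * 6.0f - 15.0f) + 10.0f);
}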
Surrounding Weights of Point
I believe the issue is somewhere here, in the function that calculates the weights for the 4 surrounding points of a sample point, but I can't seem to figure out what is wrong since all the values seem sensible in the function when stepping through it.
// Find the surrounding weight of a point
vector<float> PerlinNoise::surroundingWeights(Vector2 point){
    // Produces correct values
    vector<Vector2> surroundingPoints = surroundingPointsOf(point);
    vector<float> weights;

    for (unsigned i = 0; i < surroundingPoints.size(); ++i) {
        // The corner to the sample point
        Vector2 cornerToPoint = surroundingPoints[i].toVector(point);

        // Getting the seeded vector from the grid
        float x = surroundingPoints[i].x;
        float y = surroundingPoints[i].y;
        Vector2 seededVector = baseGrid[x][y];

        // Dot product between the seededVector and corner to the sample point vector
        float dotProduct = cornerToPoint.dot(seededVector);
        weights.push_back(dotProduct);
    }
    return weights;
}
OpenGL Setup and Sample Point
Setting up the heightmap and getting the sample point. The variables 'wrongA' and 'wrongB' are an example of where the gradient flips and changes suddenly.
void HeightMap::GenerateRandomTerrain() {
    int perlinGridSize = 16;
    PerlinNoise perlin_noise = PerlinNoise(perlinGridSize, perlinGridSize);

    numVertices = RAW_WIDTH * RAW_HEIGHT;
    numIndices = (RAW_WIDTH - 1) * (RAW_HEIGHT - 1) * 6;
    vertices = new Vector3[numVertices];
    textureCoords = new Vector2[numVertices];
    indices = new GLuint[numIndices];

    float perlinScale = RAW_HEIGHT / (float)(perlinGridSize - 1);
    float height = 50;

    float wrongA = perlin_noise.gradientAt(Vector2(0, 68.0f / perlinScale));
    float wrongB = perlin_noise.gradientAt(Vector2(0, 69.0f / perlinScale));

    for (int x = 0; x < RAW_WIDTH; ++x) {
        for (int z = 0; z < RAW_HEIGHT; ++z) {
            int offset = (x * RAW_WIDTH) + z;

            float xVal = (float)x / perlinScale;
            float yVal = (float)z / perlinScale;
            float noise = perlin_noise.gradientAt(Vector2(xVal, yVal));

            vertices[offset] = Vector3(x * HEIGHTMAP_X, noise * height, z * HEIGHTMAP_Z);
            textureCoords[offset] = Vector2(x * HEIGHTMAP_TEX_X, z * HEIGHTMAP_TEX_Z);
        }
    }

    numIndices = 0;
    for (int x = 0; x < RAW_WIDTH - 1; ++x) {
        for (int z = 0; z < RAW_HEIGHT - 1; ++z) {
            int a = (x * (RAW_WIDTH)) + z;
            int b = ((x + 1) * (RAW_WIDTH)) + z;
            int c = ((x + 1) * (RAW_WIDTH)) + (z + 1);
            int d = (x * (RAW_WIDTH)) + (z + 1);

            indices[numIndices++] = c;
            indices[numIndices++] = b;
            indices[numIndices++] = a;

            indices[numIndices++] = a;
            indices[numIndices++] = d;
            indices[numIndices++] = c;
        }
    }
    BufferData();
}
It turned out the issue was in the interpolation stage:
float lerpA = MathUtils::lerp(weights[0], weights[1], fadeX);
float lerpB = MathUtils::lerp(weights[2], weights[3], fadeX);
float lerpC = MathUtils::lerp(lerpA, lerpB, fadeY);
I had the interpolation in the y axis the wrong way around, so it should have been:
lerp(lerpB, lerpA, fadeY)
Instead of:
lerp(lerpA, lerpB, fadeY)

Using a random point in two actions

Sorry I'm new to this.
Right now I'm trying to create an endlessly, randomly moving sprite that rotates to face its direction of travel. However, I can't figure out how to use the random location generated from randomPoint in the rotate action. Basically the bug rotates randomly instead of towards the point it's moving to. Is there a way I can use the same random point twice?
-(void)moveRandom:(CCSprite*)roach
{
    CGPoint randomPoint = ccp(arc4random()%480, arc4random()%320);
    NSLog(@"%@", NSStringFromCGPoint(randomPoint));

    int minDuration = 2.0;
    int maxDuration = 4.0;
    int rangeDuration = maxDuration - minDuration;
    int randomDuration = (arc4random() % rangeDuration) + minDuration;

    float dY = roach.position.y - randomPoint.y;
    float dX = roach.position.x - randomPoint.x;
    float offset = dX < 0 ? 90.0f : -90.0f;
    float angle = CC_RADIANS_TO_DEGREES(atan2f(dY, dX)) + offset;

    [roach runAction:
        [CCActionSequence actions:
            [CCActionRotateTo actionWithDuration:0.001 angle:angle],
            [CCActionMoveTo actionWithDuration:randomDuration position:randomPoint],
            [CCActionCallBlock actionWithBlock:^{
                [self performSelector:@selector(moveRandom:) withObject:roach afterDelay:0.5];
            }],
            nil]
    ];
}
Some things stand out: it looks like you're computing the angle in the wrong direction, the conditional offset isn't needed, and the radians-to-degrees result should be negated. I'm guessing you want:
float dY = randomPoint.y - roach.position.y;
float dX = randomPoint.x - roach.position.x;
float angle = -CC_RADIANS_TO_DEGREES(atan2f(dY, dX));

Calculating iso tile co-ordinates for a TMX map when zoomed on a CCLayerPanZoom control

I'm working on some code to place an isometric CCTMXTiledMap onto a CCLayerPanZoom control and then convert a touch location into ISO tilemap co-ordinates. This all works perfectly well for me as long as the scale of the CCLayerPanZoom is 1 (i.e. if I don't zoom in or out). I can pan the map around and still calculate the correct iso tile co-ordinates. However, as soon as I zoom the tiled map in or out, the iso co-ordinates returned by my code are completely wrong. Please see below for my code to calculate the iso co-ordinates from the touch location.
-(CGPoint) tilePosFromLocation:(CGPoint)location tileMap:(CCTMXTiledMap*)thisTileMap panZoom:(CCLayerPanZoom*)thisPanZoom
{
    float midScale = (thisPanZoom.minScale + thisPanZoom.maxScale) / 2.0;
    float newScale = (thisPanZoom.scale <= midScale) ? thisPanZoom.maxScale : thisPanZoom.minScale;

    if (thisPanZoom.scale < 1)
    {
        newScale = newScale + thisPanZoom.scale;
    }
    else
    {
        newScale = newScale - thisPanZoom.scale;
    }

    CGFloat deltaX = (location.x - thisPanZoom.anchorPoint.x * (thisPanZoom.contentSize.width / CC_CONTENT_SCALE_FACTOR())) * (newScale);
    CGFloat deltaY = (location.y - thisPanZoom.anchorPoint.y * (thisPanZoom.contentSize.height / CC_CONTENT_SCALE_FACTOR())) * (newScale);

    CGPoint position = ccp((thisPanZoom.position.x - deltaX), (thisPanZoom.position.y - deltaY));

    float halfMapWidth = thisTileMap.mapSize.width * 0.5f;
    float mapHeight = thisTileMap.mapSize.height;
    float tileWidth = thisTileMap.tileSize.width / CC_CONTENT_SCALE_FACTOR() * newScale;
    float tileHeight = thisTileMap.tileSize.height / CC_CONTENT_SCALE_FACTOR() * newScale;

    CGPoint tilePosDiv = CGPointMake(position.x / tileWidth, position.y / tileHeight);
    float inverseTileY = tilePosDiv.y - (mapHeight * CC_CONTENT_SCALE_FACTOR()) * newScale; //mapHeight + tilePosDiv.y;

    float posX = (int)(tilePosDiv.y - tilePosDiv.x + halfMapWidth);
    float posY = (int)(inverseTileY + tilePosDiv.x - halfMapWidth + mapHeight);

    // make sure coordinates are within isomap bounds
    posX = MAX(0, posX);
    posX = MIN(thisTileMap.mapSize.width - 1, posX);
    posY = MAX(0, posY);
    posY = MIN(thisTileMap.mapSize.height - 1, posY);

    return CGPointMake(posX, posY);
}
Can anyone offer any insight into where I'm going wrong with this?
Thanks,
Alan