Segmentation fault caused by copying QList - c++

Lately, I have run into a very strange segfault. I haven't done anything to my source code; the only thing that might have changed is that I updated my Qt Creator and MinGW. Now my program causes a segmentation fault, whereas before it worked perfectly.
void Parameter::calculateKeyframes() {
auto kfs = Bezier::calculateControlPoints(keyframes.values());
for (auto kf : kfs) {
setKeyframe(kf);
}
paramUpdate();
}
When this function runs with a valid "keyframes" map (I have verified this in the debugger), it crashes inside the Bezier::calculateControlPoints(QList) function at the marked line below.
QList<Keyframe> calculateControlPoints(QList<Keyframe> keyframes) {
if (keyframes.size() < 2) {
return keyframes;
}
int n = keyframes.size();
for (int i = 0; i<n; i++) {
Keyframe last_kf(0, ValueDouble(0.0));
Keyframe kf;
kf = keyframes.at(i);
Keyframe next_kf(0, ValueDouble(0.0));
if (-1 < i-1) last_kf = keyframes[i-1];
else last_kf.frame = -1;
if (keyframes.size() > i+1) next_kf = keyframes[i+1];
else next_kf.frame = -1;
if (kf.mode == Keyframe::STEP || kf.mode == Keyframe::LINEAR) continue;
if (next_kf.frame > -1 && (kf.mode == Keyframe::EASEIN || (kf.mode == Keyframe::EASE && last_kf.frame < 0))) {
double vecx_TtN = (double)next_kf.frame - (double)kf.frame; // vx = nx - x
double vecy_TtN = next_kf.data.toDouble() - kf.data.toDouble(); // vy = ny - y
kf.control2x = (double)kf.frame + vecx_TtN / 4.5; // x = x + vx / 4.5
kf.control2y = (vecy_TtN / vecx_TtN) * (kf.control2x - kf.frame) + kf.data.toDouble(); // y = m * x + t
} else if (last_kf.frame > -1 && (kf.mode == Keyframe::EASEOUT || (kf.mode == Keyframe::EASE && next_kf.frame < 0))) {
double vecx_TtL = (double)last_kf.frame - (double)kf.frame; // vx = lx - x
double vecy_TtL = last_kf.data.toDouble() - kf.data.toDouble(); // vy = ly - y
kf.control1x = (double)kf.frame + vecx_TtL / 4.5; // x = x + vx / 4.5
kf.control1y = (vecy_TtL / vecx_TtL) * (kf.control1x - kf.frame) + kf.data.toDouble(); // y = m * x + t
} else if (kf.mode == Keyframe::EASE && last_kf.frame > -1 && next_kf.frame > -1) {
double vecx_TtL = (double)last_kf.frame - (double)kf.frame; // vx = lx - x
double vecx_TtN = (double)next_kf.frame - (double)kf.frame; // vx = nx - x
double vecx_LtN = (double)next_kf.frame - (double)last_kf.frame; // vx = nx - lx
/* ---> */ double vecy_LtN = next_kf.data.toDouble() - last_kf.data.toDouble(); // vy = ny - ly
kf.control1x = (double)kf.frame + vecx_TtL / 4.5; // x = x + vx / 4.5
kf.control2x = (double)kf.frame + vecx_TtN / 4.5; // x = x + vx / 4.5
kf.control1y = (vecy_LtN/vecx_LtN) * (kf.control1x - kf.frame) + kf.data.toDouble(); // y = m * x + t
kf.control2y = (vecy_LtN/vecx_LtN) * (kf.control2x - kf.frame) + kf.data.toDouble(); // y = m * x + t
}
keyframes[i] = kf;
}
return keyframes;
}
The crash happens on the second loop iteration, because by then the element at index 0 of the QList keyframes (the one that is also copied into last_kf on that iteration) holds an invalid pointer address in the Keyframe's data member. My question is: why is data now an invalid pointer, when in Parameter::calculateKeyframes() it wasn't?
Here is my Keyframe.cpp (in case it is important):
#include "keyframe.h"
#include "value.h"
#include "valuedouble.h"
#include <iostream>
Keyframe::Keyframe(long frame, Value v) : frame(frame), control1x(frame), control2x(frame), data(v), control1y(v), control2y(v) {
}
Keyframe::Keyframe() : Keyframe(0.0, ValueDouble(0.0)) {}
void Keyframe::toPipeKF(tutorial::Keyframe* k) {
k->set_mode((tutorial::Keyframe_Mode)(int)mode);
k->set_frame(frame);
k->set_data((const char*)data.toByteArray());
k->set_control1x(control1x);
k->set_control1y(control1y.toByteArray());
k->set_control2x(control2x);
k->set_control2y(control2y.toByteArray());
}
Keyframe.h:
#ifndef KEYFRAME_H
#define KEYFRAME_H
#include "pipeendpoint.h"
#include "value.h"
class Keyframe {
public:
Keyframe(long frame, Value v);
Keyframe();
enum Mode {
STEP,
LINEAR,
EASEIN,
EASE,
EASEOUT,
EASEFIX,
EASECUSTOM
};
Mode mode = EASE;
Value data;
long frame;
double control1x = 0;
Value control1y;
double control2x = 0;
Value control2y;
void toPipeKF(tutorial::Keyframe* kf);
};
#endif // KEYFRAME_H
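For what it is worth, the class layout above allows two classic C++ copy problems. Keyframe stores a Value by value while the constructor is handed a ValueDouble, so the derived part is sliced away on every copy; and if Value itself owns a raw pointer and relies on the compiler-generated copy constructor and destructor, then every copied Keyframe shares that pointer and the first copy to be destroyed frees it for all the others. A minimal sketch of the second case (BadValue is made up for illustration; it is not the real Value class):

#include <iostream>

struct BadValue {
    double *d;
    explicit BadValue(double v) : d(new double(v)) {}
    ~BadValue() { delete d; }            // rule of three violated: no copy ctor,
                                         // so every copy shares the same pointer
    double toDouble() const { return *d; }
};

int main() {
    BadValue a(1.5);
    {
        BadValue b = a;                  // shallow copy: b.d == a.d
    }                                    // b is destroyed here and frees a.d too
    std::cout << a.toDouble() << std::endl;  // dangling read: undefined behaviour
    return 0;
}

Copying keyframes.values() into a QList and then copying elements again inside the loop multiplies exactly this kind of shallow copy, which would match the invalid data pointer seen in the debugger; whether that is the actual cause depends on how Value is implemented, which is not shown here.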

Related

RGB color map to a normalized value

I'm using this function to convert a normalized value between 0 and 1 to an RGB value depending on the JET colormap.
std::vector<double> mapJet(double v, double vmin, double vmax)
{
if (v < vmin)
v = vmin;
if (v > vmax)
v = vmax;
double dr, dg, db;
if (v < 0.1242) {
db = 0.504 + ((1.-0.504) / 0.1242)*v;
dg = dr = 0.;
} else if (v < 0.3747) {
db = 1.;
dr = 0.;
dg = (v - 0.1242) * (1. / (0.3747-0.1242));
} else if (v < 0.6253) {
db = (0.6253 - v) * (1. / (0.6253-0.3747));
dg = 1.;
dr = (v - 0.3747) * (1. / (0.6253-0.3747));
} else if (v < 0.8758) {
db = 0.;
dr = 1.;
dg = (0.8758 - v) * (1. / (0.8758-0.6253));
} else {
db = 0.;
dg = 0.;
dr = 1. - (v - 0.8758) * ((1.-0.504) / (1.-0.8758));
}
return std::vector<double> { 255 * dr, 255 * dg, 255 * db };
}
My aim is to find the inverse function double v = mapJet_inv(R, G, B), i.e. to convert an RGB color back to a normalized value between 0 and 1 for this colormap. I tried working backwards from the end of the mapJet function, but I didn't know how to specify the ranges of the R, G, and B components.
Maybe I'm approaching this badly; I would appreciate your help. Thank you. Here is my attempt:
vector<float> colors_to_value(vector<float> colors_tab)
{
double v ,db,dg,dr; v=db=dg=dr=0; vector<float> values_result;
for(int i=0;i<colors_tab.size();i++)
{
dr=colors_tab(i,0)/255.;
dg=colors_tab(i,1)/255.;
db=colors_tab(i,2)/255.;
if ( dg == 0. && dr == 0. ) {
v = (db - 0.504) / (1.-0.504) * 0.1242; // a revoir
}
else if ( db == 1. && dr == 0. ) {
v = dg/4. + 0.1242;
}
else if (db==0 && dr==1)
{
v = 0.8758 -dg*(1. / (0.8758-0.6253)) ;
}
else if ( db==0 && dg ==0)
{
v =( 1. - dr ) / ((1.-0.504) / (1.-0.8758)) + 0.8758;
}
else{
float val1= 0.6253 - db/(1. / (0.6253-0.3747)) ;
float val2=dr/ (1. / (0.6253-0.3747)) + 0.3747;
// v=(val1+val2)/2.;
if (val1>val2)
v=val1;
}
if(v<0) v=0;
values_result.push_back(v);
}
return values_result;
}
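Since mapJet is a piecewise-linear map from a scalar to RGB, one simple and robust way to invert it, instead of solving each branch by hand as in the attempt above, is a brute-force search: sample the forward map finely and return the v whose color is closest to the given (R, G, B). A sketch, assuming the mapJet function from the question is in scope:

#include <vector>

// mapJet() is the forward function shown in the question above.
std::vector<double> mapJet(double v, double vmin, double vmax);

double mapJet_inv(double r, double g, double b, int samples = 4096)
{
    double bestV = 0.0;
    double bestDist = 1e30;
    for (int i = 0; i <= samples; ++i) {
        double v = (double)i / samples;                 // candidate value in [0, 1]
        std::vector<double> rgb = mapJet(v, 0.0, 1.0);  // forward map, 0..255 per channel
        double dr = rgb[0] - r, dg = rgb[1] - g, db = rgb[2] - b;
        double dist = dr * dr + dg * dg + db * db;      // squared RGB distance
        if (dist < bestDist) {
            bestDist = dist;
            bestV = v;
        }
    }
    return bestV;
}

With 4096 samples the result is resolved to about 1/4096 in v, which is typically finer than what 8-bit RGB values can distinguish anyway, and it works for any colormap, not just JET.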

SDL2.0 screen nullptr on render of Window

Hey, so I'm relatively new to the SDL library and just trying to get to grips with it.
I found a C++ conversion of Minecraft4k, but it was based on SDL 1.x, so I'm trying to convert it to SDL 2.0.
At present the build succeeds, but when it gets to
plot(x, y, rgbmul(col, fxmul(br, ddist)));
it throws a read access violation exception:
screen was nullptr
This is my code:
// C++ port of Minecraft 4k JS (http://jsdo.it/notch/dB1E)
// By The8BitPimp
// See: the8bitpimp.wordpress.com
#include <SDL.h>
#include <math.h>
#include <windows.h>
#include <tchar.h>
#include "plot.h"
#include "llist.h"
const int w = 320;
const int h = 240;
SDL_Surface *screen = nullptr;
const float math_pi = 3.14159265359f;
static inline float math_sin(float x) {
return sinf(x);
}
static inline float math_cos(float x) {
return cosf(x);
}
// the texture map
int texmap[16 * 16 * 16 * 3];
// the voxel map
char map[64 * 64 * 64];
static inline int random(int max) {
return (rand() ^ (rand() << 16)) % max;
}
static inline void plot(int x, int y, int c) {
int *p = (int*)screen->pixels;
p[y * w + x] = c;
}
static void makeTextures(void) {
// each texture
for (int j = 0; j<16; j++) {
int k = 255 - random(96);
// each pixel in the texture
for (int m = 0; m<16 * 3; m++)
for (int n = 0; n<16; n++) {
int i1 = 0x966C4A;
int i2 = 0;
int i3 = 0;
if (j == 4)
i1 = 0x7F7F7F;
if ((j != 4) || (random(3) == 0))
k = 255 - random(96);
if (j == 1)
{
if (m < (((n * n * 3 + n * 81) >> 2) & 0x3) + 18)
i1 = 0x6AAA40;
else if (m < (((n * n * 3 + n * 81) >> 2) & 0x3) + 19)
k = k * 2 / 3;
}
if (j == 7)
{
i1 = 0x675231;
if ((n > 0) && (n < 15) && (((m > 0) && (m < 15)) || ((m > 32) && (m < 47))))
{
i1 = 0xBC9862;
i2 = n - 7;
i3 = (m & 0xF) - 7;
if (i2 < 0)
i2 = 1 - i2;
if (i3 < 0)
i3 = 1 - i3;
if (i3 > i2)
i2 = i3;
k = 196 - random(32) + i2 % 3 * 32;
}
else if (random(2) == 0)
k = k * (150 - (n & 0x1) * 100) / 100;
}
if (j == 5)
{
i1 = 0xB53A15;
if (((n + m / 4 * 4) % 8 == 0) || (m % 4 == 0))
i1 = 0xBCAFA5;
}
i2 = k;
if (m >= 32)
i2 /= 2;
if (j == 8)
{
i1 = 5298487;
if (random(2) == 0)
{
i1 = 0;
i2 = 255;
}
}
// fixed point colour multiply between i1 and i2
i3 =
((((i1 >> 16) & 0xFF) * i2 / 255) << 16) |
((((i1 >> 8) & 0xFF) * i2 / 255) << 8) |
((i1 & 0xFF) * i2 / 255);
// pack the colour away
texmap[n + m * 16 + j * 256 * 3] = i3;
}
}
}
static void makeMap(void) {
// add random blocks to the map
for (int x = 0; x < 64; x++) {
for (int y = 0; y < 64; y++) {
for (int z = 0; z < 64; z++) {
int i = (z << 12) | (y << 6) | x;
float yd = (y - 32.5) * 0.4;
float zd = (z - 32.5) * 0.4;
map[i] = random(16);
float th = random(256) / 256.0f;
if (th > sqrtf(sqrtf(yd * yd + zd * zd)) - 0.8f)
map[i] = 0;
}
}
}
}
static void init(void) {
makeTextures();
makeMap();
}
// fixed point byte byte multiply
static inline int fxmul(int a, int b) {
return (a*b) >> 8;
}
// fixed point 8bit packed colour multiply
static inline int rgbmul(int a, int b) {
int _r = (((a >> 16) & 0xff) * b) >> 8;
int _g = (((a >> 8) & 0xff) * b) >> 8;
int _b = (((a)& 0xff) * b) >> 8;
return (_r << 16) | (_g << 8) | _b;
}
static void render(void) {
float now = (float)(SDL_GetTicks() % 10000) / 10000.f;
float xRot = math_sin(now * math_pi * 2) * 0.4 + math_pi / 2;
float yRot = math_cos(now * math_pi * 2) * 0.4;
float yCos = math_cos(yRot);
float ySin = math_sin(yRot);
float xCos = math_cos(xRot);
float xSin = math_sin(xRot);
float ox = 32.5 + now * 64.0;
float oy = 32.5;
float oz = 32.5;
// for each column
for (int x = 0; x < w; x++) {
// get the x axis delta
float ___xd = ((float)x - (float)w / 2.f) / (float)h;
// for each row
for (int y = 0; y < h; y++) {
// get the y axis delta
float __yd = ((float)y - (float)h / 2.f) / (float)h;
float __zd = 1;
float ___zd = __zd * yCos + __yd * ySin;
float _yd = __yd * yCos - __zd * ySin;
float _xd = ___xd * xCos + ___zd * xSin;
float _zd = ___zd * xCos - ___xd * xSin;
int col = 0;
int br = 255;
float ddist = 0;
float closest = 32.f;
// for each principle axis x,y,z
for (int d = 0; d < 3; d++) {
float dimLength = _xd;
if (d == 1)
dimLength = _yd;
if (d == 2)
dimLength = _zd;
float ll = 1.0f / (dimLength < 0.f ? -dimLength : dimLength);
float xd = (_xd)* ll;
float yd = (_yd)* ll;
float zd = (_zd)* ll;
float initial = ox - floor(ox);
if (d == 1) initial = oy - floor(oy);
if (d == 2) initial = oz - floor(oz);
if (dimLength > 0) initial = 1 - initial;
float dist = ll * initial;
float xp = ox + xd * initial;
float yp = oy + yd * initial;
float zp = oz + zd * initial;
if (dimLength < 0) {
if (d == 0) xp--;
if (d == 1) yp--;
if (d == 2) zp--;
}
// while we are considering a ray that is still closer than the best so far
while (dist < closest) {
// quantize to the map grid
int tex = map[(((int)zp & 63) << 12) | (((int)yp & 63) << 6) | ((int)xp & 63)];
// if this voxel has a texture applied
if (tex > 0) {
// find the uv coordinates of the intersection point
int u = ((int)((xp + zp) * 16.f)) & 15;
int v = ((int)(yp * 16.f) & 15) + 16;
// fix uvs for alternate directions?
if (d == 1) {
u = ((int)(xp * 16.f)) & 15;
v = (((int)(zp * 16.f)) & 15);
if (yd < 0)
v += 32;
}
// find the colour at the intersection point
int cc = texmap[u + v * 16 + tex * 256 * 3];
// if the colour is not transparent
if (cc > 0) {
col = cc;
ddist = 255 - ((dist / 32 * 255));
br = 255 * (255 - ((d + 2) % 3) * 50) / 255;
// we now have the closest hit point (also terminates this ray)
closest = dist;
}
}
// advance the ray
xp += xd;
yp += yd;
zp += zd;
dist += ll;
}
}
plot(x, y, rgbmul(col, fxmul(br, ddist)));
}
}
}
int main(int argc, char *argv[]) {
SDL_Init(SDL_INIT_EVERYTHING);
SDL_Window *screen;
screen = SDL_CreateWindow(
"Minecraft4k", // window title
SDL_WINDOWPOS_CENTERED, // initial x position
SDL_WINDOWPOS_CENTERED, // initial y position
320, // width, in pixels
240, // height, in pixels
SDL_WINDOW_OPENGL // flags - see below
);
SDL_Renderer* renderer;
renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
if (screen == nullptr) {
return 1;
}
init();
bool running = true;
while (running) {
SDL_Event event;
while (SDL_PollEvent(&event)) {
running &= (event.type != SDL_QUIT);
}
SDL_RenderPresent(renderer);
render();
}
SDL_DestroyWindow(screen);
SDL_Quit();
return 0;
}
When I actually run the code I do get a black screen, but the debugger stops on the line
plot(x, y, rgbmul(col, fxmul(br, ddist)));
in
static void render(void)
This is all just "for fun", so any information or guidance is appreciated.
You define screen twice (first as a global variable, then again inside main), but you initialize only the one inside main. Note that the two are not even the same type: the global is an SDL_Surface*, while the local one is the SDL_Window* returned by SDL_CreateWindow.
Because of that, the global screen that plot uses is still nullptr, and plot crashes trying to dereference it, exactly as the error message states.
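A minimal sketch of one way to fix this under SDL2 (only main() is shown; the rest of the question's code stays as it is): keep the SDL_Window* under its own name, point the global SDL_Surface* screen at the window's surface so that plot() writes into valid pixels, and present that surface every frame:

int main(int argc, char *argv[]) {
    SDL_Init(SDL_INIT_EVERYTHING);
    SDL_Window *window = SDL_CreateWindow(
        "Minecraft4k",
        SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
        w, h,
        0);                                 // no SDL_WINDOW_OPENGL: we draw in software
    if (window == nullptr)
        return 1;
    screen = SDL_GetWindowSurface(window);  // initialize the global that plot() uses
    init();
    bool running = true;
    while (running) {
        SDL_Event event;
        while (SDL_PollEvent(&event))
            running &= (event.type != SDL_QUIT);
        render();                           // fills screen->pixels
        SDL_UpdateWindowSurface(window);    // show the software surface
    }
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}

This sketch assumes the window surface is 32 bits per pixel with a pitch of w * 4, which matches what plot() already assumes; a more careful version would use screen->pitch and SDL_MapRGB instead of writing raw ints.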

Get the side of the block a player is looking at | Bukkit

It is possible to get the Block the player is actually looking at:
p.getTargetBlock()
But is there a way to get the side of the block that the player is looking at
(the top, bottom, north, south, east, or west side of the block)?
I haven't found a function that does that. Is there something I can use to work out which side it is?
I have implemented a utility method for this, given the AxisAlignedBB (NMS) of the block the player is targeting. That block can be obtained with Player#getTargetBlock(), as mentioned in the question, or with a custom raycast algorithm.
public static BlockFace blockFaceCollide(Vector startLocation, Vector direction, AxisAlignedBB objectBoundry){
double constant = Double.MAX_VALUE;
BlockFace blockFace = null;
double directionX = direction.getX();
double directionY = direction.getY();
double directionZ = direction.getZ();
if(directionY > 0){
double b = objectBoundry.b - startLocation.getY();
double tempConstant = b / directionY;
if(tempConstant > 0 && tempConstant < constant){
double xAtCollide = tempConstant * directionX + startLocation.getX();
double zAtCollide = tempConstant * directionZ + startLocation.getZ();
if (between(xAtCollide, objectBoundry.a, objectBoundry.d, 0)
&& between(zAtCollide, objectBoundry.c, objectBoundry.f, 0)) {
constant = tempConstant;
blockFace = BlockFace.DOWN;
}
}
}
else {
double e = objectBoundry.e - startLocation.getY();
double tempConstant = e / directionY;
if (tempConstant > 0 && tempConstant < constant) {
double xAtCollide = tempConstant * directionX + startLocation.getX();
double zAtCollide = tempConstant * directionZ + startLocation.getZ();
if (between(xAtCollide, objectBoundry.a, objectBoundry.d, 0)
&& between(zAtCollide, objectBoundry.c, objectBoundry.f, 0)) {
constant = tempConstant;
blockFace = BlockFace.UP;
}
}
}
if(directionX < 0) {
double d = objectBoundry.d - startLocation.getX();
double tempConstant = d / directionX;
if (tempConstant > 0 && tempConstant < constant) {
double yAtCollide = tempConstant * directionY + startLocation.getY();
double zAtCollide = tempConstant * directionZ + startLocation.getZ();
if (between(yAtCollide, objectBoundry.b, objectBoundry.e, 0)
&& between(zAtCollide, objectBoundry.c, objectBoundry.f, 0)) {
constant = tempConstant;
blockFace = BlockFace.EAST;
}
}
}
else {
double a = objectBoundry.a - startLocation.getX();
double tempConstant = a / directionX;
if (tempConstant > 0 && tempConstant < constant) {
double yAtCollide = tempConstant * directionY + startLocation.getY();
double zAtCollide = tempConstant * directionZ + startLocation.getZ();
if (between(yAtCollide, objectBoundry.b, objectBoundry.e, 0)
&& between(zAtCollide, objectBoundry.c, objectBoundry.f, 0)) {
constant = tempConstant;
blockFace = BlockFace.WEST;
}
}
}
if(directionZ > 0) {
double c = objectBoundry.c - startLocation.getZ();
double tempConstant = c / directionZ;
if(tempConstant > 0 && tempConstant < constant) {
double yAtCollide = tempConstant * directionY + startLocation.getY();
double xAtCollide = tempConstant * directionX + startLocation.getX();
if (between(yAtCollide, objectBoundry.b, objectBoundry.e, 0)
&& between(xAtCollide, objectBoundry.a, objectBoundry.d, 0)) {
blockFace = BlockFace.NORTH;
}
}
}
else {
double f = objectBoundry.f - startLocation.getZ();
double tempConstant = f / directionZ;
if(tempConstant < constant) {
double yAtCollide = tempConstant * directionY + startLocation.getY();
double xAtCollide = tempConstant * directionX + startLocation.getX();
if (between(yAtCollide, objectBoundry.b, objectBoundry.e, 0)
&& between(xAtCollide, objectBoundry.a, objectBoundry.d, 0)) {
blockFace = BlockFace.SOUTH;
}
}
}
return blockFace;
}
public static boolean between(double num, double a, double b, double EOF) {
if (a <= b)
return num + EOF >= a && num - EOF <= b;
return num + EOF >= b && num - EOF <= a;
}
I don't think there is a way to directly get the face of the block they are looking at; however, you can get the block face that they click by using PlayerInteractEvent and its getBlockFace() method.
EDIT: Check out this thread on the Bukkit forums.
EDIT 2: Or, as mentioned in this thread, you can try putting this in a PlayerMoveEvent:
List<Block> blocks = event.getPlayer().getLastTwoTargetBlocks(null, 10);
BlockFace face = null;
if (blocks.size() > 1)
face = blocks.get(1).getFace(blocks.get(0));

Sunrise and sunset times based on coordinates and altitude

I am using this code for calculating sunrise and sunset times.
// Get the daylight status of the current time.
bool
SunLight::CalculateDaylightStatus()
{
// Calculate the current time of day.
time_t currentTime = time(NULL);
m_LocalTime = localtime(&currentTime);
// Initialize the sunrise and set times.
*m_Sunrise = *m_LocalTime;
*m_Sunset = *m_LocalTime;
// Flags to check whether sunrise or set available on the day or not.
m_IsSunrise = false;
m_IsSunset = false;
m_RiseAzimuth = 0.0;
m_SetAzimuth = 0.0;
for (unsigned int i = 0; i < 3; i++)
{
m_RightAscention[i] = 0.0;
m_Decension[i] = 0.0;
m_VHz[i] = 0.0;
}
for (unsigned int i = 0; i < 2; i++)
{
m_SunPositionInSky[i] = 0.0;
m_RiseTime[i] = 0;
m_SetTime[i] = 0;
}
// Calculate the sunrise and set times.
CalculateSunRiseSetTimes();
return (mktime(m_LocalTime) >= mktime(m_Sunrise) && mktime(m_LocalTime) < mktime(m_Sunset))
? true
: false;
}
//---------------------------------------------------------------------
bool
SunLight::CalculateSunRiseSetTimes()
{
double zone = timezone/3600 - m_LocalTime->tm_isdst;
// Julian day relative to Jan 1.5, 2000.
double jd = GetJulianDay() - 2451545;
if ((Sign(zone) == Sign(m_Config->Longitude())) && (zone != 0))
{
return false;
}
double tz = zone / 24;
// Centuries since 1900.0
double ct = jd / 36525 + 1;
// Local sidereal time.
double t0 = LocalSiderealTimeForTimeZone(jd, tz, m_Config->Longitude()/360);
// Get sun position at start of day.
jd += tz;
// Calculate the position of the sun.
CalculateSunPosition(jd, ct);
double ra0 = m_SunPositionInSky[0];
double dec0 = m_SunPositionInSky[1];
// Get sun position at end of day.
jd += 1;
// Calculate the position of the sun.
CalculateSunPosition(jd, ct);
double ra1 = m_SunPositionInSky[0];
double dec1 = m_SunPositionInSky[1];
// make continuous
if (ra1 < ra0)
ra1 += 2 * M_PI;
m_RightAscention[0] = ra0;
m_Decension[0] = dec0;
// check each hour of this day
for (int k = 0; k < 24; k++)
{
m_RightAscention[2] = ra0 + (k + 1) * (ra1 - ra0) / 24;
m_Decension[2] = dec0 + (k + 1) * (dec1 - dec0) / 24;
m_VHz[2] = TestHour(k, t0, m_Config->Latitude());
// advance to next hour
m_RightAscention[0] = m_RightAscention[2];
m_Decension[0] = m_Decension[2];
m_VHz[0] = m_VHz[2];
}
// Update the tm structure with time values.
m_Sunrise->tm_hour = m_RiseTime[0];
m_Sunrise->tm_min = m_RiseTime[1];
m_Sunset->tm_hour = m_SetTime[0];
m_Sunset->tm_min = m_SetTime[1];
// neither sunrise nor sunset
if ((!m_IsSunrise) && (!m_IsSunset))
{
// Sun down all day.
if (m_VHz[2] < 0)
m_IsSunset = true;
// Sun up all day.
else
m_IsSunrise = true;
}
return true;
}
//---------------------------------------------------------------------
int
SunLight::Sign(double value)
{
if (value > 0.0)
return 1;
else if (value < 0.0)
return -1;
else
return 0;
}
//---------------------------------------------------------------------
// Local Sidereal Time for zone.
double
SunLight::LocalSiderealTimeForTimeZone(double jd, double z, double lon)
{
double s = 24110.5 + 8640184.812999999 * jd / 36525 + 86636.6 * z + 86400 * lon;
s = s / 86400;
s = s - floor(s);
return s * 360 * cDegToRad;
}
//---------------------------------------------------------------------
// Determine Julian day from calendar date
// (Jean Meeus, "Astronomical Algorithms", Willmann-Bell, 1991).
double
SunLight::GetJulianDay()
{
int month = m_LocalTime->tm_mon + 1;
int day = m_LocalTime->tm_mday;
int year = 1900 + m_LocalTime->tm_year;
bool gregorian = (year < 1583) ? false : true;
if ((month == 1) || (month == 2))
{
year = year - 1;
month = month + 12;
}
double a = floor((double)year / 100);
double b = 0;
if (gregorian)
b = 2 - a + floor(a / 4);
else
b = 0.0;
double jd = floor(365.25 * (year + 4716))
+ floor(30.6001 * (month + 1))
+ day + b - 1524.5;
return jd;
}
//---------------------------------------------------------------------
// Sun's position using fundamental arguments
// (Van Flandern & Pulkkinen, 1979).
void
SunLight::CalculateSunPosition(double jd, double ct)
{
double g, lo, s, u, v, w;
lo = 0.779072 + 0.00273790931 * jd;
lo = lo - floor(lo);
lo = lo * 2 * M_PI;
g = 0.993126 + 0.0027377785 * jd;
g = g - floor(g);
g = g * 2 * M_PI;
v = 0.39785 * sin(lo);
v = v - 0.01 * sin(lo - g);
v = v + 0.00333 * sin(lo + g);
v = v - 0.00021 * ct * sin(lo);
u = 1 - 0.03349 * cos(g);
u = u - 0.00014 * cos(2 * lo);
u = u + 0.00008 * cos(lo);
w = -0.0001 - 0.04129 * sin(2 * lo);
w = w + 0.03211 * sin(g);
w = w + 0.00104 * sin(2 * lo - g);
w = w - 0.00035 * sin(2 * lo + g);
w = w - 0.00008 * ct * sin(g);
// compute sun's right ascension
s = w / sqrt(u - v * v);
m_SunPositionInSky[0] = lo + atan(s / sqrt(1 - s * s));
// ...and declination
s = v / sqrt(u);
m_SunPositionInSky[1] = atan(s / sqrt(1 - s * s));
}
//---------------------------------------------------------------------
// Test an hour for an event.
double
SunLight::TestHour(int k, double t0, double prmLatitude)
{
double ha[3];
double a, b, c, d, e, s, z;
double time;
double az, dz, hz, nz;
int hr, min;
ha[0] = t0 - m_RightAscention[0] + k * cK1;
ha[2] = t0 - m_RightAscention[2] + k * cK1 + cK1;
ha[1] = (ha[2] + ha[0]) / 2; // hour angle at half hour
m_Decension[1] = (m_Decension[2] + m_Decension[0]) / 2; // declination at half hour
s = sin(prmLatitude * cDegToRad);
c = cos(prmLatitude * cDegToRad);
z = cos(90.833 * cDegToRad); // refraction + sun semi-diameter at horizon
if (k <= 0)
m_VHz[0] = s * sin(m_Decension[0]) + c * cos(m_Decension[0]) * cos(ha[0]) - z;
m_VHz[2] = s * sin(m_Decension[2]) + c * cos(m_Decension[2]) * cos(ha[2]) - z;
if (Sign(m_VHz[0]) == Sign(m_VHz[2]))
return m_VHz[2]; // no event this hour
m_VHz[1] = s * sin(m_Decension[1]) + c * cos(m_Decension[1]) * cos(ha[1]) - z;
a = 2 * m_VHz[0] - 4 * m_VHz[1] + 2 * m_VHz[2];
b = -3 * m_VHz[0] + 4 * m_VHz[1] - m_VHz[2];
d = b * b - 4 * a * m_VHz[0];
if (d < 0)
return m_VHz[2]; // no event this hour
d = sqrt(d);
e = (-b + d) / (2 * a);
if ((e > 1) || (e < 0))
e = (-b - d) / (2 * a);
time = (double)k + e + (double)1 / (double)120; // time of an event
hr = (int)floor(time);
min = (int)floor((time - hr) * 60);
hz = ha[0] + e * (ha[2] - ha[0]); // azimuth of the sun at the event
nz = -cos(m_Decension[1]) * sin(hz);
dz = c * sin(m_Decension[1]) - s * cos(m_Decension[1]) * cos(hz);
az = atan2(nz, dz) / cDegToRad;
if (az < 0) az = az + 360;
if ((m_VHz[0] < 0) && (m_VHz[2] > 0))
{
m_RiseTime[0] = hr;
m_RiseTime[1] = min;
m_RiseAzimuth = az;
m_IsSunrise = true;
}
if ((m_VHz[0] > 0) && (m_VHz[2] < 0))
{
m_SetTime[0] = hr;
m_SetTime[1] = min;
m_SetAzimuth = az;
m_IsSunset = true;
}
return m_VHz[2];
}
//---------------------------------------------------------------------
I need to introduce altitude (the observer's elevation) into the formula to get a more accurate result. Can someone give me a quick hint on what I have to modify to add altitude to the formula?
That algorithm is nowhere near sufficient for calculating the times of sunrise and sunset accurately. What you need is Jean Meeus' book "Astronomical Algorithms". You will need to account for the observer's longitude and latitude, the difference between dynamical time and universal time, and the eccentricity of the Earth's orbit to obtain even a low-accuracy result.
This seems to be called the sunrise equation. The formulas in that Wikipedia article are remarkably simple, and they do account for the geographic location.
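As for the altitude itself: the code above hard-codes the rise/set zenith distance as z = cos(90.833 * cDegToRad) in TestHour() (refraction plus the Sun's semi-diameter). An observer at elevation h sees the horizon dipped by roughly acos(R / (R + h)), so one common way to fold altitude in is to enlarge that zenith distance by the dip. A sketch (riseSetZenithDistance is my own helper, not part of the SunLight class):

#include <cmath>

// Zenith distance (in degrees) of the Sun's centre at rise/set for an observer
// at the given elevation above sea level; use it in place of the constant 90.833.
double riseSetZenithDistance(double elevationMetres)
{
    const double earthRadius = 6371000.0;   // mean Earth radius in metres
    double dipRad = std::acos(earthRadius / (earthRadius + elevationMetres));
    double dipDeg = dipRad * 180.0 / M_PI;  // geometric dip of the horizon
    return 90.833 + dipDeg;                 // 90.833 already covers refraction + semi-diameter
}

In TestHour() the line would then read something like z = cos(riseSetZenithDistance(m_Config->Altitude()) * cDegToRad);, where Altitude() is a hypothetical accessor for the observer's elevation. For typical elevations the dip is only a fraction of a degree, so the rise and set times shift by a few minutes at most.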

How to convert an image into a matrix using OpenCV?

I am trying to make a program in OpenCV that converts an image into matrix form, with each value representing one of the image's pixels. I have converted the image into binary form and now I want to convert its pixel values into a matrix.
If you need to use a CvMat object, you may want to try the cvCopy function. It takes CvArr* as its arguments, so both IplImage and CvMat will fit. If you would rather leave the C API for something more modern, you can load the image into a cv::Mat object and use the C++ threshold function.
The question is why you want to convert the format of a matrix that you already have (an IplImage, like all the other image types, is already a matrix). If you want a matrix of bool type, use the Matx or Mat_ template class for this.
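A minimal sketch of that second route (the modern C++ API; the file name and threshold value are placeholders): load the image as grayscale, threshold it to binary, and, only if some other data structure is really needed, copy the pixel values out of the cv::Mat, which is itself already the matrix being asked for:

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("input.png", 0);   // 0 = load as 8-bit grayscale
    if (gray.empty())
        return 1;

    cv::Mat binary;
    cv::threshold(gray, binary, 128, 255, cv::THRESH_BINARY);

    // Copy into a plain matrix of 0/1 values; this step is optional, since the
    // cv::Mat 'binary' can already be indexed like a matrix with at<uchar>(row, col).
    std::vector<std::vector<int> > m(binary.rows, std::vector<int>(binary.cols));
    for (int r = 0; r < binary.rows; ++r)
        for (int c = 0; c < binary.cols; ++c)
            m[r][c] = binary.at<uchar>(r, c) ? 1 : 0;

    std::cout << "matrix is " << m.size() << " x " << m[0].size() << std::endl;
    return 0;
}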
A first glance at your question raises more questions... try to be a bit more specific (I don't seem to be able to see your code example; I'm new to Stack Overflow): for example, your OpenCV version and IDE (like Code::Blocks or Microsoft Visual Studio). Please include that in your question. What I would also like to know is: what is the purpose of this? Why do you need a matrix, and so forth :)
Attempted answer, from what I can gather:
"but I have installed OpenCV version 2.3.1 on Visual C++ 2010 – Ayesha Khan"
OpenCV uses a class called Mat, which you should have encountered a lot. This class is essentially a matrix already. If I remember correctly, it is very similar to a vector, which I won't cover here.
So if you need to access any pixel value in, let's say,
Mat Img;
you would use a member function of that instance, like this:
cout << (int)Img.at<uchar>(y, x);
This accesses and prints to the console the value of the pixel at row y, column x (note that at<>() takes (row, column), and the cast to int makes the 8-bit value print as a number rather than as a character). In this example I use uchar inside the angle brackets <>; uchar is used for 8-bit pictures. You will have to change this if you work with images of more detail (more bits).
When using a binary picture, OpenCV will most likely allocate 8 bits per pixel, which means you need the example above.
I'd like to give more details, but not before you've specified what exactly it is that you are attempting to do.
Your code uses OpenCV version 1. I'll let someone else answer, since it's not my forte. In my opinion, the 2.0 template-based interface is much more intuitive, and it's my recommendation to use it for all new endeavors.
Have a look at the way I use imread() in this program...
Please inspect the type of value returned from imread()...
Also, search in the code for originalColor = imageArg(/*row*/chosenX, /*column*/chosenY); it's a way to index into the matrix returned from imread().
// HW1 Intro to Digital Image Processing
// used OpenCV 2.3.1 and VS2010 SP1 to develop this solution
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <cassert>
using namespace cv;
Mat_<Vec3b> image;
int discreteAngles = 512;
void on_mouse(int eventCode, int centerX, int centerY, int flags, void* params);
int str2int(const std::string &str);
int main(int argc, char* argv[])
{
// command itself is one element of argument array...
if(argc != 1 && argc != 3)
{
std::cout << "Expecting two arguments to the application: angular granularity as a whole number and a file name." << std::endl;
exit(0);
}
std::string discreteAnglesStr, fileName;
if(argc == 3)
{
discreteAnglesStr = argv[1];
fileName = argv[2];
}
else
{
discreteAnglesStr = "64";
fileName = "boats.tif";
}
try
{
discreteAngles = str2int(discreteAnglesStr);
auto image_ = imread(fileName);
int channels = image_.channels();
assert(channels == 3);
image = image_;
if(image.rows == 0)
throw new std::exception();
auto originalImageStr = "Original Image";
namedWindow(originalImageStr);
setMouseCallback(originalImageStr, on_mouse);
imshow(originalImageStr, image);
}
catch(std::exception e)
{
std::cout << "could not load image." << std::endl;
}
waitKey(0);
return -1;
}
// borrowed from http://stackoverflow.com/q/194465/90475, courtesy of Luka Marinko
int str2int(const std::string &str)
{
std::stringstream ss(str);
int num;
if((ss >> num).fail())
{
throw new std::exception("could not parse user input!");
}
return num;
}
double compute_max_madius(int imageRows, int imageCols, int centerX, int centerY)
{
auto otherX = imageCols - centerX;
auto otherY = imageRows - centerY;
auto a = sqrt((double)centerX * centerX + centerY * centerY);
auto b = sqrt((double)otherX * otherX + centerY * centerY);
auto c = sqrt((double)centerX * centerX + otherY * otherY);
auto d = sqrt((double)otherX * otherX + otherY * otherY);
return max(max(a,b), max(c,d));
}
Vec3b interpolate_with_nearest(const Mat_<Vec3b>& imageArg, double x, double y)
{
auto x0 = static_cast<int>(floor(x)); auto y0 = static_cast<int>(floor(y));
auto x1 = static_cast<int>(ceil(x)); auto y1 = static_cast<int>(ceil(y));
// Rolls over to the other side, esp. for angles
if(x0 < 0) x0 = imageArg.rows - 1;
if(y0 < 0) y0 = imageArg.cols - 1;
if (x1 == imageArg.rows) x1 = 0;
if (y1 == imageArg.cols) y1 = 0;
int chosenX, chosenY;
if (x - x0 < 0.5) chosenX = x0; else chosenX = x1;
if (y - y0 < 0.5) chosenY = y0; else chosenY = y1;
Vec3b originalColor = Vec3b(0, 0, 0);
if (chosenX >= 0 && chosenX < imageArg.rows &&
chosenY >= 0 && chosenY < imageArg.cols)
{
originalColor = imageArg(/*row*/chosenX, /*column*/chosenY);
}
return originalColor;
}
Vec3b interpolate_with_bilinear(const Mat_<Vec3b>& imageArg, double x, double y)
{
auto x0 = static_cast<int>(floor(x)); auto y0 = static_cast<int>(floor(y));
auto x1 = static_cast<int>(ceil(x)); auto y1 = static_cast<int>(ceil(y));
// Rolls over to the other side, esp. for angles
if(x0 < 0) x0 = imageArg.rows - 1;
if(y0 < 0) y0 = imageArg.cols - 1;
if (x1 == imageArg.rows) x1 = 0;
if (y1 == imageArg.cols) y1 = 0;
if (!(
x0 >= 0 && x0 < imageArg.rows &&
x1 >= 0 && x1 < imageArg.rows &&
y0 >= 0 && y0 < imageArg.cols &&
y1 >= 0 && y1 < imageArg.cols))
return Vec3b(0, 0, 0);
auto f00 = imageArg(x0, y0);
auto f01 = imageArg(x0, y1);
auto f10 = imageArg(x1, y0);
auto f11 = imageArg(x1, y1);
auto b1 = f00;
auto b2 = f10 - f00;
auto b3 = f01 - f00;
auto b4 = f00 + f11 - f01 - f10;
x = x - x0;
y = y - y0;
return b1 + b2 * x + b3 * y + b4 * x * y;
}
void on_mouse(int eventCode, int centerX, int centerY, int flags, void* params)
{
if(eventCode == 0)
return;
switch( eventCode )
{
case CV_EVENT_LBUTTONDOWN:
{
std::cout << "Center was (" << centerX << ", " << centerY << ")" << std::endl;
auto maxRadiusXY = compute_max_madius(image.rows, image.cols, centerX, centerY);
int discreteRadii = static_cast<int>(floor(maxRadiusXY));
Mat_<Vec3b> polarImg1;
polarImg1.create(/*rows*/discreteRadii, /*cols*/discreteAngles);
Mat_<Vec3b> polarImg2;
polarImg2.create(/*rows*/discreteRadii, /*cols*/discreteAngles);
for (int radius = 0; radius < discreteRadii; radius++) // radii
{
for (int discreteAngle = 0; discreteAngle < discreteAngles; discreteAngle++) // discreteAngles
{
// 3
auto angleRad = discreteAngle * 2.0 * CV_PI / discreteAngles;
// 2
auto xTranslated = cos(angleRad) * radius;
auto yTranslated = sin(angleRad) * radius;
// 1
auto x = centerX + xTranslated;
auto y = centerY - yTranslated;
polarImg1(/*row*/ radius, /*column*/ discreteAngle) = interpolate_with_nearest(image, /*row*/y, /*column*/x);
polarImg2(/*row*/ radius, /*column*/ discreteAngle) = interpolate_with_bilinear(image, /*row*/y, /*column*/x);
}
}
auto polarImage1Str = "Polar (nearest)";
namedWindow(polarImage1Str);
imshow(polarImage1Str, polarImg1);
auto polarImage2Str = "Polar (bilinear)";
namedWindow(polarImage2Str);
imshow(polarImage2Str, polarImg2);
Mat_<Vec3b> reprocessedImg1;
reprocessedImg1.create(Size(image.rows, image.cols));
Mat_<Vec3b> reprocessedImg2;
reprocessedImg2.create(Size(image.rows, image.cols));
for(int y = 0; y < image.rows; y++)
{
for(int x = 0; x < image.cols; x++)
{
// 1
auto xTranslated = x - centerX;
auto yTranslated = -(y - centerY);
// 2
auto radius = sqrt((double)xTranslated * xTranslated + yTranslated * yTranslated);
double angleRad;
if(xTranslated != 0)
{
angleRad = atan((double)abs(yTranslated) / abs(xTranslated));
// I Quadrant
if (xTranslated > 0 && yTranslated > 0)
angleRad = angleRad;
// II Quadrant
if (xTranslated < 0 && yTranslated > 0)
angleRad = CV_PI - angleRad;
// III Quadrant
if (xTranslated < 0 && yTranslated < 0)
angleRad = CV_PI + angleRad;
/// IV Quadrant
if (xTranslated > 0 && yTranslated < 0)
angleRad = 2 * CV_PI - angleRad;
if (yTranslated == 0)
if (xTranslated > 0) angleRad = 0;
else angleRad = CV_PI;
}
else
{
if (yTranslated > 0) angleRad = CV_PI / 2;
else angleRad = 3 * CV_PI / 2;
}
// 3
auto discreteAngle = angleRad * discreteAngles / (2.0 * CV_PI);
reprocessedImg1(/*row*/ y, /*column*/ x) = interpolate_with_nearest(polarImg1, /*row*/radius, /*column*/discreteAngle);
reprocessedImg2(/*row*/ y, /*column*/ x) = interpolate_with_bilinear(polarImg2, /*row*/radius, /*column*/discreteAngle);
}
}
auto reprocessedImg1Str = "Re-processed (nearest)";
namedWindow(reprocessedImg1Str);
imshow(reprocessedImg1Str, reprocessedImg1);
auto reprocessedImg2Str = "Re-processed (bilinear)";
namedWindow(reprocessedImg2Str);
imshow(reprocessedImg2Str, reprocessedImg2);
} break;
}
}