OpenGL screen coordinates to world coordinates - c++

I am trying to get world coordinates from screen coordinates with OpenGL. I did some research, but the returned values are inconsistent. In my world I have a surface at Z=0, and I want the 3D coordinates of a point on that surface, i.e. with Z=0. Here is my code so far:
// --- Unproject the mouse position (cursX / cursY) back to world coordinates ---
// NOTE(review): cursZ = 1 selects the FAR plane (after the *2-1 mapping below),
// not the Z = 0 surface; to hit the plane Z = 0, unproject two depths (near and
// far) and intersect the resulting ray with that plane.
float cursZ = 1;
// NOTE(review): only posZ is initialized here; posX and posY stay
// indeterminate (none of the three is used below).
long posX, posY, posZ = 0;
// Viewport rectangle: origin and size in window pixels.
int viewport[4];
viewport[0] = Originex;
viewport[1] = Originey;
viewport[2] = largeFen;
viewport[3] = hautFen;
CMatrix4 modelView;
CMatrix4 proj;
CMatrix4 A;    // combined projection * model-view (then inverted)
CMatrix4 out;  // carries the unprojected homogeneous result in a11..a14
typeCoorDepl proche = 100.;              // near plane distance
typeCoorDepl loin = largeFen + hautFen;  // far plane distance (window-size heuristic)
Renderer.GetMatrix(MAT_MODELVIEW, modelView);
Renderer.GetMatrix(MAT_PROJECTION, proj);
// Rebuild the view matrix exactly as it is used for rendering.
modelView.LookAt(TVector3T(sendAngleX, angleY, SendAngleZ), TVector3T(0, 0, 0));
switch (modeVue)// rebuild the projection matrix exactly as used for rendering
{
case 1: // isometric mode, rotation centered on the screen
proj.OrthoOffCenter(-(largeFen / 2.0f), (hautFen / 2.0f), (largeFen / 2.0f), -(hautFen / 2.0f), proche, loin);
break;
case 2: // perspective (conic) mode, rotation centered on the screen
proj.PerspectiveFOV((70.0*pi / 180) / 2, largeFen / hautFen, proche, loin);
break;
case 3: // isometric mode, rotation centered on the screen
proj.OrthoOffCenter(-(largeFen / 2.0f), (hautFen / 2.0f), (largeFen / 2.0f), -(hautFen / 2.0f), proche, loin);
break;
case 4: // perspective mode, rotation centered on the object instead of the screen
proj.PerspectiveFOV((70.0*pi / 180), largeFen / hautFen, proche, loin);
break;
default:
break;
}
cursY = hautFen - cursY; // window coordinates start at the top-left, OpenGL at the bottom-left
double posx, posy, posz;
float m[16];// NOTE(review): m is never used below
float in[4];// NDC input point (x, y, z, w)
// Invert the combined transformation: world -> clip becomes clip -> world.
A = proj*modelView;
A.Inverse();
in[0] = cursX;
in[1] = cursY;
in[2] = cursZ;
in[3] = 1.0;
/* Map x and y from window coordinates */
in[0] = (in[0] - viewport[0]) / viewport[2];
in[1] = (in[1] - viewport[1]) / viewport[3];
/* Map to range -1 to 1 (normalized device coordinates) */
in[0] = in[0] * 2 - 1;
in[1] = in[1] * 2 - 1;
in[2] = in[2] * 2 - 1;
// NOTE(review): the NDC point is smuggled through a full CMatrix4 (fields
// a11..a14) so the matrix * matrix product below acts as a matrix * vector
// product — this only works if CMatrix4::operator* combines the elements in
// the expected storage order; verify against the CMatrix4 implementation.
CMatrix4 forTestIn;
forTestIn.a11 = in[0];
forTestIn.a12 = in[1];
forTestIn.a13 = in[2];
forTestIn.a14 = in[3];
out = A*forTestIn;
// Perspective divide: homogeneous -> cartesian.
out.a11 /= out.a14;
out.a12 /= out.a14;
out.a13 /= out.a14;
*finPosX = out.a11;
*finPosY = out.a12;
*finPosZ = out.a13;
Here are my OrthoOffCenter and PerspectiveFOV functions:
// Builds an orthographic projection matrix (GL-style clip volume, members a11..a44).
// NOTE(review): the parameter order is Left, Top, Right, Bottom — callers must
// pass arguments in exactly that unusual order.
inline void CMatrix4::OrthoOffCenter(typeCoorDepl Left, typeCoorDepl Top, typeCoorDepl Right, typeCoorDepl Bottom, typeCoorDepl Near, typeCoorDepl Far)
{
// X axis: scale and center offset
a11 = 2 / (Right - Left);
a12 = 0.0f;
a13 = 0.0f;
a14 = (Left + Right) / (Left - Right);

// Y axis: scale and center offset
a21 = 0.0f;
a22 = 2 / (Top - Bottom);
a23 = 0.0f;
a24 = (Bottom + Top) / (Bottom - Top);

// Z axis: Near/Far deliberately swapped relative to the classic
// -2/(Far - Near) form because of a depth-buffer problem (original note)
a31 = 0.0f;
a32 = 0.0f;
a33 = -2/(Near - Far);
a34 = (Far + Near) / (Near - Far);

// homogeneous row
a41 = 0.0f;
a42 = 0.0f;
a43 = 0.0f;
a44 = 1.0f;
}
// Builds a perspective projection matrix from a vertical field of view (radians).
// NOTE(review): this follows the Direct3D convention (depth mapped to [0, 1],
// w taken from +z via a43 = 1) while OrthoOffCenter produces a GL-style matrix —
// mixing the two conventions is worth double checking.
inline void CMatrix4::PerspectiveFOV(typeCoorDepl Fov, typeCoorDepl Ratio, typeCoorDepl Near, typeCoorDepl Far)
{
const typeCoorDepl yScale = 1.0f / std::tan(Fov / 2);  // cot(fov / 2)
const typeCoorDepl xScale = yScale / Ratio;            // corrected by the aspect ratio
const typeCoorDepl coeff  = Far / (Far - Near);        // depth range mapping

a11 = xScale; a12 = 0.0f;   a13 = 0.0f;   a14 = 0.0f;
a21 = 0.0f;   a22 = yScale; a23 = 0.0f;   a24 = 0.0f;
a31 = 0.0f;   a32 = 0.0f;   a33 = coeff;  a34 = Near * -coeff;
a41 = 0.0f;   a42 = 0.0f;   a43 = 1.0f;   a44 = 0.0f;
// Tweaking a14 / a24 shifts the "center of rotation" (original note).
}

If you draw something on the viewport the following transformations are performed:
Transformation by the modelview matrix (which may consists of a separated model matrix and view matrix).
Transformation by the projection matrix (regardless of whether you have perspective or orthographic projection).
After this transformations the coordinates are in the Normalized Device Coordinates space (NDC). The NDC is in a range from (-1,-1,-1) to (1,1,1).
If you want to convert back a point from the Normalized Device Coordinates (NDC) to the world space you have to do the exactly reverse transformations:
Transform back by the inverse projection matrix.
Transform back by the inverse modelview matrix.
See the answers to the Stack Overflow question "inverting a 4x4 matrix" to see how to calculate an inverse matrix:
// Computes the inverse of a 4*4 matrix with the cofactor (adjugate) method,
// following the well-known GLM implementation (glm::inverse).
// T_MD / T_MS must support 2D indexing (m[row][col]).
// The result is undefined for singular matrices (a zero determinant causes a
// division by zero below).
// NOTE(review): _TMat44Helper is used here but defined further down in this
// listing — in a real source file its definition must precede this template.
template < typename T_MD, typename T_MS = T_MD, typename T_SCALAR = float >
void mat44_inverse( T_MD &res, const T_MS &m )
{
// 2x2 sub-determinants (the building blocks of the 3x3 cofactors)
T_SCALAR Coef00 = (T_SCALAR)(m[2][2] * m[3][3] - m[3][2] * m[2][3]);
T_SCALAR Coef02 = (T_SCALAR)(m[1][2] * m[3][3] - m[3][2] * m[1][3]);
T_SCALAR Coef03 = (T_SCALAR)(m[1][2] * m[2][3] - m[2][2] * m[1][3]);
T_SCALAR Coef04 = (T_SCALAR)(m[2][1] * m[3][3] - m[3][1] * m[2][3]);
T_SCALAR Coef06 = (T_SCALAR)(m[1][1] * m[3][3] - m[3][1] * m[1][3]);
T_SCALAR Coef07 = (T_SCALAR)(m[1][1] * m[2][3] - m[2][1] * m[1][3]);
T_SCALAR Coef08 = (T_SCALAR)(m[2][1] * m[3][2] - m[3][1] * m[2][2]);
T_SCALAR Coef10 = (T_SCALAR)(m[1][1] * m[3][2] - m[3][1] * m[1][2]);
T_SCALAR Coef11 = (T_SCALAR)(m[1][1] * m[2][2] - m[2][1] * m[1][2]);
T_SCALAR Coef12 = (T_SCALAR)(m[2][0] * m[3][3] - m[3][0] * m[2][3]);
T_SCALAR Coef14 = (T_SCALAR)(m[1][0] * m[3][3] - m[3][0] * m[1][3]);
T_SCALAR Coef15 = (T_SCALAR)(m[1][0] * m[2][3] - m[2][0] * m[1][3]);
T_SCALAR Coef16 = (T_SCALAR)(m[2][0] * m[3][2] - m[3][0] * m[2][2]);
T_SCALAR Coef18 = (T_SCALAR)(m[1][0] * m[3][2] - m[3][0] * m[1][2]);
T_SCALAR Coef19 = (T_SCALAR)(m[1][0] * m[2][2] - m[2][0] * m[1][2]);
T_SCALAR Coef20 = (T_SCALAR)(m[2][0] * m[3][1] - m[3][0] * m[2][1]);
T_SCALAR Coef22 = (T_SCALAR)(m[1][0] * m[3][1] - m[3][0] * m[1][1]);
T_SCALAR Coef23 = (T_SCALAR)(m[1][0] * m[2][1] - m[2][0] * m[1][1]);
// Pack the sub-determinants into 4-component vectors for element-wise math.
_TMat44Helper<T_SCALAR> Fac0(Coef00, Coef00, Coef02, Coef03);
_TMat44Helper<T_SCALAR> Fac1(Coef04, Coef04, Coef06, Coef07);
_TMat44Helper<T_SCALAR> Fac2(Coef08, Coef08, Coef10, Coef11);
_TMat44Helper<T_SCALAR> Fac3(Coef12, Coef12, Coef14, Coef15);
_TMat44Helper<T_SCALAR> Fac4(Coef16, Coef16, Coef18, Coef19);
_TMat44Helper<T_SCALAR> Fac5(Coef20, Coef20, Coef22, Coef23);
_TMat44Helper<T_SCALAR> Vec0((T_SCALAR)m[1][0], (T_SCALAR)m[0][0], (T_SCALAR)m[0][0], (T_SCALAR)m[0][0]);
_TMat44Helper<T_SCALAR> Vec1((T_SCALAR)m[1][1], (T_SCALAR)m[0][1], (T_SCALAR)m[0][1], (T_SCALAR)m[0][1]);
_TMat44Helper<T_SCALAR> Vec2((T_SCALAR)m[1][2], (T_SCALAR)m[0][2], (T_SCALAR)m[0][2], (T_SCALAR)m[0][2]);
_TMat44Helper<T_SCALAR> Vec3((T_SCALAR)m[1][3], (T_SCALAR)m[0][3], (T_SCALAR)m[0][3], (T_SCALAR)m[0][3]);
// Rows of the adjugate matrix, before the checkerboard sign pattern.
_TMat44Helper<T_SCALAR> Inv0(Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2);
_TMat44Helper<T_SCALAR> Inv1(Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4);
_TMat44Helper<T_SCALAR> Inv2(Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5);
_TMat44Helper<T_SCALAR> Inv3(Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5);
// Checkerboard sign pattern of the cofactor expansion.
_TMat44Helper<T_SCALAR> SignA((T_SCALAR)+1.0, (T_SCALAR)-1.0, (T_SCALAR)+1.0, (T_SCALAR)-1.0);
_TMat44Helper<T_SCALAR> SignB((T_SCALAR)-1.0, (T_SCALAR)+1.0, (T_SCALAR)-1.0, (T_SCALAR)+1.0);
std::array< _TMat44Helper<T_SCALAR>, 4 > Inverse{ Inv0 * SignA, Inv1 * SignB, Inv2 * SignA, Inv3 * SignB };
// Determinant = dot product of the first row of m with the first column of
// the adjugate (Laplace expansion along the first row).
_TMat44Helper<T_SCALAR> Row0(Inverse[0][0], Inverse[1][0], Inverse[2][0], Inverse[3][0]);
_TMat44Helper<T_SCALAR> Dot0( Row0 );
Dot0 = Dot0 * _TMat44Helper<T_SCALAR>( (T_SCALAR)m[0][0], (T_SCALAR)m[0][1], (T_SCALAR)m[0][2], (T_SCALAR)m[0][3] );
T_SCALAR Dot1 = (Dot0[0] + Dot0[1]) + (Dot0[2] + Dot0[3]);
T_SCALAR OneOverDeterminant = static_cast<T_SCALAR>(1.0) / Dot1;
// inverse = adjugate / determinant
for ( int inx1 = 0; inx1 < 4; inx1 ++ )
{
for ( int inx2 = 0; inx2 < 4; inx2 ++ )
res[inx1][inx2] = Inverse[inx1][inx2] * OneOverDeterminant;
}
}
// 4-component vector helper with element-wise arithmetic; internal building
// block for the 4*4 cofactor matrix inversion.
template< typename T = float >
struct _TMat44Helper
{
_TMat44Helper( void ) {}  // components intentionally left uninitialized
_TMat44Helper( T x, T y, T z, T w ) : m_v( { x, y, z, w } ) {}
_TMat44Helper( const std::array< T, 4 > & v ) : m_v( v ) {}
_TMat44Helper( const T v[4] ) { std::memcpy( m_v.data(), v, 4 * sizeof( T ) ); }
_TMat44Helper( const _TMat44Helper &src ) : m_v( src.m_v ) {}
_TMat44Helper & operator = ( const _TMat44Helper<T> &src ) { m_v = src.m_v; return *this; }

// element access (no bounds check)
T & operator[]( int inx ) { return m_v[inx]; }

// scale every component by a scalar
template< typename S >
_TMat44Helper<T> operator *( S s )
{
_TMat44Helper scaled;
for ( int c = 0; c < 4; ++ c )
scaled.m_v[c] = m_v[c] * (T)s;
return scaled;
}

// element-wise sum
template< typename S >
_TMat44Helper<T> operator +( const _TMat44Helper<S> &rhs )
{
_TMat44Helper<T> sum;
for ( int c = 0; c < 4; ++ c )
sum.m_v[c] = m_v[c] + (T)rhs.m_v[c];
return sum;
}

// element-wise difference
template< typename S >
_TMat44Helper<T> operator -( const _TMat44Helper<S> &rhs )
{
_TMat44Helper<T> diff;
for ( int c = 0; c < 4; ++ c )
diff.m_v[c] = m_v[c] - (T)rhs.m_v[c];
return diff;
}

// element-wise (Hadamard) product
template< typename S >
_TMat44Helper<T> operator *( const _TMat44Helper<S> &rhs )
{
_TMat44Helper<T> prod;
for ( int c = 0; c < 4; ++ c )
prod.m_v[c] = m_v[c] * (T)rhs.m_v[c];
return prod;
}

std::array< T, 4 > m_v;  // the four components
};
To map a point from viewport (screen) coordinates to the NDC you have to map the X-coordinate and the Y-coordinate to the range (-1.0, 1.0).
The Z-coordinate is more tricky, for this you have to access the depth buffer (The depth buffer can be rendered to a texture).
Read the depth at the XY position and convert it to the NDC:
X_ndc = X_screen * 2.0 / VP_sizeX - 1.0;
Y_ndc = Y_screen * 2.0 / VP_sizeY - 1.0;
Z_ndc = 2.0 * depth - 1.0;
In your case you have only a 2D geometry and an orthographic projection, so you do not have to care about the depth, because the Z coordinate is always 0.0.
If you transform a point by the projection matrix (or the inverse projection matrix) you will get a point in a homogeneous coordinate system. To transform a point from a homogeneous coordinate system to a cartesian coordinate system, you have to divide its X, Y and Z coordinates by its weight (the W component).
A general transformation function of a point in a cartesian coordinate system with a homogeneous 4*4 matrix may look like this:
using TVec3 = std::array<float, 3>;
using TVec4 = std::array<float, 4>;
using TMat4 = std::array<TVec4, 4>;

// Transforms a cartesian 3D point by a column-major homogeneous 4*4 matrix
// (mat[3] is the translation column) and performs the perspective divide.
// Returns a point with all components set to +infinity when the homogeneous
// weight is zero (degenerate input).
TVec3 Transform( const TVec3 &vec, const TMat4 &mat )
{
// bug fix: the temporary was declared as a bare "h { ... }", which does not
// compile — it has to be a TVec4
TVec4 h {
vec[0] * mat[0][0] + vec[1] * mat[1][0] + vec[2] * mat[2][0] + mat[3][0],
vec[0] * mat[0][1] + vec[1] * mat[1][1] + vec[2] * mat[2][1] + mat[3][1],
vec[0] * mat[0][2] + vec[1] * mat[1][2] + vec[2] * mat[2][2] + mat[3][2],
vec[0] * mat[0][3] + vec[1] * mat[1][3] + vec[2] * mat[2][3] + mat[3][3]
};
if ( h[3] == 0.0f )
{
// bug fix: TVec3{ 1.0e99 } is ill-formed (1.0e99 does not fit in a float
// and brace-initialization rejects the narrowing); use an explicit
// infinity as the "division by zero" marker instead
const float inf = std::numeric_limits<float>::infinity();
return TVec3{ inf, inf, inf };
}
return TVec3{ h[0]/h[3], h[1]/h[3], h[2]/h[3] };
}
See the following WebGL example, where the color of the object, where the mouse hovers, is found by calculating back from the screen coordinates to the model coordinates.
<script type="text/javascript">
// Vertex shader: transforms the 2D quad corners by the model-view and the
// projection matrix.
draw_vert =
"precision mediump float; \n" +
"attribute vec2 inPos; \n" +
"uniform mat4 u_projectionMat44;" +
"uniform mat4 u_modelViewMat44;" +
"void main()" +
"{" +
"    vec4 viewPos = u_modelViewMat44 * vec4( inPos.xy, 0.0, 1.0 );" +
"    gl_Position  = u_projectionMat44 * viewPos;" +
"}";
// Fragment shader: flat color per object.
draw_frag =
"precision mediump float; \n" +
"uniform vec3 u_color;" +
"void main()" +
"{" +
"    gl_FragColor = vec4( u_color.xyz, 1.0 );" +
"}";
// Float32Array fallback for very old browsers.
glArrayType = typeof Float32Array !="undefined" ? Float32Array : ( typeof WebGLFloatArray != "undefined" ? WebGLFloatArray : Array );
// Returns a new 4x4 identity matrix (flat array of 16 elements).
function IdentityMat44() {
var m = new glArrayType(16);
for ( var i = 0; i < 16; ++ i )
m[i] = (i % 5 == 0) ? 1 : 0;  // indices 0, 5, 10, 15 form the diagonal
return m;
};
// Returns matA rotated by angRad (radians) around the given principal axis
// (0 = X, 1 = Y, 2 = Z); matA itself is not modified.
function RotateAxis(matA, angRad, axis) {
var aMap = [ [1, 2], [2, 0], [0, 1] ];  // the two rows affected by each rotation
var a0 = aMap[axis][0], a1 = aMap[axis][1];
var sinAng = Math.sin(angRad), cosAng = Math.cos(angRad);
var matB = new glArrayType(16);
for ( var i = 0; i < 16; ++ i ) matB[i] = matA[i];
// standard 2D rotation applied to the two affected basis vectors
for ( var i = 0; i < 3; ++ i ) {
matB[a0*4+i] = matA[a0*4+i] * cosAng + matA[a1*4+i] * sinAng;
matB[a1*4+i] = matA[a0*4+i] * -sinAng + matA[a1*4+i] * cosAng;
}
return matB;
}
// Returns matA post-multiplied by a translation (column-major layout:
// the translation lives in elements 12..14); matA itself is not modified.
function Translate( matA, trans ) {
var matB = new glArrayType(16);
for ( var i = 0; i < 16; ++ i ) matB[i] = matA[i];
// new translation = basis vectors * trans + old translation
for ( var i = 0; i < 3; ++ i )
matB[12+i] = matA[i] * trans[0] + matA[4+i] * trans[1] + matA[8+i] * trans[2] + matA[12+i];
return matB;
}
// Returns matA with an axis-wise scale applied to its upper 3x3 part;
// matA itself is not modified.
function Scale( matA, scale ) {
var matB = new glArrayType(16);
for ( var i = 0; i < 16; ++ i ) matB[i] = matA[i];
// bug fix: use the per-axis factor scale[a] (the old code applied scale[0]
// to every axis AND to the translation row a == 3); identical result for
// the demo's uniform Scale(Identity, [0.3, 0.3, 0.3]) call
for ( var a = 0; a < 3; ++ a )
for ( var i = 0; i < 3; ++ i )
matB[a*4+i] = matA[a*4+i] * scale[a];
return matB;
}
// Orthographic projection matrix in column-major order, as expected by
// gl.uniformMatrix4fv with transpose == false and by Transform().
Ortho = function( l, r, t, b, n, f ) {
var fn = f + n;
var f_n = f - n;
var m = IdentityMat44();
m[0] = 2/(r-l); m[1] = 0;       m[2] = 0;       m[3] = 0;
m[4] = 0;       m[5] = 2/(t-b); m[6] = 0;       m[7] = 0;
m[8] = 0;       m[9] = 0;       m[10] = -2/f_n; m[11] = 0;
// bug fix: in a column-major matrix the translation terms belong in the 4th
// column (m[12..14]); the old code stored the depth offset at m[11] and had
// no x/y offsets at all, which only went unnoticed because the demo uses
// symmetric bounds and n = -1, f = 1 (so every offset was 0)
m[12] = -(r+l)/(r-l); m[13] = -(t+b)/(t-b); m[14] = -fn/f_n; m[15] = 1;
return m;
}
// Element-wise 4-component vector helpers used by mat44_inverse below.
vec4_add = function( a, b ) { return [ a[0]+b[0], a[1]+b[1], a[2]+b[2], a[3]+b[3] ]; }
vec4_sub = function( a, b ) { return [ a[0]-b[0], a[1]-b[1], a[2]-b[2], a[3]-b[3] ]; }
vec4_mul = function( a, b ) { return [ a[0]*b[0], a[1]*b[1], a[2]*b[2], a[3]*b[3] ]; }
vec4_scale = function( a, s ) { return [ a[0]*s, a[1]*s, a[2]*s, a[3]*s ]; }
// Inverse of a flat (16-element) 4x4 matrix with the cofactor (adjugate)
// method — the same scheme as glm::inverse. Undefined for singular matrices
// (a zero determinant leads to Infinity entries).
mat44_inverse = function( m ) {
// 2x2 sub-determinants (building blocks of the 3x3 cofactors)
var Coef00 = m[2*4+2] * m[3*4+3] - m[3*4+2] * m[2*4+3];
var Coef02 = m[1*4+2] * m[3*4+3] - m[3*4+2] * m[1*4+3];
var Coef03 = m[1*4+2] * m[2*4+3] - m[2*4+2] * m[1*4+3];
var Coef04 = m[2*4+1] * m[3*4+3] - m[3*4+1] * m[2*4+3];
var Coef06 = m[1*4+1] * m[3*4+3] - m[3*4+1] * m[1*4+3];
var Coef07 = m[1*4+1] * m[2*4+3] - m[2*4+1] * m[1*4+3];
var Coef08 = m[2*4+1] * m[3*4+2] - m[3*4+1] * m[2*4+2];
var Coef10 = m[1*4+1] * m[3*4+2] - m[3*4+1] * m[1*4+2];
var Coef11 = m[1*4+1] * m[2*4+2] - m[2*4+1] * m[1*4+2];
var Coef12 = m[2*4+0] * m[3*4+3] - m[3*4+0] * m[2*4+3];
var Coef14 = m[1*4+0] * m[3*4+3] - m[3*4+0] * m[1*4+3];
var Coef15 = m[1*4+0] * m[2*4+3] - m[2*4+0] * m[1*4+3];
var Coef16 = m[2*4+0] * m[3*4+2] - m[3*4+0] * m[2*4+2];
var Coef18 = m[1*4+0] * m[3*4+2] - m[3*4+0] * m[1*4+2];
var Coef19 = m[1*4+0] * m[2*4+2] - m[2*4+0] * m[1*4+2];
var Coef20 = m[2*4+0] * m[3*4+1] - m[3*4+0] * m[2*4+1];
var Coef22 = m[1*4+0] * m[3*4+1] - m[3*4+0] * m[1*4+1];
var Coef23 = m[1*4+0] * m[2*4+1] - m[2*4+0] * m[1*4+1];
// pack them into 4-component vectors for element-wise math
var Fac0 = [Coef00, Coef00, Coef02, Coef03];
var Fac1 = [Coef04, Coef04, Coef06, Coef07];
var Fac2 = [Coef08, Coef08, Coef10, Coef11];
var Fac3 = [Coef12, Coef12, Coef14, Coef15];
var Fac4 = [Coef16, Coef16, Coef18, Coef19];
var Fac5 = [Coef20, Coef20, Coef22, Coef23];
var Vec0 = [ m[1*4+0], m[0*4+0], m[0*4+0], m[0*4+0] ];
var Vec1 = [ m[1*4+1], m[0*4+1], m[0*4+1], m[0*4+1] ];
var Vec2 = [ m[1*4+2], m[0*4+2], m[0*4+2], m[0*4+2] ];
var Vec3 = [ m[1*4+3], m[0*4+3], m[0*4+3], m[0*4+3] ];
// rows of the adjugate, before the checkerboard sign pattern
var Inv0 = vec4_add( vec4_sub( vec4_mul(Vec1, Fac0), vec4_mul(Vec2, Fac1) ), vec4_mul( Vec3, Fac2 ) );
var Inv1 = vec4_add( vec4_sub( vec4_mul(Vec0, Fac0), vec4_mul(Vec2, Fac3) ), vec4_mul( Vec3, Fac4 ) );
var Inv2 = vec4_add( vec4_sub( vec4_mul(Vec0, Fac1), vec4_mul(Vec1, Fac3) ), vec4_mul( Vec3, Fac5 ) );
var Inv3 = vec4_add( vec4_sub( vec4_mul(Vec0, Fac2), vec4_mul(Vec1, Fac4) ), vec4_mul( Vec2, Fac5 ) );
var SignA = [+1.0, -1.0, +1.0, -1.0];
var SignB = [-1.0, +1.0, -1.0, +1.0];
var Inverse = [ vec4_mul(Inv0, SignA), vec4_mul(Inv1, SignB), vec4_mul(Inv2, SignA), vec4_mul(Inv3, SignB) ];
// determinant = first row of m dotted with the first column of the adjugate
var Row0 = [Inverse[0][0], Inverse[1][0], Inverse[2][0], Inverse[3][0] ];
var Dot0 = [Row0[0], Row0[1], Row0[2], Row0[3] ];
Dot0 = vec4_mul( Dot0, [ m[0], m[1], m[2], m[3] ] );
var Dot1 = (Dot0[0] + Dot0[1]) + (Dot0[2] + Dot0[3]);
var OneOverDeterminant = 1 / Dot1;
// inverse = adjugate / determinant
var res = IdentityMat44();
for ( var inx1 = 0; inx1 < 4; inx1 ++ ) {
for ( var inx2 = 0; inx2 < 4; inx2 ++ )
res[inx1*4+inx2] = Inverse[inx1][inx2] * OneOverDeterminant;
}
return res;
}
Transform = function(vec, mat) {
var h = [
vec[0] * mat[0*4+0] + vec[1] * mat[1*4+0] + vec[2] * mat[2*4+0] + mat[3*4+0],
vec[0] * mat[0*4+1] + vec[1] * mat[1*4+1] + vec[2] * mat[2*4+1] + mat[3*4+1],
vec[0] * mat[0*4+2] + vec[1] * mat[1*4+2] + vec[2] * mat[2*4+2] + mat[3*4+2],
vec[0] * mat[0*4+3] + vec[1] * mat[1*4+3] + vec[2] * mat[2*4+3] + mat[3*4+3] ]
if ( h[3] == 0.0 )
return [0, 0, 0]
return [ h[0]/h[3], h[1]/h[3], h[2]/h[3] ];
}
// shader program object: minimal helper around WebGL program creation,
// uniform location caching and uniform setting
var ShaderProgram = {};
// Compiles and links the given shader stages and caches the locations of the
// named uniforms on the returned program object; returns 0 on failure.
ShaderProgram.Create = function( shaderList, uniformNames ) {
var shaderObjs = [];
for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
var shderObj = this.CompileShader( shaderList[i_sh].source, shaderList[i_sh].stage );
if ( shderObj == 0 )
return 0;
shaderObjs.push( shderObj );
}
var progObj = this.LinkProgram( shaderObjs )
if ( progObj != 0 ) {
// NOTE(review): "unifomLocation" is a typo, but it is used consistently
// throughout this object, so it stays functional
progObj.unifomLocation = {};
for ( var i_n = 0; i_n < uniformNames.length; ++ i_n ) {
var name = uniformNames[i_n];
progObj.unifomLocation[name] = gl.getUniformLocation( progObj, name );
}
}
return progObj;
}
// thin wrappers over the gl uniform / program API
ShaderProgram.Use = function( progObj ) { gl.useProgram( progObj ); }
ShaderProgram.SetUniformInt = function( progObj, name, val ) { gl.uniform1i( progObj.unifomLocation[name], val ); }
ShaderProgram.SetUniform2f = function( progObj, name, arr ) { gl.uniform2fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniform3f = function( progObj, name, arr ) { gl.uniform3fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniformMat44 = function( progObj, name, mat ) { gl.uniformMatrix4fv( progObj.unifomLocation[name], false, mat ); }
// Compiles a single shader stage; alerts and returns 0 on compile errors.
ShaderProgram.CompileShader = function( source, shaderStage ) {
var shaderObj = gl.createShader( shaderStage );
gl.shaderSource( shaderObj, source );
gl.compileShader( shaderObj );
var status = gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS );
if ( !status ) alert(gl.getShaderInfoLog(shaderObj));
return status ? shaderObj : 0;
}
// Attaches and links the shader objects; alerts and returns 0 on link errors.
ShaderProgram.LinkProgram = function( shaderObjs ) {
var prog = gl.createProgram();
for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
gl.attachShader( prog, shaderObjs[i_sh] );
gl.linkProgram( prog );
// NOTE(review): `status` is assigned without var and leaks into global scope
status = gl.getProgramParameter( prog, gl.LINK_STATUS );
if ( !status ) alert("Could not initialise shaders");
gl.useProgram( null );
return status ? prog : 0;
}
// Renders the three rotating quads and picks the color of the quad under the
// mouse cursor by transforming the cursor position from NDC back to each
// quad's model space (inverse projection, then inverse model matrix).
function drawScene(){
var canvas = document.getElementById( "camera-canvas" );
var currentTime = Date.now();
var aspect = canvas.width / canvas.height;
var matOrtho = Ortho( -aspect, aspect, 1, -1, -1, 1 );
var matOrthoInv = mat44_inverse( matOrtho );

// frame setup
gl.viewport( 0, 0, canvas.width, canvas.height );
gl.enable( gl.DEPTH_TEST );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
ShaderProgram.Use( progDraw );
gl.enableVertexAttribArray( progDraw.inPos );
gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
ShaderProgram.SetUniformMat44( progDraw, "u_projectionMat44", matOrtho );

var col = [ [1.0,0.0,0.0], [1.0,1.0,0.0], [0.0,0.0,1.0] ];
var invMat = [];
for ( var i = 0; i < 3; ++ i ) {
// model matrix: scale the quad, then place it on a circle around the center
var modelMat = Scale( IdentityMat44(), [ 0.3, 0.3, 0.3 ] );
var angRad = CalcAng( currentTime, 20.0 ) + i * Math.PI * 2 / 3;
modelMat[12] = Math.cos(angRad) * 0.6;
modelMat[13] = Math.sin(angRad) * 0.6;
invMat.push( mat44_inverse( modelMat ) );  // keep the inverse for picking
ShaderProgram.SetUniformMat44( progDraw, "u_modelViewMat44", modelMat );
ShaderProgram.SetUniform3f( progDraw, "u_color", col[i] );
gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );
}
// bug fix: the attribute location is cached as progDraw.inPos — the old code
// passed the undefined property progDraw.pos
gl.disableVertexAttribArray( progDraw.inPos );

// picking: mouse -> NDC -> view space -> model space of each quad
var newColor = "#000000";
var colorMap = ["#ff0000", "#ffff00", "#0000ff" ];
var pos = [-1, -1];
if (mousePos[0] > 0 && mousePos[1] > 0 ) {
pos = [2.0 * mousePos[0] / canvas.width - 1.0, 1.0 - 2.0 * mousePos[1] / canvas.height];
for ( var i = 0; i < 3; ++ i ) {
var testVec = [ pos[0], pos[1], 0 ];
testVec = Transform(testVec, matOrthoInv);   // NDC -> view space
testVec = Transform(testVec, invMat[i]);     // view -> model space
// inside the unit quad? (quad vertices span [-1, 1] in model space)
if (testVec[0] > -1.0 && testVec[0] < 1.0 && testVec[1] > -1.0 && testVec[1] < 1.0 ) {
newColor = colorMap[i];
}
}
}
document.getElementById( "color" ).value = newColor;
document.getElementById( "mouseX" ).innerHTML = pos[0];
document.getElementById( "mouseY" ).innerHTML = pos[1];
}
// Timestamp of scene start; assigned in cameraStart().
var startTime;
// Fractional part of a number (works for negative values via trunc).
function Fract( val ) {
return val - Math.trunc( val );
}
// Maps the elapsed time to an angle in [0, 2*PI), repeating every `intervall` seconds.
function CalcAng( currentTime, intervall ) {
return Fract( (currentTime - startTime) / (1000*intervall) ) * 2.0 * Math.PI;
}
// Ping-pong interpolation between range[0] and range[1], one full cycle
// every `intervall` seconds.
function CalcMove( currentTime, intervall, range ) {
// bug fix: call the global Fract directly (the old `self.Fract` relied on
// the browser's `self` alias for window) and do not redeclare `pos`
var pos = Fract( (currentTime - startTime) / (1000*intervall) ) * 2.0;
pos = pos < 1.0 ? pos : (2.0 - pos);
return range[0] + (range[1] - range[0]) * pos;
}
// Last known mouse position over the canvas, [-1, -1] while outside of it.
var mousePos = [-1, -1];
var gl;          // the WebGL rendering context
var prog;        // NOTE(review): never assigned — the draw program actually lives in the implicit global `progDraw`
var bufObj = {}; // vertex (pos) and index (inx) buffer objects
// Entry point (body onload): creates the WebGL context, the shader program
// and the quad geometry, then starts the render loop.
function cameraStart() {
var canvas = document.getElementById( "camera-canvas");
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return;
progDraw = ShaderProgram.Create(
[ { source : draw_vert, stage : gl.VERTEX_SHADER },
{ source : draw_frag, stage : gl.FRAGMENT_SHADER }
],
[ "u_projectionMat44", "u_modelViewMat44", "u_color"] );
// bug fix: the program is stored in progDraw — the old code tested the
// unrelated, never-assigned variable `prog`, and only after already using
// the (possibly invalid) program in getAttribLocation
if ( progDraw == 0 )
return;
progDraw.inPos = gl.getAttribLocation( progDraw, "inPos" );
// unit quad (two triangles) used by all three objects
var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
var inx = [ 0, 1, 2, 0, 2, 3 ];
bufObj.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
bufObj.inx = gl.createBuffer();
bufObj.inx.len = inx.length;
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
startTime = Date.now();
setInterval(drawScene, 50);  // ~20 fps render loop
}
// Global mouse tracking: stores the cursor position relative to the canvas in
// the global `mousePos`, or [-1, -1] while the cursor is outside of it.
(function() {
document.onmousemove = handleMouseMove;
function handleMouseMove(event) {
var dot, eventDoc, doc, body, pageX, pageY;
event = event || window.event; // IE-ism
// Fallback for old IE: derive pageX/pageY from clientX/clientY plus scroll offsets.
if (event.pageX == null && event.clientX != null) {
eventDoc = (event.target && event.target.ownerDocument) || document;
doc = eventDoc.documentElement;
body = eventDoc.body;
event.pageX = event.clientX +
(doc && doc.scrollLeft || body && body.scrollLeft || 0) -
(doc && doc.clientLeft || body && body.clientLeft || 0);
event.pageY = event.clientY +
(doc && doc.scrollTop || body && body.scrollTop || 0) -
(doc && doc.clientTop || body && body.clientTop || 0 );
}
// position relative to the canvas' top-left corner
var canvas = document.getElementById( "camera-canvas");
var x = event.pageX - canvas.offsetLeft;
var y = event.pageY - canvas.offsetTop;
mousePos = [-1, -1];
if ( x >= 0 && x < canvas.width && y >= 0 && y < canvas.height ) {
mousePos = [x, y];
}
}
})();
</script>
<body onload="cameraStart();">
<div style="margin-left: 260px;">
<div style="float: right; width: 100%; background-color: #CCF;">
<form name="inputs">
<table>
<tr> <td> <input type="color" value="#000000" id="color" disabled></td> </tr>
<tr> <td> <span id="mouseX">0</span> </td> </tr>
<tr> <td> <span id="mouseY">0</span> </td> </tr>
</table>
</form>
</div>
<div style="float: right; width: 260px; margin-left: -260px;">
<canvas id="camera-canvas" style="border: none;" width="256" height="256"></canvas>
</div>
<div style="clear: both;"></div>
</div>
</body>

Related

Shader - Anti fisheye by "pulling" pixels

I would like to fix the distortion of this image :
Sorry for the quality, but this is the best example I could find.
I don't know if fixing this distortion is possible (I would like to have straight doors, and a straight ceiling), but basically, instead of pushing the pixels outside of the image (red arrows) that is adding a blur effect, I would like to do the opposite (green arrows), to pull the pixels towards the center.
If you have any idea, that would be awesome. Other solutions are welcomed as well !
For the definition of the fisheye effect, you've to associate an angle (alpha) to the diagonal (d) of the viewport.
The diagonal of the viewport is the diameter of the circle which encloses the entire viewport.
The relation between the radius of the perimeter circle (r) and the angle (alpha) is:
r = tan(alpha / 2)
In the following eye_angle corresponds to alpha and half_dist to r:
float half_angle = eye_angle/2.0;
float half_dist = tan(half_angle);
With the aspect ratio of the viewport (aspect) and the normalized device position of the fragment on the viewport (ndcPos), the position P can be calculated. In normalized device space, x and y are in the range [-1, 1]:
vec2 vp_scale = vec2(aspect, 1.0);
vec2 P = ndcPos * vp_scale;
For each point (P) on the viewport, the relative distance (rel_dist) to the center of the viewport in relation to the perimeter circle has to be calculated, and the relative position (rel_P) of the point (P) in relation to the aspect ratio is needed:
float vp_dia = length(vp_scale);
float rel_dist = length(P) / vp_dia;
vec2 rel_P = normalize(P) / normalize(vp_scale);
The fisheye effect is caused by the projection of a spherical surface on a plane. To calculate from and to the projection, the relation between the arc length and the distance to the center of the plane has to be found:
If the radius of a circle is 1, the length of the arc is equal the angle of the arc segment in radians. So the relation between the distance to the point P and the angle beta is:
|P|/r = tan(beta)
beta = atan(|P|/r)
If the projection from the spherical surface to the plane is:
float beta = rel_dist * half_angle;
vec3 pos_prj = rel_P * tan(beta) / half_dist;
And the projection from the plane to the spherical surface is:
float beta = atan(rel_dist * half_dist);
vec2 pos_prj = rel_P * beta / half_angle;
See the following WebGL example, which uses a fragment shader where the algorithm is implemented. The angle alpha is set by the uniform variable u_alpha.
If u_alpha > 0.0, then the projection form spherical surface to the plane is calculated.
If u_alpha < 0.0, then the projection from the plane to the spherical surface is calculated.
// Self-contained fisheye demo: compiles the shaders embedded in the page,
// draws a full-screen textured quad and lets the fragment shader apply the
// (un)distortion controlled by the "scale" slider.
(function loadscene() {
var canvas, gl, vp_size, texture, prog, bufObj = {};
// One-time setup: context, shader program, geometry; then starts rendering.
function initScene() {
canvas = document.getElementById( "ogl-canvas");
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return;
texture = new Texture( "https://raw.githubusercontent.com/Rabbid76/graphics-snippets/master/resource/texture/supermario.jpg" );
texture.bound = false;
// NOTE(review): progDraw and status below leak into global scope, and
// linkProgram runs once per loop iteration — only the second link (with
// both stages attached) is the one that matters.
progDraw = gl.createProgram();
for (let i = 0; i < 2; ++i) {
let source = document.getElementById(i==0 ? "draw-shader-vs" : "draw-shader-fs").text;
let shaderObj = gl.createShader(i==0 ? gl.VERTEX_SHADER : gl.FRAGMENT_SHADER);
gl.shaderSource(shaderObj, source);
gl.compileShader(shaderObj);
let status = gl.getShaderParameter(shaderObj, gl.COMPILE_STATUS);
if (!status) alert(gl.getShaderInfoLog(shaderObj));
gl.attachShader(progDraw, shaderObj);
gl.linkProgram(progDraw);
}
status = gl.getProgramParameter(progDraw, gl.LINK_STATUS);
if ( !status ) alert(gl.getProgramInfoLog(progDraw));
// cache attribute / uniform locations
progDraw.inPos = gl.getAttribLocation(progDraw, "inPos");
progDraw.u_time = gl.getUniformLocation(progDraw, "u_time");
progDraw.u_resolution = gl.getUniformLocation(progDraw, "u_resolution");
progDraw.u_texture = gl.getUniformLocation(progDraw, "u_texture");
progDraw.u_angle = gl.getUniformLocation(progDraw, "u_angle");
gl.useProgram(progDraw);
// full-screen quad (two triangles)
var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
var inx = [ 0, 1, 2, 0, 2, 3 ];
bufObj.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
bufObj.inx = gl.createBuffer();
bufObj.inx.len = inx.length;
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
gl.enableVertexAttribArray( progDraw.inPos );
gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.enable( gl.DEPTH_TEST );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
window.onresize = resize;
resize();
requestAnimationFrame(render);
}
// Keeps the canvas at window size.
function resize() {
vp_size = [window.innerWidth, window.innerHeight];
canvas.width = vp_size[0];
canvas.height = vp_size[1];
}
// Per-frame draw: maps the slider value [0, 100] to an angle in [-0.9*PI, 0.9*PI].
function render(deltaMS) {
scale = document.getElementById( "scale" ).value / 100 * 2.0 - 1.0;
gl.viewport( 0, 0, canvas.width, canvas.height );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
// bind the real texture once it has finished loading (dummy until then)
texture.bound = texture.bound || texture.bind( 0 );
gl.uniform1i(progDraw.u_texture, 0);
gl.uniform1f(progDraw.u_time, deltaMS/2000.0);
gl.uniform2f(progDraw.u_resolution, canvas.width, canvas.height);
gl.uniform1f(progDraw.u_angle, scale * Math.PI * 0.9);
gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );
requestAnimationFrame(render);
}
// Asynchronously loaded 2D texture with a 1x1 dummy placeholder; the image
// is rescaled to power-of-two dimensions on a 2D canvas before upload.
class Texture {
constructor( name, dflt ) {
let texture = this;
this.dflt = dflt || [128,128,128,255]
let image = { "cx": this.dflt.w || 1, "cy": this.dflt.h || 1, "plane": this.dflt.p || this.dflt };
this.size = [image.cx, image.cy];
this.dummyObj = Texture.createTexture2D( image, true )
this.image = new Image(64,64);
this.image.setAttribute('crossorigin', 'anonymous');
this.image.onload = function () {
// round the image size up to the next power of two
let cx = 1 << 31 - Math.clz32(texture.image.naturalWidth);
if ( cx < texture.image.naturalWidth ) cx *= 2;
let cy = 1 << 31 - Math.clz32(texture.image.naturalHeight);
if ( cy < texture.image.naturalHeight ) cy *= 2;
var canvas = document.createElement( 'canvas' );
canvas.width = cx;
canvas.height = cy;
var context = canvas.getContext( '2d' );
context.drawImage( texture.image, 0, 0, canvas.width, canvas.height );
texture.textureObj = Texture.createTexture2D( canvas, true );
texture.size = [cx, cy];
}
this.image.src = name;
}
// Creates and configures a GL texture either from raw RGBA bytes
// (image.plane) or from a canvas / image element.
static createTexture2D( image, flipY ) {
let t = gl.createTexture();
gl.activeTexture( gl.TEXTURE0 );
gl.bindTexture( gl.TEXTURE_2D, t );
gl.pixelStorei( gl.UNPACK_FLIP_Y_WEBGL, flipY != undefined && flipY == true );
if ( image.cx && image.cy && image.plane )
gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, image.cx, image.cy, 0, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(image.plane) );
else
gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT );
gl.bindTexture( gl.TEXTURE_2D, null );
return t;
}
// Binds the loaded texture if available, else the dummy; returns whether
// the real texture was bound.
bind( texUnit = 0 ) {
gl.activeTexture( gl.TEXTURE0 + texUnit );
if ( this.textureObj ) {
gl.bindTexture( gl.TEXTURE_2D, this.textureObj );
return true;
}
gl.bindTexture( gl.TEXTURE_2D, this.dummyObj );
return false;
}
};
initScene();
})();
html,body { margin: 0; overflow: hidden; }
#gui { position : absolute; top : 0; left : 0; }
<script id="draw-shader-fs" type="x-shader/x-fragment">
// Fisheye (un)distortion shader:
//   u_angle >  0 : project from the spherical surface onto the plane
//   u_angle <  0 : project from the plane onto the spherical surface
//   u_angle == 0 : pass the texture through unchanged
precision mediump float;
uniform float u_time;        // unused here; kept for the generic framework
uniform vec2 u_resolution;   // viewport size in pixels
uniform float u_angle;       // signed fisheye angle (radians)
uniform sampler2D u_texture;
void main()
{
vec2 uv = gl_FragCoord.xy / u_resolution;
// normalized device coordinates, x and y in [-1, 1]
vec2 ndcPos = uv * 2.0 - 1.0;
float aspect = u_resolution.x / u_resolution.y;
float eye_angle = abs(u_angle);
float half_angle = eye_angle/2.0;
float half_dist = tan(half_angle);  // radius of the perimeter circle
vec2 vp_scale = vec2(aspect, 1.0);
vec2 P = ndcPos * vp_scale;
float vp_dia = length(vp_scale);
float rel_dist = length(P) / vp_dia;              // relative distance to the center
vec2 rel_P = normalize(P) / normalize(vp_scale);  // direction relative to the aspect
vec2 pos_prj = ndcPos;
if (u_angle > 0.0)
{
// sphere -> plane: |P'| = tan(beta) / tan(alpha/2)
float beta = rel_dist * half_angle;
pos_prj = rel_P * tan(beta) / half_dist;
}
else if (u_angle < 0.0)
{
// plane -> sphere: beta = atan(|P| * tan(alpha/2))
float beta = atan(rel_dist * half_dist);
pos_prj = rel_P * beta / half_angle;
}
// back to texture coordinates; discard fragments mapped outside the image
vec2 uv_prj = pos_prj * 0.5 + 0.5;
vec2 rangeCheck = step(vec2(0.0), uv_prj) * step(uv_prj, vec2(1.0));
if (rangeCheck.x * rangeCheck.y < 0.5)
discard;
vec4 texColor = texture2D(u_texture, uv_prj.st);
gl_FragColor = vec4( texColor.rgb, 1.0 );
}
</script>
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
// Pass-through vertex shader: inPos is already in clip space ([-1,1] quad).
void main()
{
    gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<canvas id="ogl-canvas" style="border: none"></canvas>
<form id="gui" name="inputs">
<input type="range" id="scale" min="0" max="100" value="20"/>
</form>
This kind of distortion is a property of a camera. If you have access to the camera used to take the picture, you can calibrate it in order to extract the deformation it applies. Otherwise, you can still use some general equations. Have a look at Paul Bourke's article, this Stack Overflow answer or this Shader Toy example, to see how it could be done.

OpenGL texture mapping not using image [closed]

Closed. This question needs debugging details. It is not currently accepting answers.
Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question.
Closed 4 years ago.
Improve this question
I want to map textures to an object(maybe cube).
But i don't want to use image.
I know that usually using image for texture mapping like this:
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
image = SOIL_load_image("cube.jpg", &width, &height, 0, SOIL_LOAD_RGB);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
But Is there any solution that not using image?
If there is color spectrum, I want to store the color information in a buffer and map this spectrum like a texture.
Please Let me know If there is any solution.
If you want to create a texture you have to allocate the memory for the color plane first.
The easiest way is to create an RGBA texture, because for one texel exactly 4 byte (32 bit) is needed and you do not have to worry about the alignment.
In c++ I recommend to use a std::vector:
int width = ...;
int height = ...;
std::vector<unsignd char> colorPlane( width * height * 4 ); // * 4 because of RGBA
But you can also use old school dynamic memory allocation:
unsignd char *colorPlane = new unsignd char[ width * height * 4 ];
The byte index of a texel inside the plane is calculated as follows:
int posX = ...;
int posY = ...;
int index = (posY * width + posX) * 4;
If you want to set a pixel you have to assign the proper red, green and blue color channels in the range [0, 255]. For an opaque texel you have to set an alpha channel of 255:
e.g.: Set Red color:
colorPlane[index + 0] = 255; // red component
colorPlane[index + 1] = 0; // green component
colorPlane[index + 2] = 0; // blue component
colorPlane[index + 3] = 255; // alpha channel (255 == opaque)
Finally you have to set the color plane to the texture.
// std::vector<unsigned char>
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, colorPlane.data() );
// unsigned char*
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, colorPlane );
You can create a gradient texture by interpolating the color components.
See the following example
// Fill the RGBA8 colorPlane with a 2D gradient:
// red fades out left->right, green fades out top->bottom,
// blue grows toward the (right, bottom) corner; alpha is fully opaque.
for ( int iy = 0; iy < height; ++ iy )
{
    for ( int ix = 0; ix < width; ++ ix )
    {
        // 4 bytes per texel (RGBA), row-major layout.
        int index = (iy * width + ix) * 4;
        // Normalized coordinates in [0, 1).
        float gradX = (float)ix / width;
        float gradY = (float)iy / height;
        colorPlane[index + 0] = (unsigned char)(255.0 * (1.0-gradX));
        colorPlane[index + 1] = (unsigned char)(255.0 * (1.0-gradY));
        colorPlane[index + 2] = (unsigned char)(255.0 * gradX * gradY);
        colorPlane[index + 3] = 255; // opaque
    }
}
A rainbow texture can be created as follows:
// Fill the RGBA8 colorPlane with a vertical rainbow (hue ramp, red at the top).
// The piecewise-linear hue->RGB ramps produce raw values outside [0,1]
// (e.g. H == 1 gives B == 2), so they must be clamped before scaling to a
// byte — the original code skipped the clamp, overflowing unsigned char
// (255 * 2 == 510). The GLSL version of this same ramp clamps explicitly.
for ( int iy = 0; iy < height; ++ iy )
{
    for ( int ix = 0; ix < width; ++ ix )
    {
        int index = (iy * width + ix) * 4; // 4 bytes per RGBA texel
        float H = 1.0f - (float)iy / height; // hue in [0, 1]
        float R = fabs(H * 4.0f - 3.0f) - 1.0f;
        float G = 2.0f - fabs(H * 4.0f - 2.0f);
        float B = 2.0f - fabs(H * 4.0f - 4.0f);
        // Clamp each channel to [0, 1] before converting to a byte.
        R = R < 0.0f ? 0.0f : (R > 1.0f ? 1.0f : R);
        G = G < 0.0f ? 0.0f : (G > 1.0f ? 1.0f : G);
        B = B < 0.0f ? 0.0f : (B > 1.0f ? 1.0f : B);
        colorPlane[index + 0] = (unsigned char)(255.0 * R);
        colorPlane[index + 1] = (unsigned char)(255.0 * G);
        colorPlane[index + 2] = (unsigned char)(255.0 * B);
        colorPlane[index + 3] = 255; // opaque
    }
}
See the following WebGL example:
// Minimal WebGL shader-program helper namespace.
var ShaderProgram = {};
// Compiles and links a program from shaderList entries ({source, stage}),
// then caches the location of each name in uniformNames on the program
// object. Returns the program object, or 0 on compile/link failure.
// (The property name "unifomLocation" is misspelled but used consistently
// by the SetUniform* helpers below, so it must stay as-is.)
ShaderProgram.Create = function( shaderList, uniformNames ) {
    var shaderObjs = [];
    for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
        var shderObj = this.CompileShader( shaderList[i_sh].source, shaderList[i_sh].stage );
        if ( shderObj == 0 )
            return 0; // abort on first compile error
        shaderObjs.push( shderObj );
    }
    var progObj = this.LinkProgram( shaderObjs )
    if ( progObj != 0 ) {
        progObj.unifomLocation = {};
        for ( var i_n = 0; i_n < uniformNames.length; ++ i_n ) {
            var name = uniformNames[i_n];
            progObj.unifomLocation[name] = gl.getUniformLocation( progObj, name );
        }
    }
    return progObj;
}
// Thin wrappers over gl.useProgram / gl.uniform*; each looks the uniform up
// in the location cache built by ShaderProgram.Create.
ShaderProgram.Use = function( progObj ) { gl.useProgram( progObj ); }
ShaderProgram.SetUniformInt = function( progObj, name, val ) { gl.uniform1i( progObj.unifomLocation[name], val ); }
ShaderProgram.SetUniformFloat = function( progObj, name, val ) { gl.uniform1f( progObj.unifomLocation[name], val ); }
ShaderProgram.SetUniform2f = function( progObj, name, arr ) { gl.uniform2fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniform3f = function( progObj, name, arr ) { gl.uniform3fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniformMat44 = function( progObj, name, mat ) { gl.uniformMatrix4fv( progObj.unifomLocation[name], false, mat ); }
// Compiles one shader. `source` is either GLSL text or the id of a
// <script> element whose text content holds the GLSL. Returns the shader
// object, or 0 on failure (the info log is shown via alert).
ShaderProgram.CompileShader = function( source, shaderStage ) {
    var shaderScript = document.getElementById(source);
    if (shaderScript) {
        // Collect the script element's text nodes (nodeType 3) as the source.
        source = "";
        var node = shaderScript.firstChild;
        while (node) {
            if (node.nodeType == 3) source += node.textContent;
            node = node.nextSibling;
        }
    }
    var shaderObj = gl.createShader( shaderStage );
    gl.shaderSource( shaderObj, source );
    gl.compileShader( shaderObj );
    var status = gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS );
    if ( !status ) alert(gl.getShaderInfoLog(shaderObj));
    return status ? shaderObj : 0;
}
// Links the compiled shader objects into a program.
// Returns the program object, or 0 on link failure.
ShaderProgram.LinkProgram = function( shaderObjs ) {
    var prog = gl.createProgram();
    for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
        gl.attachShader( prog, shaderObjs[i_sh] );
    gl.linkProgram( prog );
    // `var` added: `status` was assigned without a declaration, leaking an
    // implicit global (and a ReferenceError in strict mode).
    var status = gl.getProgramParameter( prog, gl.LINK_STATUS );
    if ( !status ) alert("Could not initialise shaders");
    gl.useProgram( null );
    return status ? prog : 0;
}
// Renders one frame: clears the canvas and draws the full-screen quad with
// the rainbow shader program.
function drawScene(){
    var canvas = document.getElementById( "ogl-canvas" );
    gl.viewport( 0, 0, canvas.width, canvas.height );
    gl.enable( gl.DEPTH_TEST );
    gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
    ShaderProgram.Use( progDraw );
    gl.enableVertexAttribArray( progDraw.inPos );
    gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
    gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
    gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );
    // Fixed: was disableVertexAttribArray(progDraw.pos) — the attribute
    // location is stored as progDraw.inPos (see sceneStart), so the array
    // was never actually disabled. Also removed the unused local `vp`.
    gl.disableVertexAttribArray( progDraw.inPos );
}
var gl;
var prog;
var progDraw;   // fixed: was assigned in sceneStart without a declaration (implicit global)
var bufObj = {};
// One-time setup: acquires the WebGL context, builds the shader program and
// the full-screen quad buffers, then starts a 50 ms render loop.
function sceneStart() {
    var canvas = document.getElementById( "ogl-canvas");
    gl = canvas.getContext( "experimental-webgl" );
    if ( !gl )
        return;
    progDraw = ShaderProgram.Create(
        [ { source : "draw-shader-vs", stage : gl.VERTEX_SHADER },
          { source : "draw-shader-fs", stage : gl.FRAGMENT_SHADER }
        ],
        [] );
    // Fixed: the original tested `prog == 0` (prog is never assigned, so the
    // check could never fire) and only after progDraw had already been
    // dereferenced. Bail out before touching a failed (0) program object.
    if ( !progDraw )
        return;
    progDraw.inPos = gl.getAttribLocation( progDraw, "inPos" );
    // Full-screen quad: 4 corners in clip space, two triangles.
    var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
    var inx = [ 0, 1, 2, 0, 2, 3 ];
    bufObj.pos = gl.createBuffer();
    gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
    gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
    bufObj.inx = gl.createBuffer();
    bufObj.inx.len = inx.length; // element count, read by drawScene
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
    gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
    setInterval(drawScene, 50);
}
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
varying vec2 vertPos;
void main()
{
vertPos = inPos;
gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec2 vertPos;
// Converts a hue H in [0,1] to a fully saturated RGB color using
// piecewise-linear ramps; the clamp keeps each channel in [0,1].
vec3 HueToRGB(in float H)
{
    float R = abs(H * 4.0 - 3.0) - 1.0;
    float G = 2.0 - abs(H * 4.0 - 2.0);
    float B = 2.0 - abs(H * 4.0 - 4.0);
    return clamp( vec3(R,G,B), 0.0, 1.0 );
}
// Maps the vertical clip-space position ([-1,1]) to a hue in [0,1],
// producing a vertical rainbow gradient.
void main()
{
    vec3 color = HueToRGB( vertPos.y * 0.5 + 0.5 );
    gl_FragColor = vec4( color, 1.0 );
}
</script>
<body onload="sceneStart();">
<canvas id="ogl-canvas" style="border: none;" width="200" height="200"></canvas>
</body>
This is possible in fragment (pixel) shader. Anyway - your original gradient data will better work if it will be presented as a texture to a shader with additional parameters (like linear gradient points, or radial gradient params).
However, you can still pass it as a uniform buffer or texture buffer object. With this you lose the built-in texture sampling and filtering capabilities, which would almost do the gradient for you.
Take a look at Shadertoy (https://www.shadertoy.com) - there are tons of samples there that generate patterns inside a shader program without using textures.

Page Roll effect

I want to create a page roll effect in a shader. So i have a XZ plane points with y=0. Now i assume a cylender with R radius and Inf. height is lied down on the plane with certain angle rotated in Y axis. See the image:
I want an equation so that the paper can be rolled over the cylinder in the given XZ direction.
what I am doing is:
float2 currPoint = gl_Vertex.xz;
float2 normDir = normalize(-1, 0); //direction at which paper will start rolling out.
float cylRadius = 1.f;
float dist = sqrt(normDir.x *vi.x * vi.x + normDir.y *vi.y * vi.y);
float beta = dist / cylRadius;
float3 outPos = 0;
outPos.x = currPoint.x + N.x * cylRadius * sin(beta);
outPos.z = cylRadius * (1 -cos(beta));
outPos.y = currPoint.y + N.y * cylRadius * sin(beta);
but it only works in the case of normDir = normalize(-1, 0), in other cases result not as expected.
I got this. My implementation is based on Pawel's page-flip implementation ( http://nomtek.com/page-flip-3d/ )
Here is the code in HLSL.
// Signed perpendicular distance from testPt to the infinite line through
// pt1 and pt2. The sign tells which side of the line testPt lies on
// (perpDir is lineDir rotated -90 degrees).
float DistToLine(float2 pt1, float2 pt2, float2 testPt)
{
    float2 lineDir = pt2 - pt1;
    float2 perpDir = float2(lineDir.y, -lineDir.x);
    float2 dirToPt1 = pt1 - testPt;
    return (dot(normalize(perpDir), dirToPt1));
}
// Per-vertex page-roll: wraps the part of the flat page (z == 0) that lies
// beyond a moving fold line around a cylinder of radius R.
// The fold line P1-P2 sweeps across the page as `time` (param1.z) goes
// 0 -> 1; vertices on the far side of the line are bent around the cylinder.
float3 Roll(float2 pos ) //per vertex
{
    float time = param1.z ;
    float t = (time);
    // A,B,C,D define the sweep of the fold line; tweak for roll direction.
    float2 A = float2( 0 , 1 ); //tweak these 4 variables for the direction of Roll
    float2 B = float2( 5.f , 1 ); //
    float2 C = float2( 1 , 0 ); //
    float2 D = float2( 0 , 0 ); //
    // Current fold-line endpoints, interpolated by time.
    float2 P1 = lerp( B , A , time ) ;
    float2 P2 = lerp( C , D , time ) ; ;
    // Unit normal of the fold line (direction in which the page curls).
    float2 N = normalize( float2(-(P2-P1).y , (P2-P1).x ) );
    float dist = DistToLine(P1 , P2 , float2(pos.x , pos.y) );
    float3 vOut;
    if (dist > 0 )
    {
        // Radius varies slightly along the fold for a more natural cone-like roll.
        float distFromEnd = DistToLine(C , B , float2(pos.x , pos.y) ) ;
        float R = lerp( .1 , .13 , distFromEnd );
        // p = foot of the vertex on the fold line; alpha = arc angle around
        // the cylinder corresponding to the flat distance `dist`.
        float2 p = pos - N * dist;
        float alpha = dist / R;
        float sinAlpha = R * sin(alpha);
        vOut.x = p.x + N.x * sinAlpha;
        vOut.y = p.y + N.y * sinAlpha;
        vOut.z = (1 - cos(alpha)) * R; // lift off the page plane
    }
    else
    {
        // In front of the fold line: the page stays flat.
        vOut.x = pos.x;
        vOut.y = pos.y;
        vOut.z = 0;
    }
    return vOut;
}

Transformed w value is 0

I'm making software renderer.
Sometimes the transformed vertex's w coordinate is 0.
So, if I divide the other coordinates by w, an error occurs.
If the camera position's z coordinate is equal to the vertex position's z coordinate, the transformed vertex's w coordinate is 0.
Is there something wrong with my code?
below code is my transform code.
// Builds the world-to-camera (view) matrix from the camera position (_pos),
// the look-at point (_dir) and the up vector (_up).
// The basis vectors are written into the matrix columns (a11..a33) with the
// translation in the fourth row (a41..a43), i.e. a row-vector convention
// (v' = v * M).
void Camera::ComputeWorldToCameraMat()
{
    // Forward axis: from the camera position toward the look-at point.
    Vector3 zaxis;
    zaxis.Set( _dir.x - _pos.x, _dir.y - _pos.y, _dir.z - _pos.z );
    zaxis.Normalize();
    // xaxis = zaxis X up
    Vector3 xaxis;
    xaxis = cross(zaxis, _up);
    xaxis.Normalize();
    Vector3 yaxis;
    // NOTE(review): classic LookAt code uses yaxis = cross(xaxis, zaxis) (or
    // flips one of the other products) — confirm the intended handedness;
    // cross(zaxis, xaxis) yields the opposite-signed up vector of the usual
    // right-handed construction. yaxis is not re-normalized, which is safe
    // only because zaxis and xaxis are unit length and orthogonal.
    yaxis = cross(zaxis, xaxis);
    _worldToCamera.Identity();
    _worldToCamera.a11 = xaxis.x;
    _worldToCamera.a12 = yaxis.x;
    _worldToCamera.a13 = zaxis.x;
    _worldToCamera.a14 = 0;
    _worldToCamera.a21 = xaxis.y;
    _worldToCamera.a22 = yaxis.y;
    _worldToCamera.a23 = zaxis.y;
    _worldToCamera.a24 = 0;
    _worldToCamera.a31 = xaxis.z;
    _worldToCamera.a32 = yaxis.z;
    _worldToCamera.a33 = zaxis.z;
    _worldToCamera.a34 = 0;
    // Translation: projects -_pos onto each camera axis.
    _worldToCamera.a41 = -dot(xaxis, _pos);
    _worldToCamera.a42 = -dot(yaxis, _pos);
    _worldToCamera.a43 = -dot(zaxis, _pos);
    _worldToCamera.a44 = 1;
}
// Builds the perspective projection matrix from the field of view (_fov),
// the viewport aspect ratio and the near/far planes.
// With a43 == 1 and a44 == 0 the resulting clip-space w equals the
// view-space z of the vertex — so any vertex at z == 0 (in the camera
// plane) produces w == 0. That is the division-by-zero case the question
// describes: such vertices must be clipped against the near plane BEFORE
// the perspective divide; the matrix itself is not at fault there.
void Camera::ComputeProjectionMat( Viewport* viewport)
{
    int width = viewport->_width;
    int height = viewport->_height;
    //float tanfov = (float)tan(_fov * 0.5f);
    float cot = 1.0f / tan( _fov * 0.5f ); // assumes _fov is in radians — TODO confirm
    // NOTE(review): width / height is an *integer* division — the (float)
    // cast is applied to the already-truncated result, so e.g. 800x600
    // yields aspect == 1.0 instead of 1.333. Should be
    // (float)width / (float)height.
    float aspect = (float)(width / height);
    _projection.Identity();
    _projection.a11 = cot / aspect;
    _projection.a12 = 0;
    _projection.a13 = 0;
    _projection.a14 = 0;
    _projection.a21 = 0;
    _projection.a22 = cot;
    _projection.a23 = 0;
    _projection.a24 = 0;
    _projection.a31 = 0;
    _projection.a32 = 0;
    // Depth terms; sign conventions differ from the standard GL matrix
    // (which has a43 == -1) — presumably a left-handed/看-down-+z setup.
    // NOTE(review): verify signs of a33/a34 against the rest of the pipeline.
    _projection.a33 = -1 * (_farZ + _nearZ) / ( _farZ - _nearZ );
    _projection.a34 = 2 * ( _nearZ * _farZ ) / ( _farZ - _nearZ );
    _projection.a41 = 0;
    _projection.a42 = 0;
    _projection.a43 = 1;
    _projection.a44 = 0;
}

What kind of blurs can be implemented in pixel shaders?

Gaussian, box, radial, directional, motion blur, zoom blur, etc.
I read that Gaussian blur can be broken down in passes that could be implemented in pixel shaders, but couldn't find any samples.
Is it right to assume that any effect that concerns itself with pixels other than itself, can't be implemented in pixel shaders?
You can implement everything, as long you are able to pass information to the shader.
The trick, in this cases, is to perform a multiple pass rendering. The final shader will take a certain number of samplers, that are the non-blurred sources, which are used to compute blurred values.
For example, using multiple textures is possible to emulate effects based on the accumulation buffer.
To implement a Gaussian blur, render the scene onto a framebuffer object with a texture attached to its color attachment. This is the first pass.
As second pass, render a textured quad, where the texture is the one generated in the first step. Textures coordinates are passed from vertex stage to fragment stage, interpolated across the quad. Indeed you have texture coordinate for each fragment; apply a offset for each coordinate to fetch textels around the underlying one, and perform the gaussian blur.
general 'pipeline' for postprocessing effects
setRenderTarget(myRenderTarget); // or FBO in GL
drawAwsomeScene();
setdefaultRenderTarget(); // draw to screen...
blurShader.use();
// shader needs to know what is the size of one pixel on the screen
blurShader.uniform2f("texelSize", 1/screenW, 1/screenH);
// set the texture with scene rendered...
setRenderTargetTexture();
drawFullScreenQuad();
// other effects...
useful examples/tutorial for blur: http://www.gamerendering.com/2008/10/11/gaussian-blur-filter-shader/
I implemented a generic convolution fragment shader (pixel shader)
#version 120
uniform sampler2D texUnit;
uniform float[9] conMatrix;
uniform float conWeight;
uniform vec2 conPixel;
// Generic 3x3 convolution: samples a 3x3 neighborhood around the current
// texel (conPixel = size of one texel), weights each sample by conMatrix
// (row-major), and scales the sum by conWeight.
void main(void)
{
    vec4 color = vec4(0.0);
    vec2 texCoord = gl_TexCoord[0].st;
    // NOTE(review): an offset of 1.5 texels puts the *center* sample half a
    // texel off texCoord; for an exactly centered 3x3 kernel the start
    // offset would be 1.0 texel — presumably intentional with linear
    // filtering, but worth confirming.
    vec2 offset = conPixel * 1.5;
    vec2 start = texCoord - offset;
    vec2 current = start;
    for (int i = 0; i < 9; i++)
    {
        color += texture2D( texUnit, current ) * conMatrix[i];
        current.x += conPixel.x;
        // Wrap to the next kernel row after every third sample.
        if (i == 2 || i == 5) {
            current.x = start.x;
            current.y += conPixel.y;
        }
    }
    gl_FragColor = color * conWeight;
}
For a Blur:
where conPixel is {1/screen width, 1/screen height}
where conMatrix is {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}
where conWeight is 1.0 / 9.0
where texUnit is 0
An often seen implementation to achieve a blur effect of a scene is the Gaussian blur, implemented by a 2 pass post-processing.
This is an approximation which first blurs along the X-Axis in the 1st pass and along the Y-Axis in the 2nd pass (or vice versa). This results in a better performance for strong blurring.
The blur shader uses a normal (or Gaussian) distribution. For the 2 passes is used the same shader program, with individual direction settings for the 2 passes, stored in the uniform vec2 u_dir. The strength of the blur effect can be varied with the uniform variable float u_sigma in the range [0.0, 1.0].
The scene is written to frame buffer with a texture bound to the color plane. A screen space pass uses the texture as the input to blur the output along the X-axis. The X-axis blur pass writes to another frame buffer, with a texture bound to its color plane. This texture is used as the input, for the final blur process along the Y-axis.
A detailed description of the blur algorithm can be found at the answer to the question OpenGL es 2.0 Gaussian blur on triangle or LearnOpenGL - Gaussian blur.
Blur Vertex shader
#version 330
in vec2 inPos;
out vec2 pos;
// Pass-through vertex shader for the screen-space blur passes; forwards the
// clip-space quad position to the fragment stage via `pos`.
void main()
{
    pos = inPos;
    gl_Position = vec4( inPos, 0.0, 1.0 );
}
Blur Fragment shader
#version 330
in vec2 pos;
uniform sampler2D u_texture;
uniform vec2 u_textureSize;
uniform float u_sigma;
uniform vec2 u_dir;
// Gaussian-shaped weight for offset x (x in [0,1]) and spread sigma.
// NOTE(review): this is not a normalized Gaussian pdf — the exponent uses
// sigma rather than sigma², and 3.14157 is presumably a typo for pi. The
// absolute scale does not matter here because main() renormalizes by the
// accumulated weight (gaussCol.w); only the falloff shape is used.
float CalcGauss( float x, float sigma )
{
    if ( sigma <= 0.0 )
        return 0.0;
    return exp( -(x*x) / (2.0 * sigma) ) / (2.0 * 3.14157 * sigma);
}
// Separable 1D Gaussian blur along u_dir ([1,0] = X pass, [0,1] = Y pass).
// Samples up to 32 texels on each side of the current texel, accumulates
// weighted colors plus the total weight in .w, then renormalizes.
void main()
{
    // Clip-space position -> [0,1] texture coordinate.
    vec2 texC = pos.st * 0.5 + 0.5;
    vec4 texCol = texture2D( u_texture, texC );
    // Center sample with weight 1 (stored in .w for renormalization).
    vec4 gaussCol = vec4( texCol.rgb, 1.0 );
    vec2 step = u_dir / u_textureSize; // one texel along the blur direction
    for ( int i = 1; i <= 32; ++ i )
    {
        float weight = CalcGauss( float(i) / 32.0, u_sigma * 0.5 );
        // Stop once the contribution falls below one 8-bit quantization step.
        if ( weight < 1.0/255.0 )
            break;
        // Symmetric taps on both sides of the center.
        texCol = texture2D( u_texture, texC + step * float(i) );
        gaussCol += vec4( texCol.rgb * weight, weight );
        texCol = texture2D( u_texture, texC - step * float(i) );
        gaussCol += vec4( texCol.rgb * weight, weight );
    }
    // Divide by the accumulated weight so the kernel always sums to 1.
    gaussCol.rgb = clamp( gaussCol.rgb / gaussCol.w, 0.0, 1.0 );
    gl_FragColor = vec4( gaussCol.rgb, 1.0 );
}
See also the answers to the following question:
Fast Gaussian blur at pause
OpenGL es 2.0 Gaussian blur on triangle
How to get a "Glow" shader effect in OpenGL ES 2.0?
See a WebGL example:
// Flag polled by render(); when set, the slider values are (re)read.
// It is never cleared (the reset in render() is commented out), so the
// sliders are effectively read every frame.
var readInput = true;
// Attached to the GUI sliders' change events.
function changeEventHandler(event){
    readInput = true;
}
(function loadscene() {
var resize, gl, progDraw, progBlur, vp_size, blurFB;
var canvas;
var camera;
var bufCube = {};
var bufQuad = {};
var shininess = 10.0;
var glow = 10.0;
var sigma = 0.8;
var radius = 1.0;
// Per-frame render loop (3 passes):
//   1. draw the spinning cube into blurFB[0],
//   2. blur blurFB[0] along X into blurFB[1],
//   3. blur blurFB[1] along Y onto the default framebuffer (the canvas).
function render(deltaMS){
    if ( readInput ) {
        //readInput = false;
        // Sliders deliver 0..100; scale to 0..1.
        var sliderScale = 100;
        sigma = document.getElementById( "sigma" ).value / sliderScale;
        radius = document.getElementById( "radius" ).value / sliderScale;
    }
    vp_size = [canvas.width, canvas.height];
    camera.Update( vp_size );
    gl.enable( gl.DEPTH_TEST );
    gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
    // Pass 1 target: off-screen framebuffer for the scene.
    gl.bindFramebuffer( gl.FRAMEBUFFER, blurFB[0] );
    gl.viewport( 0, 0, blurFB[0].width, blurFB[0].height );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
    // View/projection/model matrices for this frame.
    var prjMat = camera.Perspective();
    var viewMat = camera.Orbit();
    var modelMat = IdentM44();
    modelMat = camera.AutoModelMatrix();
    // Draw the cube with the scene shader.
    ShProg.Use( progDraw.prog );
    ShProg.SetM44( progDraw.prog, "u_projectionMat44", prjMat );
    ShProg.SetM44( progDraw.prog, "u_modelViewMat44", Multiply(viewMat, modelMat) );
    ShProg.SetF1( progDraw.prog, "u_shininess", shininess );
    VertexBuffer.Draw( bufCube );
    // Pass 2: blur along X, reading blurFB[0], writing blurFB[1].
    gl.bindFramebuffer( gl.FRAMEBUFFER, blurFB[1] );
    gl.viewport( 0, 0, blurFB[1].width, blurFB[1].height );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
    var texUnit = 1;
    gl.activeTexture( gl.TEXTURE0 + texUnit );
    gl.bindTexture( gl.TEXTURE_2D, blurFB[0].color0_texture );
    ShProg.Use( progBlur.prog );
    ShProg.SetI1( progBlur.prog, "u_texture", texUnit )
    ShProg.SetF2( progBlur.prog, "u_textureSize", vp_size );
    ShProg.SetF1( progBlur.prog, "u_sigma", sigma )
    ShProg.SetF1( progBlur.prog, "u_radius", radius )
    ShProg.SetF2( progBlur.prog, "u_dir", [1.0, 0.0] ) // X direction
    // Full-screen quad.
    gl.enableVertexAttribArray( progBlur.inPos );
    gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
    gl.vertexAttribPointer( progBlur.inPos, 2, gl.FLOAT, false, 0, 0 );
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
    gl.drawElements( gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0 );
    gl.disableVertexAttribArray( progBlur.inPos );
    // Pass 3: blur along Y, reading blurFB[1], writing to the canvas.
    gl.bindFramebuffer( gl.FRAMEBUFFER, null );
    gl.viewport( 0, 0, vp_size[0], vp_size[1] );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
    texUnit = 2;
    gl.activeTexture( gl.TEXTURE0 + texUnit );
    gl.bindTexture( gl.TEXTURE_2D, blurFB[1].color0_texture );
    // progBlur is still active; only the changed uniforms are re-set.
    ShProg.SetI1( progBlur.prog, "u_texture", texUnit )
    ShProg.SetF1( progBlur.prog, "u_radius", radius )
    ShProg.SetF2( progBlur.prog, "u_dir", [0.0, 1.0] ) // Y direction
    // Full-screen quad again.
    gl.enableVertexAttribArray( progBlur.inPos );
    gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
    gl.vertexAttribPointer( progBlur.inPos, 2, gl.FLOAT, false, 0, 0 );
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
    gl.drawElements( gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0 );
    gl.disableVertexAttribArray( progBlur.inPos );
    requestAnimationFrame(render);
}
// One-time setup: WebGL context, scene + blur shader programs, cube
// geometry (positions/normals/colors), full-screen quad, camera, and the
// resize handler; then kicks off the render loop.
function initScene() {
    canvas = document.getElementById( "canvas");
    gl = canvas.getContext( "experimental-webgl" );
    if ( !gl )
        return null;
    progDraw = {}
    progDraw.prog = ShProg.Create(
        [ { source : "draw-shader-vs", stage : gl.VERTEX_SHADER },
          { source : "draw-shader-fs", stage : gl.FRAGMENT_SHADER }
        ] );
    if ( !progDraw.prog )
        return null;
    progDraw.inPos = gl.getAttribLocation( progDraw.prog, "inPos" );
    progDraw.inNV = gl.getAttribLocation( progDraw.prog, "inNV" );
    progDraw.inCol = gl.getAttribLocation( progDraw.prog, "inCol" );
    progBlur = {}
    progBlur.prog = ShProg.Create(
        [ { source : "post-shader-vs", stage : gl.VERTEX_SHADER },
          { source : "blur-shader-fs", stage : gl.FRAGMENT_SHADER }
        ] );
    // NOTE(review): inPos is queried before the failure check below —
    // harmless only when creation succeeded.
    progBlur.inPos = gl.getAttribLocation( progBlur.prog, "inPos" );
    if ( !progBlur.prog )
        return;
    // create cube: 8 corner positions, 6 face colors, and per-face corner
    // indices (4 per face) used to expand into 24 unshared vertices.
    var cubePos = [
        -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0,
        -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0 ];
    var cubeCol = [ 1.0, 0.0, 0.0, 1.0, 0.5, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0 ];
    var cubeHlpInx = [ 0, 1, 2, 3, 1, 5, 6, 2, 5, 4, 7, 6, 4, 0, 3, 7, 3, 2, 6, 7, 1, 0, 4, 5 ];
    var cubePosData = [];
    for ( var i = 0; i < cubeHlpInx.length; ++ i ) {
        cubePosData.push( cubePos[cubeHlpInx[i]*3], cubePos[cubeHlpInx[i]*3+1], cubePos[cubeHlpInx[i]*3+2] );
    }
    // Face normals: sum of the 4 corner positions of each face points along
    // the face normal for a unit cube centered at the origin.
    var cubeNVData = [];
    for ( var i1 = 0; i1 < cubeHlpInx.length; i1 += 4 ) {
        var nv = [0, 0, 0];
        for ( i2 = 0; i2 < 4; ++ i2 ) {
            var i = i1 + i2;
            nv[0] += cubePosData[i*3]; nv[1] += cubePosData[i*3+1]; nv[2] += cubePosData[i*3+2];
        }
        for ( i2 = 0; i2 < 4; ++ i2 )
            cubeNVData.push( nv[0], nv[1], nv[2] );
    }
    // One flat color per face, replicated to its 4 vertices.
    var cubeColData = [];
    for ( var is = 0; is < 6; ++ is ) {
        for ( var ip = 0; ip < 4; ++ ip ) {
            cubeColData.push( cubeCol[is*3], cubeCol[is*3+1], cubeCol[is*3+2] );
        }
    }
    // Two triangles per face.
    var cubeInxData = [];
    for ( var i = 0; i < cubeHlpInx.length; i += 4 ) {
        cubeInxData.push( i, i+1, i+2, i, i+2, i+3 );
    }
    bufCube = VertexBuffer.Create(
        [ { data : cubePosData, attrSize : 3, attrLoc : progDraw.inPos },
          { data : cubeNVData, attrSize : 3, attrLoc : progDraw.inNV },
          { data : cubeColData, attrSize : 3, attrLoc : progDraw.inCol } ],
        cubeInxData );
    // Full-screen quad for the blur passes.
    bufQuad.pos = gl.createBuffer();
    gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
    gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( [ -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0 ] ), gl.STATIC_DRAW );
    bufQuad.inx = gl.createBuffer();
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
    gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( [ 0, 1, 2, 0, 2, 3 ] ), gl.STATIC_DRAW );
    camera = new Camera( [0, 3, 0.0], [0, 0, 0], [0, 0, 1], 90, vp_size, 0.5, 100 );
    window.onresize = resize;
    resize(); // creates the blur framebuffers for the initial size
    requestAnimationFrame(render);
}
// Resizes the canvas to the window and (re)creates the two blur
// framebuffers. Each framebuffer gets a square, power-of-two RGBA color
// texture plus a 16-bit depth renderbuffer.
function resize() {
    //vp_size = [gl.drawingBufferWidth, gl.drawingBufferHeight];
    vp_size = [window.innerWidth, window.innerHeight]
    //vp_size = [256, 256]
    canvas.width = vp_size[0];
    canvas.height = vp_size[1];
    var fbsize = Math.max(vp_size[0], vp_size[1]);
    fbsize = 1 << 31 - Math.clz32(fbsize); // nearest power of 2
    blurFB = [];
    for ( var i = 0; i < 2; ++ i ) {
        // `var` added: `fb` was assigned without a declaration, leaking an
        // implicit global (and a ReferenceError in strict mode).
        var fb = gl.createFramebuffer();
        fb.width = fbsize;
        fb.height = fbsize;
        gl.bindFramebuffer( gl.FRAMEBUFFER, fb );
        fb.color0_texture = gl.createTexture();
        gl.bindTexture( gl.TEXTURE_2D, fb.color0_texture );
        gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST );
        gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST );
        gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, fb.width, fb.height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null );
        fb.renderbuffer = gl.createRenderbuffer();
        gl.bindRenderbuffer( gl.RENDERBUFFER, fb.renderbuffer );
        gl.renderbufferStorage( gl.RENDERBUFFER, gl.DEPTH_COMPONENT16, fb.width, fb.height );
        gl.framebufferTexture2D( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, fb.color0_texture, 0 );
        gl.framebufferRenderbuffer( gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, fb.renderbuffer );
        gl.bindTexture( gl.TEXTURE_2D, null );
        gl.bindRenderbuffer( gl.RENDERBUFFER, null );
        gl.bindFramebuffer( gl.FRAMEBUFFER, null );
        blurFB.push( fb );
    }
}
// Fractional part, truncating toward zero (so Fract(-1.5) === -0.5) —
// identical to val - Math.trunc(val), expressed via the remainder operator.
function Fract( val ) {
    return val % 1;
}
// Maps elapsed time (ms) to an angle in [0, 2*pi), completing one full turn
// every `intervall` seconds.
function CalcAng( deltaTime, intervall ) {
    return Fract( deltaTime / (1000*intervall) ) * 2.0 * Math.PI;
}
// Maps elapsed time (ms) to a ping-pong interpolation between range[0] and
// range[1], completing one back-and-forth cycle every `intervall` seconds.
function CalcMove( deltaTime, intervall, range ) {
    // Fixed: the original called self.Fract(...) — but Fract is a local
    // function of the enclosing IIFE, not a property of self/window, so the
    // call threw a TypeError at runtime. Also removed the duplicate `var pos`.
    var pos = Fract( deltaTime / (1000*intervall) ) * 2.0;
    pos = pos < 1.0 ? pos : (2.0 - pos); // triangle wave: 0 -> 1 -> 0
    return range[0] + (range[1] - range[0]) * pos;
}
// Point on an ellipse with semi-axes a and b at angle angRag (radians),
// shifted by the linear eccentricity along the major axis so the focus is
// at the origin. Returns [x, y, 0].
function EllipticalPosition( a, b, angRag ) {
    var diff = a * a - b * b;
    // Linear eccentricity along whichever axis is the major one.
    var ea = diff > 0 ? Math.sqrt( diff ) : 0;
    var eb = diff < 0 ? Math.sqrt( -diff ) : 0;
    return [ a * Math.sin( angRag ) - ea, b * Math.cos( angRag ) - eb, 0 ];
}
// Returns a fresh 4x4 identity matrix as a flat 16-element array
// (diagonal entries sit at indices 0, 5, 10, 15 — i.e. i % 5 === 0).
function IdentM44() {
    var m = [];
    for (var i = 0; i < 16; ++i)
        m.push(i % 5 === 0 ? 1 : 0);
    return m;
};
// Returns matA rotated by angRad around a principal axis (0 = X, 1 = Y,
// 2 = Z) by recombining the two matrix rows that span the rotation plane.
// matA is not modified.
function RotateAxis(matA, angRad, axis) {
    // For each axis, the pair of row indices affected by the rotation.
    var aMap = [ [1, 2], [2, 0], [0, 1] ];
    var a0 = aMap[axis][0], a1 = aMap[axis][1];
    var sinAng = Math.sin(angRad), cosAng = Math.cos(angRad);
    var matB = matA.slice(0); // copy, so rows are read from the original
    for ( var i = 0; i < 3; ++ i ) {
        matB[a0*4+i] = matA[a0*4+i] * cosAng + matA[a1*4+i] * sinAng;
        matB[a1*4+i] = matA[a0*4+i] * -sinAng + matA[a1*4+i] * cosAng;
    }
    return matB;
}
// Returns matA combined with a rotation of angRad around an arbitrary
// (unit) axis vector, built via the axis-angle (Rodrigues) matrix.
function Rotate(matA, angRad, axis) {
    var s = Math.sin(angRad), c = Math.cos(angRad);
    var x = axis[0], y = axis[1], z = axis[2];
    // `var` added: matB was assigned without a declaration, leaking an
    // implicit global (and a ReferenceError in strict mode).
    var matB = [
        x*x*(1-c)+c, x*y*(1-c)-z*s, x*z*(1-c)+y*s, 0,
        y*x*(1-c)+z*s, y*y*(1-c)+c, y*z*(1-c)-x*s, 0,
        z*x*(1-c)-y*s, z*y*(1-c)+x*s, z*z*(1-c)+c, 0,
        0, 0, 0, 1 ];
    return Multiply(matA, matB);
}
// 4x4 matrix product of two flat 16-element arrays:
// matC[i0*4+i1] = sum_k matB[i0*4+k] * matA[k*4+i1].
function Multiply(matA, matB) {
    // `var` added: matC was an implicit global. A plain 16-slot array is
    // used as the seed (instead of IdentM44()) since every element is
    // overwritten below anyway.
    var matC = new Array(16);
    for (var i0=0; i0<4; ++i0 )
        for (var i1=0; i1<4; ++i1 )
            matC[i0*4+i1] = matB[i0*4+0] * matA[0*4+i1] + matB[i0*4+1] * matA[1*4+i1] + matB[i0*4+2] * matA[2*4+i1] + matB[i0*4+3] * matA[3*4+i1]
    return matC;
}
// Cross product of two 3D vectors; returns a 4-element vector with w == 0.
function Cross( a, b ) {
    var x = a[1] * b[2] - a[2] * b[1];
    var y = a[2] * b[0] - a[0] * b[2];
    var z = a[0] * b[1] - a[1] * b[0];
    return [ x, y, z, 0.0 ];
}
// Dot product of the first three components of a and b.
function Dot( a, b ) {
    var sum = 0;
    for (var i = 0; i < 3; ++i)
        sum += a[i] * b[i];
    return sum;
}
// Returns a unit-length copy of the 3D vector v.
// (No zero-length guard, matching the original: [0,0,0] yields NaNs.)
function Normalize( v ) {
    var len = Math.sqrt( v[0] * v[0] + v[1] * v[1] + v[2] * v[2] );
    return [0, 1, 2].map(function(i) { return v[i] / len; });
}
// Orbit/arcball camera: provides projection (Ortho/Perspective) and view
// (LookAt/Orbit) matrices, mouse-drag rotation with inertial auto-spin,
// and a time-driven auto-rotation of the model matrix.
// All matrices are flat 16-element arrays (see IdentM44/Multiply).
Camera = function( pos, target, up, fov_y, vp, near, far ) {
    this.Time = function() { return Date.now(); }
    this.pos = pos;            // eye position
    this.target = target;      // look-at point
    this.up = up;              // up direction
    this.fov_y = fov_y;        // vertical field of view in degrees
    this.vp = vp;              // viewport size [w, h]
    this.near = near;
    this.far = far;
    // orbit_mat/model_mat hold committed rotations; the current_* variants
    // hold the rotation of the in-progress drag / auto-rotation, which is
    // folded in by ChangeMotionMode when the mode switches.
    this.orbit_mat = this.current_orbit_mat = this.model_mat = this.current_model_mat = IdentM44();
    this.mouse_drag = this.auto_spin = false;
    this.auto_rotate = true;
    this.mouse_start = [0, 0];
    this.mouse_drag_axis = [0, 0, 0];
    this.mouse_drag_angle = 0;
    this.mouse_drag_time = 0;
    this.drag_start_T = this.rotate_start_T = this.Time();
    // Orthographic projection spanning the viewport in world units.
    this.Ortho = function() {
        var fn = this.far + this.near;
        var f_n = this.far - this.near;
        var w = this.vp[0];
        var h = this.vp[1];
        return [
            2/w, 0, 0, 0,
            0, 2/h, 0, 0,
            0, 0, -2/f_n, 0,
            0, 0, -fn/f_n, 1 ];
    };
    // Standard GL perspective projection from fov_y, aspect, near and far.
    this.Perspective = function() {
        var n = this.near;
        var f = this.far;
        var fn = f + n;
        var f_n = f - n;
        var r = this.vp[0] / this.vp[1];
        var t = 1 / Math.tan( Math.PI * this.fov_y / 360 ); // cot(fov/2), fov in degrees
        return [
            t/r, 0, 0, 0,
            0, t, 0, 0,
            0, 0, -fn/f_n, -1,
            0, 0, -2*f*n/f_n, 0 ];
    };
    // View matrix from pos/target/up (right-handed look-at).
    this.LookAt = function() {
        var mz = Normalize( [ this.pos[0]-this.target[0], this.pos[1]-this.target[1], this.pos[2]-this.target[2] ] );
        var mx = Normalize( Cross( this.up, mz ) );
        var my = Normalize( Cross( mz, mx ) );
        var tx = Dot( mx, this.pos );
        var ty = Dot( my, this.pos );
        var tz = Dot( [-mz[0], -mz[1], -mz[2]], this.pos );
        return [mx[0], my[0], mz[0], 0, mx[1], my[1], mz[1], 0, mx[2], my[2], mz[2], 0, tx, ty, tz, 1];
    };
    // View matrix including the user's orbit rotation.
    this.Orbit = function() {
        return Multiply(this.LookAt(), this.OrbitMatrix());
    };
    // Committed orbit rotation, combined with the in-progress one while
    // dragging or spinning.
    this.OrbitMatrix = function() {
        return (this.mouse_drag || (this.auto_rotate && this.auto_spin)) ? Multiply(this.current_orbit_mat, this.orbit_mat) : this.orbit_mat;
    };
    // Model matrix including the time-driven auto-rotation.
    this.AutoModelMatrix = function() {
        return this.auto_rotate ? Multiply(this.current_model_mat, this.model_mat) : this.model_mat;
    };
    // Advances the camera state for the current frame.
    this.Update = function(vp_size) {
        if (vp_size)
            this.vp = vp_size;
        var current_T = this.Time();
        this.current_model_mat = IdentM44()
        if (this.mouse_drag) {
            // Rotation follows the live drag vector.
            this.current_orbit_mat = Rotate(IdentM44(), this.mouse_drag_angle, this.mouse_drag_axis);
        } else if (this.auto_rotate) {
            if (this.auto_spin ) {
                // Inertia: keep rotating at the speed of the last drag.
                if (this.mouse_drag_time > 0 ) {
                    var angle = this.mouse_drag_angle * (current_T - this.rotate_start_T) / this.mouse_drag_time;
                    this.current_orbit_mat = Rotate(IdentM44(), angle, this.mouse_drag_axis);
                }
            } else {
                // Idle animation: slow rotation around X and Y with
                // incommensurate periods (13 s / 17 s).
                var auto_angle_x = Fract( (current_T - this.rotate_start_T) / 13000.0 ) * 2.0 * Math.PI;
                var auto_angle_y = Fract( (current_T - this.rotate_start_T) / 17000.0 ) * 2.0 * Math.PI;
                this.current_model_mat = RotateAxis( this.current_model_mat, auto_angle_x, 0 );
                this.current_model_mat = RotateAxis( this.current_model_mat, auto_angle_y, 1 );
            }
        }
    };
    // Switches between drag / spin / auto modes, committing the in-progress
    // rotations into orbit_mat/model_mat so motion is continuous.
    this.ChangeMotionMode = function(drag, spin, auto ) {
        var new_drag = drag;
        var new_auto = new_drag ? false : auto;
        var new_spin = new_auto ? spin : false;
        change = this.mouse_drag != new_drag || this.auto_rotate != new_auto || this.auto_spin != new_spin;
        if (!change)
            return;
        if (new_drag && !this.mouse_drag) {
            this.drag_start_T = this.Time();
            this.mouse_drag_angle = 0.0;
            this.mouse_drag_time = 0;
        }
        if (new_auto && !this.auto_rotate)
            this.rotate_start_T = this.Time();
        this.mouse_drag = new_drag;
        this.auto_rotate = new_auto;
        this.auto_spin = new_spin;
        // Fold the current (partial) rotations into the committed matrices.
        this.orbit_mat = Multiply(this.current_orbit_mat, this.orbit_mat);
        this.current_orbit_mat = IdentM44();
        this.model_mat = Multiply(this.current_model_mat, this.model_mat);
        this.current_model_mat = IdentM44();
    };
    // Left-button press inside the canvas starts a drag.
    this.OnMouseDown = function( event ) {
        var rect = gl.canvas.getBoundingClientRect();
        if ( event.clientX < rect.left || event.clientX > rect.right ) return;
        if ( event.clientY < rect.top || event.clientY > rect.bottom ) return;
        if (event.button == 0) { // left button
            this.mouse_start = [event.clientX, event.clientY];
            this.ChangeMotionMode( true, false, false );
        }
    };
    // Left release ends the drag and starts spinning; middle button toggles
    // auto rotation.
    this.OnMouseUp = function( event ) {
        if (event.button == 0) { // left button
            this.ChangeMotionMode( false, true, true );
        } else if (event.button == 1) {// middle button
            this.ChangeMotionMode( false, false, !this.auto_rotate );
        }
    };
    // While dragging: drag vector (in viewport fractions) gives the rotation
    // axis (perpendicular to the drag, in the view plane) and angle.
    this.OnMouseMove = function( event ) {
        var dx = (event.clientX-this.mouse_start[0]) / this.vp[0];
        var dy = (event.clientY-this.mouse_start[1]) / this.vp[1];
        var len = Math.sqrt(dx*dx + dy*dy);
        if (this.mouse_drag && len > 0) {
            this.mouse_drag_angle = Math.PI*len;
            this.mouse_drag_axis = [dy/len, 0, -dx/len];
            this.mouse_drag_time = this.Time() - this.drag_start_T; // used for spin speed
        }
    };
    this.domElement = document;
    var cam = this; // capture `this` for the event closures
    //this.domElement.addEventListener( 'contextmenu', function(e) { event.preventDefault(); }, false );
    this.domElement.addEventListener( 'mousedown', function(e) { cam.OnMouseDown(e) }, false );
    this.domElement.addEventListener( 'mouseup', function(e) { cam.OnMouseUp(e) }, false );
    this.domElement.addEventListener( 'mousemove', function(e) { cam.OnMouseMove(e) }, false );
    //this.domElement.addEventListener( 'mousewheel', hid_events.onMouseWheel, false );
    //this.domElement.addEventListener( 'DOMMouseScroll', hid_events.onMouseWheel, false ); // firefox
}
// Shader-program helper namespace: compile, link, and cache locations.
var ShProg = {};
// Build a program from a list of { source, stage } descriptors.
// `source` may be GLSL text or the id of a <script> tag (see Compile).
// Returns 0 on any compile or link failure; on success returns the program
// object augmented with `attrInx` (attribute locations) and `uniLoc`
// (uniform locations) lookup maps.
ShProg.Create = function( shaderList ) {
    var shaderObjs = [];
    for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
        var shderObj = this.Compile( shaderList[i_sh].source, shaderList[i_sh].stage );
        // FIX: Compile returns null on failure, and `null == 0` is false in
        // JavaScript, so the old `shderObj == 0` check never caught a failed
        // compile. Test truthiness instead.
        if ( !shderObj )
            return 0;
        shaderObjs.push( shderObj );
    }
    var progObj = this.Link( shaderObjs );
    // FIX: Link also returns null on failure, and `null != 0` is TRUE in
    // JavaScript, so the old `progObj != 0` check let a failed link fall
    // through into the introspection calls below with a null program.
    if ( progObj ) {
        // Cache every active attribute location by name.
        progObj.attrInx = {};
        var noOfAttributes = gl.getProgramParameter( progObj, gl.ACTIVE_ATTRIBUTES );
        for ( var i_n = 0; i_n < noOfAttributes; ++ i_n ) {
            var name = gl.getActiveAttrib( progObj, i_n ).name;
            progObj.attrInx[name] = gl.getAttribLocation( progObj, name );
        }
        // Cache every active uniform location by name.
        progObj.uniLoc = {};
        var noOfUniforms = gl.getProgramParameter( progObj, gl.ACTIVE_UNIFORMS );
        for ( var i_n = 0; i_n < noOfUniforms; ++ i_n ) {
            var name = gl.getActiveUniform( progObj, i_n ).name;
            progObj.uniLoc[name] = gl.getUniformLocation( progObj, name );
        }
    }
    return progObj;
}
// Lookup helpers over the location maps that ShProg.Create attached.
ShProg.AttrI    = function( progObj, name ) { return progObj.attrInx[name]; }
ShProg.UniformL = function( progObj, name ) { return progObj.uniLoc[name]; }
ShProg.Use      = function( progObj ) { gl.useProgram( progObj ); }
// Uniform setters; each silently skips names with no cached location
// (e.g. uniforms the GLSL compiler optimized away).
ShProg.SetI1  = function( progObj, name, val ) { var loc = progObj.uniLoc[name]; if (loc) gl.uniform1i( loc, val ); }
ShProg.SetF1  = function( progObj, name, val ) { var loc = progObj.uniLoc[name]; if (loc) gl.uniform1f( loc, val ); }
ShProg.SetF2  = function( progObj, name, arr ) { var loc = progObj.uniLoc[name]; if (loc) gl.uniform2fv( loc, arr ); }
ShProg.SetF3  = function( progObj, name, arr ) { var loc = progObj.uniLoc[name]; if (loc) gl.uniform3fv( loc, arr ); }
ShProg.SetF4  = function( progObj, name, arr ) { var loc = progObj.uniLoc[name]; if (loc) gl.uniform4fv( loc, arr ); }
ShProg.SetM33 = function( progObj, name, mat ) { var loc = progObj.uniLoc[name]; if (loc) gl.uniformMatrix3fv( loc, false, mat ); }
ShProg.SetM44 = function( progObj, name, mat ) { var loc = progObj.uniLoc[name]; if (loc) gl.uniformMatrix4fv( loc, false, mat ); }
// Compile one shader stage. `source` is either raw GLSL text or the id of a
// <script> element whose text holds the GLSL. Returns the shader object, or
// null (after alerting the info log) when compilation fails.
ShProg.Compile = function( source, shaderStage ) {
    var scriptTag = document.getElementById( source );
    if ( scriptTag )
        source = scriptTag.text;
    var shaderObj = gl.createShader( shaderStage );
    gl.shaderSource( shaderObj, source );
    gl.compileShader( shaderObj );
    var ok = gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS );
    if ( !ok )
        alert(gl.getShaderInfoLog(shaderObj));
    return ok ? shaderObj : null;
}
// Attach all compiled shader objects to a fresh program and link it.
// Returns the program object, or null (after an alert) on link failure.
ShProg.Link = function( shaderObjs ) {
    var prog = gl.createProgram();
    for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
        gl.attachShader( prog, shaderObjs[i_sh] );
    gl.linkProgram( prog );
    // FIX: `status` was assigned without `var`, leaking an implicit global
    // (and throwing a ReferenceError under "use strict").
    var status = gl.getProgramParameter( prog, gl.LINK_STATUS );
    if ( !status ) alert("Could not initialise shaders");
    gl.useProgram( null );
    return status ? prog : null;
}
var VertexBuffer = {
    // Upload a mesh to the GPU: one ARRAY_BUFFER per vertex attribute plus a
    // single ELEMENT_ARRAY_BUFFER of 16-bit indices. Returns a descriptor
    // object consumed by Draw().
    Create: function(attribs, indices) {
        var bufObj = { buf: [], attr: [], inx: gl.createBuffer(), inxLen: indices.length };
        for (var a = 0; a < attribs.length; ++a) {
            var vbo = gl.createBuffer();
            bufObj.buf.push(vbo);
            bufObj.attr.push({ size : attribs[a].attrSize, loc : attribs[a].attrLoc });
            gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
            gl.bufferData(gl.ARRAY_BUFFER, new Float32Array( attribs[a].data ), gl.STATIC_DRAW);
        }
        gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, bufObj.inx);
        gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( indices ), gl.STATIC_DRAW);
        gl.bindBuffer(gl.ARRAY_BUFFER, null);
        gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null);
        return bufObj;
    },
    // Draw a mesh created by Create(): point and enable each attribute,
    // issue one indexed draw call, then disable the attributes again.
    Draw: function(bufObj) {
        var nAttr = bufObj.buf.length;
        for (var a = 0; a < nAttr; ++a) {
            gl.bindBuffer(gl.ARRAY_BUFFER, bufObj.buf[a]);
            gl.vertexAttribPointer(bufObj.attr[a].loc, bufObj.attr[a].size, gl.FLOAT, false, 0, 0);
            gl.enableVertexAttribArray( bufObj.attr[a].loc );
        }
        gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, bufObj.inx);
        // NOTE(review): `primitve_type` (sic) is the property name callers
        // set on the descriptor; kept misspelled for compatibility.
        var mode = bufObj.primitve_type ? bufObj.primitve_type : gl.TRIANGLES;
        gl.drawElements(mode, bufObj.inxLen, gl.UNSIGNED_SHORT, 0);
        for (var a = 0; a < nAttr; ++a)
            gl.disableVertexAttribArray(bufObj.attr[a].loc);
        gl.bindBuffer( gl.ARRAY_BUFFER, null );
        gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, null );
    } };
// Entry point: build the scene and start rendering, then close the IIFE.
initScene();
})();
/* Full-window canvas: remove default margins and suppress scrollbars. */
html,body { margin: 0; overflow: hidden; }
/* Pin the slider form to the top-left corner, overlaying the canvas. */
#gui { position : absolute; top : 0; left : 0; }
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec3 inPos;
attribute vec3 inNV;
attribute vec3 inCol;
varying vec3 vertPos;
varying vec3 vertNV;
varying vec3 vertCol;
uniform mat4 u_projectionMat44;
uniform mat4 u_modelViewMat44;
void main()
{
    // NOTE(review): using the upper-left 3x3 of the model-view matrix as the
    // normal transform is only correct for uniform scaling; a non-uniform
    // scale would need the inverse-transpose -- confirm against callers.
    vertNV = mat3( u_modelViewMat44 ) * normalize( inNV );
    vertCol = inCol;
    // View-space position, passed to the fragment shader for lighting.
    vec4 pos = u_modelViewMat44 * vec4( inPos, 1.0 );
    vertPos = pos.xyz / pos.w;
    gl_Position = u_projectionMat44 * pos;
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec3 vertPos;
varying vec3 vertNV;
varying vec3 vertCol;
uniform float u_shininess;
void main()
{
    // Simple Blinn-style shading in view space: the "light" direction
    // coincides with the view direction (headlight).
    vec3 color = vertCol;
    vec3 normalV = normalize( vertNV );
    vec3 eyeV = normalize( -vertPos );
    vec3 halfV = normalize( eyeV + normalV );
    float NdotH = max( 0.0, dot( normalV, halfV ) );
    // NOTE(review): shineFac (normalized specular term) is computed but never
    // used; the output below uses raw NdotH instead -- possibly intentional
    // for the soft look, possibly a leftover. Confirm before removing.
    float shineFac = ( u_shininess + 2.0 ) * pow( NdotH, u_shininess ) / ( 2.0 * 3.14159265 );
    gl_FragColor = vec4( color.rgb * (0.2 + NdotH), 1.0 );
}
</script>
<script id="post-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
varying vec2 pos;
void main()
{
    // Pass-through vertex shader for a full-screen quad: inPos is already in
    // clip space ([-1,1]), so no matrix transform is needed.
    pos = inPos;
    gl_Position = vec4( inPos, 0.0, 1.0 );
}
</script>
<script id="blur-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec2 pos;
uniform sampler2D u_texture;
uniform vec2 u_textureSize;
uniform float u_sigma;
uniform float u_radius;
uniform vec2 u_dir;
// Gauss-like falloff; here u_sigma acts as the variance (sigma^2) term.
// NOTE(review): 3.14157 looks like a typo for pi (3.14159); the error mostly
// cancels because the result is renormalized by the accumulated weight below.
float CalcGauss( float x, float sigma )
{
    if ( sigma <= 0.0 )
        return 0.0;
    return exp( -(x*x) / (2.0 * sigma) ) / (2.0 * 3.14157 * sigma);
}
// One-dimensional separable blur pass along u_dir (run once horizontally,
// once vertically). The center texel carries weight 1.0; side samples are
// accumulated symmetrically until their weight becomes negligible.
void main()
{
    vec2 texC = pos.st * 0.5 + 0.5;
    vec4 texCol = texture2D( u_texture, texC );
    vec4 gaussCol = vec4( texCol.rgb, 1.0 );
    vec2 step = u_dir / u_textureSize;
    for ( int i = 1; i <= 32; ++ i )
    {
        float weight = CalcGauss( float(i) / 32.0, u_sigma * 0.5 );
        // Stop once further taps would contribute less than one color step.
        if ( weight < 1.0/255.0 )
            break;
        texCol = texture2D( u_texture, texC + u_radius * step * float(i) );
        gaussCol += vec4( texCol.rgb * weight, weight );
        texCol = texture2D( u_texture, texC - u_radius * step * float(i) );
        gaussCol += vec4( texCol.rgb * weight, weight );
    }
    // Normalize by the total accumulated weight (stored in .w).
    gaussCol.rgb = clamp( gaussCol.rgb / gaussCol.w, 0.0, 1.0 );
    gl_FragColor = vec4( gaussCol.rgb, 1.0 );
}
</script>
<div>
<!-- Blur-control sliders; changeEventHandler is defined in the page script. -->
<form id="gui" name="inputs">
<table>
<!-- "radius" scales the sample step distance of the blur shader. -->
<tr> <td> <font color= #CCF>radius</font> </td>
<td> <input type="range" id="radius" min="1" max="1000" value="350" onchange="changeEventHandler(event);"/></td> </tr>
<!-- "blur" feeds the Gaussian sigma uniform. -->
<tr> <td> <font color= #CCF>blur</font> </td>
<td> <input type="range" id="sigma" min="1" max="100" value="5" onchange="changeEventHandler(event);"/></td> </tr>
</table>
</form>
</div>
<!-- WebGL render target; sized by the page script. -->
<canvas id="canvas" style="border: none;"></canvas>