Is it possible to draw a Rect with rounded corners using a DrawNode object?
I think something like this should be possible using Bezier curves, but I've made a few attempts and can't quite get it to work.
Looking at the API, I've found only these two functions:
drawQuadBezier (const Vec2 &origin, const Vec2 &control, const Vec2 &destination, unsigned int segments, const Color4F &color)
drawCubicBezier (const Vec2 &origin, const Vec2 &control1, const Vec2 &control2, const Vec2 &destination, unsigned int segments, const Color4F &color)
[Modified after answer]
I have applied the answer in Cocos2d-x; maybe somebody will find this useful:
(I just cast some values to int; skip that if you need high precision)
auto MagicConst = 0.552; // kappa: cubic Bezier approximation of a quarter circle
auto position = 150;
auto R = 50;
// Corners of the bounding square (Y points up in Cocos2d-x)
Vec2 TopLeft = Vec2(position, position + R * 2);
Vec2 TopRight = Vec2(position + R * 2, position + R * 2);
Vec2 BottomRight = Vec2(position + R * 2, position);
Vec2 BottomLeft = Vec2(position, position);
// Arc start points: one radius away from each corner, along an edge
Vec2 originTL = Vec2(TopLeft.x, TopLeft.y - R);
Vec2 originTR = Vec2(TopRight.x - R, TopRight.y);
Vec2 originBR = Vec2(BottomRight.x - R, BottomRight.y);
Vec2 originBL = Vec2(BottomLeft.x, BottomLeft.y + R);
// First control points: MagicConst * R from the start point, toward the corner
Vec2 control1TL = Vec2(TopLeft.x, (int) (TopLeft.y - R * (1 - MagicConst)));
Vec2 control1TR = Vec2((int) (TopRight.x - R * (1 - MagicConst)), TopRight.y);
Vec2 control1BR = Vec2((int) (BottomRight.x - R * (1 - MagicConst)), BottomRight.y);
Vec2 control1BL = Vec2(BottomLeft.x, (int) (BottomLeft.y + R * (1 - MagicConst)));
// Second control points: MagicConst * R from the end point, toward the corner
Vec2 control2TL = Vec2((int) (TopLeft.x + R * (1 - MagicConst)), TopLeft.y);
Vec2 control2TR = Vec2(TopRight.x, (int) (TopRight.y - R * (1 - MagicConst)));
Vec2 control2BR = Vec2(BottomRight.x, (int) (BottomRight.y + R * (1 - MagicConst)));
Vec2 control2BL = Vec2((int) (BottomLeft.x + R * (1 - MagicConst)), BottomLeft.y);
// Arc end points: one radius away from each corner, along the other edge
Vec2 destinationTL = Vec2(TopLeft.x + R, TopLeft.y);
Vec2 destinationTR = Vec2(TopRight.x, TopRight.y - R);
Vec2 destinationBR = Vec2(BottomRight.x, BottomRight.y + R);
Vec2 destinationBL = Vec2(BottomLeft.x + R, BottomLeft.y);
// One cubic Bezier per corner (each in its own color for clarity)
auto roundCorner = DrawNode::create();
roundCorner->drawCubicBezier(originTL, control1TL, control2TL, destinationTL, 10, Color4F::RED);
roundCorner->drawCubicBezier(originTR, control1TR, control2TR, destinationTR, 10, Color4F::GREEN);
roundCorner->drawCubicBezier(originBR, control1BR, control2BR, destinationBR, 10, Color4F::YELLOW);
roundCorner->drawCubicBezier(originBL, control1BL, control2BL, destinationBL, 10, Color4F::WHITE);
addChild(roundCorner);
This will produce: http://i.stack.imgur.com/mdEOM.png
Now you can change MagicConst to round the corners as you want.
For example with MagicConst = 0.9: http://i.stack.imgur.com/9V5cr.png
That is the result I wanted! ;) (thank you @Mbo)
(I can't post embedded images yet) :P
It is possible to calculate a cubic Bezier curve that approximates a quarter circle, to make a rounded corner.
Example for the top-left corner of an axis-aligned rectangle (point TopLeft) and arc radius R:
Edit: Changed some -/+ signs
MagicConst = 0.552
Bezier.origin.X = TopLeft.X
Bezier.origin.Y = TopLeft.Y + R
Bezier.control1.X = TopLeft.X
Bezier.control1.Y = TopLeft.Y + R * (1 - MagicConst)
Bezier.control2.X = TopLeft.X + R * (1 - MagicConst)
Bezier.control2.Y = TopLeft.Y
Bezier.destination.X = TopLeft.X + R
Bezier.destination.Y = TopLeft.Y
About the magic constant: see the short derivation at the end of this answer.
You can easily find the symmetric coordinates for the other corners.
I did not consider the case of extremely short rectangle edges (< 2*R).
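For reference, a quick sketch of where the magic constant comes from (the standard derivation, not part of the answer itself): take the quarter circle of radius R from P0 = (0, R) to P3 = (R, 0) centered at the origin, and a cubic Bezier whose control points lie on the tangents at distance k*R from the endpoints:
P0 = (0, R), P1 = (kR, R), P2 = (R, kR), P3 = (R, 0)
Requiring the curve to pass through the arc midpoint (R/sqrt(2), R/sqrt(2)) at t = 1/2, where B(1/2) = (P0 + 3*P1 + 3*P2 + P3)/8, gives for the x coordinate:
(3kR + 4R)/8 = R/sqrt(2)  =>  k = 4*(sqrt(2) - 1)/3 ≈ 0.5523
which is MagicConst (0.552 rounded). Note that the answer measures the control points as (1 - MagicConst) * R from the corner, which is the same thing as k*R from the endpoints.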
Related
I'm trying to create an underwater filter by utilizing shaders in SparkAR.
My filter looks as intended in SparkAR, but not at all when tested in Instagram.
Here is a comparison, SparkAR on the left, Instagram on the right:
I thought it had something to do with the resolution, so I tried everything there already: upscaling, calculating the UVs by using getModelViewProjectionMatrix() instead of getRenderTargetSize(), etc.
Nothing worked, so I hope someone here has experienced something similar and can help me out!
Here is the shader code used:
#ifdef GL_ES
precision mediump float;
#endif
float length2(vec2 p) { return dot(p, p); }
float noise(vec2 p){
return fract(sin(fract(sin(p.x) * (4231.13311)) + p.y) * 3131.0011);
}
float worley(vec2 p) {
float d = 1e30;
for (int xo = -1; xo <= 1; ++xo) {
for (int yo = -1; yo <= 1; ++yo) {
vec2 tp = floor(p) + vec2(xo, yo);
d = min(d, length2(p - tp - vec2(noise(tp))));
}
}
return 3.0*exp(-4.0*abs(2.0*d - 1.0));
}
float fworley(vec2 p) {
float time = fragment(std::getTime());
return sqrt(sqrt(sqrt(
1.6 * // light
worley(p*32. + 4.3 + time*.125) *
sqrt(worley(p * 64. + 5.3 + time * -0.0625)) *
sqrt(sqrt(worley(p * -100. + 9.3))))));
}
void main(out vec4 Position, out vec4 Color) {
Position = std::getModelViewProjectionMatrix() * std::getVertexPosition();
vec2 scaling = vec2(1., 1.);
float time = fragment(std::getTime());
vec2 vertCoord = fragment(std::getVertexTexCoord());
vec2 resolution = fragment(std::getRenderTargetSize());
vec2 uv = floor(resolution * vertCoord) / resolution;
vec2 xDifference = vec2(2.0 * (sin(time / 2.0) / 2.0) - 1.5, 0.8);
float t = fworley(uv * resolution / (900.0 * scaling)) / 2.;
t *= exp(-length2(abs(1.0* (uv + xDifference) * - 1.0)));
t += fworley(uv * resolution / (450.0 * scaling)) / 2.;
xDifference = vec2(2.0 * (sin(time / 2.0) / 2.0) - 0.75, 0.7);
t *= exp(-length2(abs(1.0* (uv + xDifference) * - 1.0))) * 0.5;
t += fworley(uv * resolution / (300.0 * scaling)) / 3.;
xDifference = vec2(2.0 * (sin(time / 2.0) / 3.0) - 0.5, 0.6);
t *= exp(-length2(abs(1.0* (uv + xDifference) * - 1.0)));
Color = vec4((t+0.05) * vec3(0.3, 1.5*t, 0.3 + pow(t, 1.0-t)), 1.3);
}
I also already checked whether I used something that is not supported by GLSL 1.x, since that is the basis for SparkSL, but that isn't the case.
You need to read this article: https://sparkar.facebook.com/ar-studio/learn/sparksl/cross-device-shader-sparksl#opengl
In short, the issue is float precision: on many mobile GPUs mediump is only 16 bits, while desktop GPUs (and thus the SparkAR preview) typically compute mediump at full 32-bit precision, so sin() of large arguments and huge multipliers like 3131.0011 lose all of their fractional digits on device.
And never use:
fract(sin(fract(sin(p.x) * (4231.13311)) + p.y) * 3131.0011);
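As a replacement, sin-free hashes hold up much better when the intermediate values stay small. A minimal sketch, using Dave Hoskins' well-known "hash without sine" (hash12) as a drop-in for the noise() above; on devices that still misbehave, forcing highp on the hash inputs is the usual fallback:
// Sin-free hash (Dave Hoskins' hash12). No sin() of a large argument,
// and no huge multipliers, so far fewer fractional digits are lost
// under mediump (often 16-bit) floats on mobile GPUs.
float noise(vec2 p) {
    vec3 p3 = fract(vec3(p.xyx) * 0.1031);
    p3 += dot(p3, p3.yzx + 33.33);
    return fract((p3.x + p3.y) * p3.z);
}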
I have the following shader rendering voxels using raycasting:
#version 460
#extension GL_ARB_separate_shader_objects : enable
#pragma optionNV(unroll all)
layout(binding = 3, std140) uniform compVarsOb {
float time;
float phiA;
float thetaA;
vec3 camPos;
float fov;
int voxWidth;
int voxHeight;
int voxDepth;
} cvo;
layout(binding = 2, rgba8) uniform writeonly image2D img;
float hash3(vec2 xy){
xy = mod(xy, .19);
float h = dot(xy.yyx, vec3(.013, 27.15, 2027.3));
h *= h;
h *= fract(h);
return fract(h);
}
//layout(binding = 4) uniform sampler3D voxels;
layout(binding = 4, std140) buffer vData{
vec4 voxels[];
};
float greaterThan(float a, float b){
float d = a - b;
return (1. + (d / abs(d)))/2.;
}
float lesserThan(float a, float b){
float d = a - b;
return (1. - (d / abs(d)))/2.;
}
float withinBounds(ivec3 li){
vec3 l = vec3(li);
return greaterThan(l.x, 0.) * lesserThan(l.x, cvo.voxWidth) * greaterThan(l.y, 0.) * lesserThan(l.y, cvo.voxHeight) * greaterThan(l.z, 0.) * lesserThan(l.z, cvo.voxDepth);
}
vec4 quaternionMult(vec4 a, vec4 b){
return vec4(a.x * b.x - dot(a.yzw, b.yzw), a.x*b.yzw + b.x*a.yzw + cross(a.yzw, b.yzw));
}
void main()
{
vec2 iResolution = vec2(2560., 1440.);
vec2 fragCoord = gl_GlobalInvocationID.xy;
ivec2 fragI = ivec2(gl_GlobalInvocationID.xy);
vec2 iMouse = vec2(.5);
vec2 uv = fragCoord/iResolution.xy;
ivec2 uvI = ivec2(uv);
vec2 muv = iMouse.xy / iResolution.xy;
float iTime = cvo.time;
vec3 col = vec3(0.);
float screenRatio = iResolution.y / iResolution.x;
//Setting up the ray directions and other information about the point and camera
//##############################################################################
//camera direction angles phi (xy plane) and theta (xz plane)
float phi = cvo.phiA;//radians(360. * (1. - muv.x));
float theta = cvo.thetaA;//radians(180. * (1. - muv.y));
//get the camera direction as the basis for the rotation (each ray direction is a rotation of the camera direciton vector)
//it is in quarternion form here so its a vec4 instead of a vec3
vec4 camD = vec4(0., cos(phi) * sin(theta), sin(phi) * sin(theta), cos(theta));
float rad90 = radians(90.);
float fov = cvo.fov;
float xAng = radians(fov * (.5 - uv.x));
//replace "fov" with "(fov + (110. * pow(.5 - uv.x, 2.)))" below to add a counteractment to the fisheye lens effect
//it basically counteracts the artifact with quaternions that happens when you rotate by a large angle on one axis then try to rotate on another axis perpendicular, it just rotates around it thus making the new direction lesser
float yAng = radians(fov * screenRatio * (uv.y - .5));
//get the axes that the quarternions should be based around (perpendicular to the camera plane or dv)
vec3 xRotAxis = vec3(cos(phi) * sin(theta - rad90), sin(phi) * sin(theta - rad90), cos(theta - rad90));
vec3 yRotAxis = cross(xRotAxis, camD.yzw);//vec3(cos(phi - rad90) * sin(theta), sin(phi - rad90) * sin(theta), cos(theta));
//get the quarternions of the ray direction rotations
vec4 xQuat = vec4(cos(xAng / 2.), xRotAxis * sin(xAng / 2.));
vec4 yQuat = vec4(cos(yAng / 2.), yRotAxis * sin(yAng / 2.));
//combine the rotations
vec4 compQuat = quaternionMult(yQuat, xQuat);
//get the conjugate of the compQuart
vec4 conjComp = vec4(compQuat.x, -compQuat.yzw);
//ray direction
vec3 rayD = normalize(quaternionMult(quaternionMult(compQuat, camD), conjComp).yzw);
//camera location
vec3 cam = cvo.camPos;//vec3(cos(iTime), 0., 0.);
//point location and radius
//vec3 p = vec3(0., (5. * iTime) + 1., 0.);
float pr = .00001;
//############################################
//hit = 1. means that nothing has been hit or everything has been completely transparent
float hit = 1.;
vec3 locf = vec3(0.);
ivec3 loc = ivec3(0);
int locI = 0;
vec4 v = vec4(0.);
for(int i = 0; i < 20; i++){
locf = vec3((i * rayD * .4) + cam);
loc = ivec3(locf);
//adjust loc for the buffer indexing
locI = loc.x + loc.y * cvo.voxWidth + loc.z * cvo.voxWidth * cvo.voxHeight;
//vec4 v = texelFetch(voxels, loc, 0);//;imageLoad(voxels, loc);//texelFetch(voxels, ivec3((i * rayD) + cam), 0);
v = voxels[locI];// * withinBounds(loc);
if(locf.x < 0. || locf.x > cvo.voxWidth || locf.y < 0. || locf.y > cvo.voxHeight || locf.z < 0. || locf.z > cvo.voxDepth){
v = vec4(0.);
}
col += v.xyz * hit * v.w;
hit -= v.w;
if(hit <= 0.){
//col = vec3(v.w / 5.);
break;
}
}
//col = imageLoad(voxels, ivec3(uvI, 1)).xyz;//texelFetch(voxels, ivec3(fragI / 10, 1), 0).xyz;
//col = vec3(phi / radians(180.));
//col = texture(iChannel0, uv).xyz;
//col = voxels[(fragI.x / 10) + (fragI.y / 10) * cvo.voxWidth].xyz;
//col = vec3(rayD.z);
imageStore(img, fragI, vec4(col,1.0));
}
It produces this:
The problem is that when I change the loop at the end of main() to iterate over more than 2 voxels (it's at 20 right now), the fps absolutely tanks. Yet I feel that my GPU is capable of way more than 2 iterations of a not-so-demanding loop, so I'm not sure what is going on.
I am running on an RTX 2060 Super, which here is said to be capable of 7.81 * 10^12 FLOPS. If I'm understanding that correctly, to run a compute shader at 144 fps at 1440p I would be allowed a total of (7.81 * 10^12) / (144 * 2560 * 1440) floating-point operations per invocation. That comes out to about 14712, way more than my compute shader uses, yet my code only runs at an average of 30 fps with the loop at 20 iterations. I can only get 144+ fps when I cut the loop down to 1 or 2 iterations (which at that point is basically like not having the loop at all). Are loops just horribly unoptimized in compute shaders? Where am I going wrong?
The FLOPS a GPU is capable of are usually quoted for perfect conditions with perfect code: every computation is a fused multiply-add, each one can start right after the previous one with its data already in cache or registers, and every single core is busy. Achieving such conditions is the whole problem of writing code for GPUs.
GPUs normally keep multiple threads per core in flight to hide the latency of memory accesses. Caches are used to cope with the large bandwidth usage, but they are relatively small and require memory locality (data stored physically close together) to be effective. Some GPUs have a separate texture cache, and taking advantage of it can be wise. Drivers also try to store textures and images efficiently, at least by keeping nearby pixels close in memory, potentially using special hardware.
In compute shaders, threads are created in large blocks and then assigned to compute units (terminology differs between vendors; basically a sort of meta-core with its own cache and a bunch of cores). The size of a block is defined in the shader with layout(local_size_x = X, local_size_y = Y, local_size_z = Z) in;, where the total number of threads equals the product of the dimensions. Threads within a block can communicate through shared variables (usually stored in the same space as the L1 cache) and synchronise with barrier() and the like. GPUs have multiple compute units, and to keep them all busy multiple blocks should be launched (the values in vkCmdDispatch() are the number of groups launched).
Threads are also implicitly grouped into SIMD-like groups (nVidia calls this SIMT: single instruction, multiple threads). Every thread in such a group executes the same instruction (with some differences on newer nVidia cards). When an if(), for(), while(), etc. makes only some threads execute a portion of code, part of the lanes are masked off, potentially wasting performance. These groups usually have a size of 32 (nVidia) or 64 (AMD), so thread blocks should be created as a multiple of that number. Some functionality of these groups is exposed through the subgroup extensions.
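As a concrete illustration, a minimal compute-shader skeleton with an explicit block size (the 8x8 = 64 layout is my choice here, picked as a multiple of both 32 and 64; the image binding mirrors the question's shader):
#version 460
// 8x8 = 64 threads per block: a multiple of the SIMD group size on both
// nVidia (32) and AMD (64), so the block shape alone wastes no lanes.
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(binding = 2, rgba8) uniform writeonly image2D img;
void main() {
    ivec2 fragI = ivec2(gl_GlobalInvocationID.xy);
    imageStore(img, fragI, vec4(vec3(0.0), 1.0));
}
// Host side, covering 2560x1440:
// vkCmdDispatch(cmd, (2560 + 7) / 8, (1440 + 7) / 8, 1);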
Since you are working with voxels, I'd imagine that using 3D textures or images instead of a buffer may be good for the caches. Figuring out how to make threads cooperate and use the fast shared memory would also be a good idea.
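The question's shader already sketches that path in its comments (the sampler3D at binding 4 and the commented-out texelFetch call), so the switch might look roughly like this:
// Voxels as a 3D texture instead of a flat SSBO: drivers store textures
// with spatial locality, so rays marching through neighbouring cells are
// more likely to hit the (texture) cache.
layout(binding = 4) uniform sampler3D voxels;
vec4 fetchVoxel(ivec3 loc) {
    return texelFetch(voxels, loc, 0); // loc is the integer cell coordinate
}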
So I'm making a raytracer in OpenGL, fully shader based, and I'm struggling to figure out where the problem is with my shadow rays. If I multiply the radiance of the object by the shadow ray's output, it seems like only the "edge" of the sphere is lit up.
I verified the code multiple times without finding where the problem comes from.
This is what I get:
vec3 TraceShadowRay(vec3 hitPoint, vec3 normal, Object objects[3])
{
Light pointLight;
pointLight.position = vec3(0, 80, 0);
pointLight.intensity = 2;
Ray ShadowRay;
ShadowRay.origin = hitPoint + normal * 1e-4;
ShadowRay.dir = normalize(pointLight.position - ShadowRay.origin);
ShadowRay.t = 100000;
//ShadowRay.dir = vec3(0, 1, 0);
for(int i = 0; i < 3; ++i)
{
if(objects[i].type == 0)
{
if(interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
return vec3(0);
}
}
if(objects[i].type == 1)
{
if(intersectPlane(objects[i].normal, objects[i].position, ShadowRay))
{
return vec3(0);
}
}
}
float AngleNormalShadow = dot(ShadowRay.dir, normal);
clamp(AngleNormalShadow, 0, 1);
return GetLight(ShadowRay.origin, pointLight);// * AngleNormalShadow;
}
The GetLight function:
vec3 GetLight(vec3 origin, Light light)
{
return vec3(1, 1, 1) * light.intensity;
//float dist = sqrt( ((origin.x - light.position.x) * (origin.x - light.position.x)) + ((origin.y - light.position.y) * (origin.y - light.position.y)));
//return (vec3(1, 1, 1) * light.intensity) / (4 * M_PI * ((origin - light.position).length * (origin - light.position).length));
}
The intersectSphere function:
bool interectSphere(const vec3 center, float radius, inout Ray r)
{
vec3 o = r.origin;
vec3 d = r.dir;
vec3 v = o - center;
float b = 2 * dot(v, d);
float c = dot(v, v) - radius*radius;
float delta = b*b - 4 * c;
if(delta < 1e-4)
return false;
float t1 = (-b - sqrt(delta))/2;
float t2 = (-b + sqrt(delta))/2;
if(t1 < t2)
{
r.t = t1;
r.t2 = t2;
}
else if(t2 < t1)
{
r.t = t2;
r.t2 = t1;
}
r.reflectionNormal = normalize((r.origin + r.dir * r.t) - center);
return true;
}
The expected result is a nicely shaded sphere, with the light coming from the top of the spheres.
Could it be a missing negation? It looks like interectSphere() returns true when there is a collision, but the calling code in TraceShadowRay() bails out when it returns true.
old:
if(interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
return vec3(0);
}
new:
if(!interectSphere(objects[i].position, objects[i].radius, ShadowRay))
{
return vec3(0);
}
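Independent of the negation, a shadow-ray occlusion test usually also restricts hits to the segment between the surface point and the light; hits behind the origin or beyond the light should not cast a shadow. A sketch under that assumption, reusing the question's structs (interectSphere() writes the hit distance into ShadowRay.t):
float lightDist = distance(pointLight.position, ShadowRay.origin);
if(interectSphere(objects[i].position, objects[i].radius, ShadowRay)
   && ShadowRay.t > 0.0 && ShadowRay.t < lightDist)
{
    return vec3(0); // occluder strictly between surface and light
}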
The repository (GitHub)
I'm having issues with my diffuse shading for my models (it does not arise when rendering primitives).
What's interesting to note is that the shading on the left reflective sphere appears normal (I might be wrong on this; it's based on observation).
Low poly bunny and a triangle
Low poly bunny and 2 reflective spheres
Cube and 2 reflective spheres
I'm not sure what I'm doing wrong, as the normals are calculated each time I create a triangle in the constructor. I am using tinyobjloader to load my models; here is the TriangleMesh intersection algorithm.
FPType TriangleMesh::GetIntersection(const Ray &ray)
{
for(auto &shape : shapes)
{
size_t index_offset = 0;
for(size_t f = 0; f < shape.mesh.num_face_vertices.size(); ++f) // faces (triangles)
{
int fv = shape.mesh.num_face_vertices[f];
tinyobj::index_t &idx0 = shape.mesh.indices[index_offset + 0]; // v0
tinyobj::index_t &idx1 = shape.mesh.indices[index_offset + 1]; // v1
tinyobj::index_t &idx2 = shape.mesh.indices[index_offset + 2]; // v2
Vec3d v0 = Vec3d(attrib.vertices[3 * idx0.vertex_index + 0], attrib.vertices[3 * idx0.vertex_index + 1], attrib.vertices[3 * idx0.vertex_index + 2]);
Vec3d v1 = Vec3d(attrib.vertices[3 * idx1.vertex_index + 0], attrib.vertices[3 * idx1.vertex_index + 1], attrib.vertices[3 * idx1.vertex_index + 2]);
Vec3d v2 = Vec3d(attrib.vertices[3 * idx2.vertex_index + 0], attrib.vertices[3 * idx2.vertex_index + 1], attrib.vertices[3 * idx2.vertex_index + 2]);
Triangle tri(v0, v1, v2);
if(tri.GetIntersection(ray))
return tri.GetIntersection(ray);
index_offset += fv;
}
}
return 0; // no face was hit
}
The Triangle intersection algorithm:
FPType Triangle::GetIntersection(const Ray &ray)
{
Vector3d v0v1 = v1 - v0;
Vector3d v0v2 = v2 - v0;
Vector3d pvec = ray.GetDirection().Cross(v0v2);
FPType det = v0v1.Dot(pvec);
// ray and triangle are parallel if det is close to 0
if(abs(det) < BIAS)
return false;
FPType invDet = 1 / det;
FPType u, v;
Vector3d tvec = ray.GetOrigin() - v0;
u = tvec.Dot(pvec) * invDet;
if(u < 0 || u > 1)
return false;
Vector3d qvec = tvec.Cross(v0v1);
v = ray.GetDirection().Dot(qvec) * invDet;
if(v < 0 || u + v > 1)
return false;
FPType t = v0v2.Dot(qvec) * invDet;
if(t < BIAS)
return false;
return t;
}
I think this is because, when I'm handling all my object intersections, the triangle mesh is regarded as just one object, so it only returns one normal when I try to get the object normals: code
Color Trace(const Vector3d &origin, const Vector3d &direction, const std::vector<std::shared_ptr<Object>> &sceneObjects, const int indexOfClosestObject,
const std::vector<std::shared_ptr<Light>> &lightSources, const int &depth = 0)
{
if(indexOfClosestObject != -1 && depth <= DEPTH) // not checking depth for infinite mirror effect (not a lot of overhead)
{
std::shared_ptr<Object> sceneObject = sceneObjects[indexOfClosestObject];
Vector3d normal = sceneObject->GetNormalAt(origin);
screenshot of debug
EDIT: I have solved the issue and now shading works properly: https://github.com/MrCappuccino/Tracey/blob/testing/src/TriangleMesh.cpp#L35-L48
If you iterate over all your faces and return on the first face you hit, you may hit a face that is behind another face and therefore not the face you actually want. You have to track the closest intersection and return the one with the shortest ray (smallest t).
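This is the standard closest-hit pattern; a minimal GLSL-style sketch (most of this page's code is GLSL; the intersectTriangle() helper and faceCount are hypothetical stand-ins for the mesh loop above, which the asker's linked C++ fix implements):
// Track the smallest positive t over all faces instead of returning early.
float closestT = 1e30;
bool hitAnything = false;
for (int f = 0; f < faceCount; ++f) {
    float t = intersectTriangle(ray, f); // hypothetical: returns 0 on a miss
    if (t > 0.0 && t < closestT) {
        closestT = t; // nearest face so far
        hitAnything = true;
    }
}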
I've recently started experimenting with noise (simple Perlin noise) and have run into a slight problem with animating it. So far I've come across an awesome-looking 3D noise (https://github.com/ashima/webgl-noise) that I could use in my project but understood nothing of, and a bunch of tutorials that explain how to create simple 2D noise.
For the 2d noise, I originally used the following fragment shader:
uniform sampler2D al_tex;
varying vec4 varying_pos; //Actual coords
varying vec2 varying_texcoord; //Normalized coords
uniform float time;
float rand(vec2 co) { return fract(sin(dot(co, vec2(12.9898, 78.233))) * 43758.5453); }
float ease(float p) { return 3*p*p - 2*p*p*p; }
float cnoise(vec2 p, int wavelength)
{
int ix1 = (int(varying_pos.x) / wavelength) * wavelength;
int iy1 = (int(varying_pos.y) / wavelength) * wavelength;
int ix2 = (int(varying_pos.x) / wavelength) * wavelength + wavelength;
int iy2 = (int(varying_pos.y) / wavelength) * wavelength + wavelength;
float x1 = ix1 / 1280.0f;
float y1 = iy1 / 720.0f;
float x2 = ix2 / 1280.0f;
float y2 = iy2 / 720.0f;
float xOffset = (varying_pos.x - ix1) / wavelength;
float yOffset = (varying_pos.y - iy1) / wavelength;
xOffset = ease(xOffset);
yOffset = ease(yOffset);
float t1 = rand(vec2(x1, y1));
float t2 = rand(vec2(x2, y1));
float t3 = rand(vec2(x2, y2));
float t4 = rand(vec2(x1, y2));
float tt1 = mix(t1, t2, xOffset);
float tt2 = mix(t4, t3, xOffset);
return mix(tt1, tt2, yOffset);
}
void main()
{
float t = 0;
int minFreq = 0;
int noIterations = 8;
for (int i = 0; i < noIterations; i++)
t += cnoise(varying_texcoord, int(pow(2, i + minFreq))) / pow(2, noIterations - i);
gl_FragColor = vec4(vec3(t), 1);
}
The result that I got was this:
Now, I want to animate it with time. My first thought was to change the rand function to take a vec3 instead of a vec2, and then to change my cnoise function accordingly, to interpolate values in the z direction too. With that goal in mind, I made this:
sampler2D al_tex;
varying vec4 varying_pos;
varying vec2 varying_texcoord;
uniform float time;
float rand(vec3 co) { return fract(sin(dot(co, vec3(12.9898, 78.2332, 58.5065))) * 43758.5453); }
float ease(float p) { return 3*p*p - 2*p*p*p; }
float cnoise(vec3 pos, int wavelength)
{
ivec3 iPos1 = (ivec3(pos) / wavelength) * wavelength; //The first value that I'll sample to interpolate
ivec3 iPos2 = iPos1 + wavelength; //The second value
vec3 transPercent = (pos - iPos1) / wavelength; //Transition percent - A float in [0-1) indicating how much of each of the above values will contribute to final result
transPercent.x = ease(transPercent.x);
transPercent.y = ease(transPercent.y);
transPercent.z = ease(transPercent.z);
float t1 = rand(vec3(iPos1.x, iPos1.y, iPos1.z));
float t2 = rand(vec3(iPos2.x, iPos1.y, iPos1.z));
float t3 = rand(vec3(iPos2.x, iPos2.y, iPos1.z));
float t4 = rand(vec3(iPos1.x, iPos2.y, iPos1.z));
float t5 = rand(vec3(iPos1.x, iPos1.y, iPos2.z));
float t6 = rand(vec3(iPos2.x, iPos1.y, iPos2.z));
float t7 = rand(vec3(iPos2.x, iPos2.y, iPos2.z));
float t8 = rand(vec3(iPos1.x, iPos2.y, iPos2.z));
float tt1 = mix(t1, t2, transPercent.x);
float tt2 = mix(t4, t3, transPercent.x);
float tt3 = mix(t5, t6, transPercent.x);
float tt4 = mix(t8, t7, transPercent.x);
float tt5 = mix(tt1, tt2, transPercent.y);
float tt6 = mix(tt3, tt4, transPercent.y);
return mix(tt5, tt6, transPercent.z);
}
float fbm(vec3 p)
{
float t = 0;
int noIterations = 8;
for (int i = 0; i < noIterations; i++)
t += cnoise(p, int(pow(2, i))) / pow(2, noIterations - i);
return t;
}
void main()
{
vec3 p = vec3(varying_pos.xy, time);
float t = fbm(p);
gl_FragColor = vec4(vec3(t), 1);
}
However, on doing this, the animation feels... strange. It's as though I'm watching a slideshow of Perlin noise slides, with the individual slides fading in. All the other Perlin noise examples I have tried (like https://github.com/ashima/webgl-noise) actually animate with time: you can see the motion, rather than just images fading in. I know I could just use the webgl-noise shader, but I want to make one myself, and for some reason I'm failing miserably. Could anyone tell me where I'm going wrong, or suggest how I can animate it properly with time?
You should probably include z in the sin function:
float rand(vec3 co) { return fract(sin(dot(co.xy ,vec2(12.9898,78.233)) + co.z) * 43758.5453); }
Apparently these seemingly random numbers are prime numbers, which helps avoid patterns in the noise. I found another prime number, 94418953, and included it in the sin/dot function. Try this:
float rand(vec3 co) { return fract(sin(dot(co.xyz ,vec3(12.9898,78.233, 9441.8953))) * 43758.5453); }
EDIT: You don't take the wavelength into account on the z axis. This means that all your iterations will have the same interpolation distance in z; in other words, you get the fade effect you're describing. Try calculating z the same way you calculate x and y:
int iz1 = (int(p.z) / wavelength) * wavelength;
int iz2 = (int(p.z) / wavelength) * wavelength + wavelength;
float z1 = iz1 / 720.0f;
float z2 = iz2 / 720.0f;
float zOffset = (p.z - iz1) / wavelength;
This means, however, that the z value will vary at the same rate that y does. So if you want it to scale from 0 to 1, you should probably multiply z by 720 before passing it into the noise function.
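Following that note, the call site in main() would change roughly like this (a sketch; 720 matches the screen-height divisor already used in the shader):
// Scale time so z advances at a rate comparable to the pixel axes
// (y spans 720 here); otherwise z crawls relative to the wavelength grid.
vec3 p = vec3(varying_pos.xy, time * 720.0);
float t = fbm(p);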
Check this code; it's a simple version of 3D noise:
// Here are some easy-to-understand noise generators... the line computing D does the cubic interpolation (rounding)
function rndng ( n: float ): float
{ //random proportion in -1..1 ... many people use Sin to take the
//linearity out of a pseudo-random, but the expression n*n is faster on the CPU.
var e = ( n *321.9234)%1;
return (e*e*111.07546)%2-1;
}
function lerps(o:float, v:float, alpha:float):float
{
o += ( v - o ) * alpha;
return o;
}
//3d ----------------
function lnz ( vtx: Vector3 ): float //3d perlin noise code fast
{
vtx= Vector3 ( Mathf.Abs(vtx.x) , Mathf.Abs(vtx.y) , Mathf.Abs(vtx.z) ) ;
var I = Vector3 (Mathf.Floor(vtx.x),Mathf.Floor(vtx.y),Mathf.Floor(vtx.z));
var D = Vector3(vtx.x%1,vtx.y%1,vtx.z%1);
D = Vector3(D.x*D.x*(3.0-2.0*D.x),D.y*D.y*(3.0-2.0*D.y),D.z*D.z*(3.0-2.0*D.z));
var W = I.x + I.y*71.0 + 125.0*I.z;
return lerps(
lerps( lerps(rndng(W+0.0),rndng(W+1.0),D.x) , lerps(rndng(W+71.0),rndng(W+72.0),D.x) , D.y)
,
lerps( lerps(rndng(W+125.0),rndng(W+126.0),D.x) , lerps(rndng(W+153.0),rndng(W+154.0),D.x) , D.y)
,
D.z
);
}
//1d ----------------
function lnzo ( vtx: Vector3 ): float //perlin noise, same as unityfunction version
{
var total = 0.0;
for (var i:int = 1; i < 5; i ++)
{
total+= lnz2(Vector3 (vtx.x*(i*i),0.0,vtx.z*(i*i)))/(i*i);
}
return total*5;
}
//2d 3 axis honeycombe noise ----------------
function lnzh ( vtx: Vector3 ): float // perlin noise, 2d, with 3 axes at 60° instead of 2 x/y axes
{
vtx= Vector3 ( Mathf.Abs(vtx.z) , Mathf.Abs(vtx.z*.5-vtx.x*.866) , Mathf.Abs(vtx.z*.5+vtx.x*.866) ) ;
var I = Vector3 (Mathf.Floor(vtx.x),Mathf.Floor(vtx.y),Mathf.Floor(vtx.z));
var D = Vector3(vtx.x%1,vtx.y%1,vtx.z%1);
//D = Vector3(D.x*D.x*(3.0-2.0*D.x),D.y*D.y*(3.0-2.0*D.y),D.z*D.z*(3.0-2.0*D.z));
var W = I.x + I.y*71.0 + 125.0*I.z;
return lerps(
lerps( lerps(rndng(W+0.0),rndng(W+1.0),D.x) , lerps(rndng(W+71.0),rndng(W+72.0),D.x) , D.y)
,
lerps( lerps(rndng(W+125.0),rndng(W+126.0),D.x) , lerps(rndng(W+153.0),rndng(W+154.0),D.x) , D.y)
,
D.z
);
}
//2d ----------------
function lnz2 ( vtx: Vector3 ): float // i think this is 2d perlin noise
{
vtx= Vector3 ( Mathf.Abs(vtx.x) , Mathf.Abs(vtx.y) , Mathf.Abs(vtx.z) ) ;
var I = Vector3 (Mathf.Floor(vtx.x),Mathf.Floor(vtx.y),Mathf.Floor(vtx.z));
var D = Vector3(vtx.x%1,vtx.y%1,vtx.z%1);
D = Vector3(D.x*D.x*(3.0-2.0*D.x),D.y*D.y*(3.0-2.0*D.y),D.z*D.z*(3.0-2.0*D.z));
var W = I.x + I.y*71.0 + 125.0*I.z;
return lerps(
lerps( lerps(rndng(W+0.0),rndng(W+1.0),D.x) , lerps(rndng(W+71.0),rndng(W+72.0),D.x) , D.z)
,
lerps( rndng(W+125.0), rndng(W+126.0),D.x)
,
D.z
);
}