Vertex Shader Perlin Noise, Normals (Again....)

I keep coming back to this one, and still haven’t found a solution that works on my system.

The problem is this:
I’d like to create a sphere, radially distorted by Perlin-type noise, with working normals for lighting, using GLSL. I’ve tried lots of different methods, but none seem to work.

My latest attempt is based on the venerable Vertex Noise shader from NVIDIA.

The GLSL vertex-shader code is a simplified version of an original NVIDIA HLSL example.


/******************************************************************************
File:  vnoise.glsl

Copyright NVIDIA Corporation 2002
TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED
*AS IS* AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS
BE LIABLE FOR ANY SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES
WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS)
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.


Comments:

sgreen 5/02/02:

This is based on Perlin's original code:
    http://mrl.nyu.edu/~perlin/doc/oscar.html

    It combines the permutation and gradient tables into one array of
    vec4's to conserve constant memory.
    The table is duplicated twice to avoid modulo operations.

jallen@nvidia.com: 10/12/03:

    GLSL version of Cg vertex noise shader

Notes:

    Should use separate tables for 1, 2 and 3D versions

******************************************************************************/

#define B  32      // table size
#define B2 66      // B*2 + 2
#define BR 0.03125 // 1 / B

// this is the smoothstep function f(t) = 3t^2 - 2t^3, without the normalization
vec3 s_curve(vec3 t)
{
    return t*t*( vec3(3.0, 3.0, 3.0) - vec3(2.0, 2.0, 2.0)*t);
}

vec2 s_curve(vec2 t)
{
    return t*t*( vec2(3.0, 3.0) - vec2(2.0, 2.0)*t);
}

float s_curve(float t)
{
    return t*t*(3.0-2.0*t);
}

// 3D version
float noise(vec3 v, vec4 pg[B2])
{
    v = v + vec3(10000.0, 10000.0, 10000.0);   // hack to avoid negative numbers

    vec3 i = fract(v * BR) * float(B);   // index between 0 and B-1
    vec3 f = fract(v);            // fractional position

    // lookup in permutation table
    vec2 p;
    p.x = pg[ int(i[0])     ].w;
    p.y = pg[ int(i[0]) + 1 ].w;
    p = p + i[1];

    vec4 b;
    b.x = pg[ int(p[0]) ].w;
    b.y = pg[ int(p[1]) ].w;
    b.z = pg[ int(p[0]) + 1 ].w;
    b.w = pg[ int(p[1]) + 1 ].w;
    b = b + i[2];

    // compute dot products between gradients and vectors
    vec4 r;
    r[0] = dot( pg[ int(b[0]) ].xyz, f );
    r[1] = dot( pg[ int(b[1]) ].xyz, f - vec3(1.0, 0.0, 0.0) );
    r[2] = dot( pg[ int(b[2]) ].xyz, f - vec3(0.0, 1.0, 0.0) );
    r[3] = dot( pg[ int(b[3]) ].xyz, f - vec3(1.0, 1.0, 0.0) );

    vec4 r1;
    r1[0] = dot( pg[ int(b[0]) + 1 ].xyz, f - vec3(0.0, 0.0, 1.0) );
    r1[1] = dot( pg[ int(b[1]) + 1 ].xyz, f - vec3(1.0, 0.0, 1.0) );
    r1[2] = dot( pg[ int(b[2]) + 1 ].xyz, f - vec3(0.0, 1.0, 1.0) );
    r1[3] = dot( pg[ int(b[3]) + 1 ].xyz, f - vec3(1.0, 1.0, 1.0) );

    // interpolate
    f = s_curve(f);
    r = mix( r, r1, f[2] );
    r = mix( r.xyyy, r.zwww, f[1] );
    return mix( r.x, r.y, f[0] );
}

// 2D version
float noise(vec2 v, vec4 pg[B2])
{
    v = v + vec2(10000.0, 10000.0);

    vec2 i = fract(v * BR) * float(B);   // index between 0 and B-1
    vec2 f = fract(v);            // fractional position

    // lookup in permutation table
    vec2 p;
    p[0] = pg[ int(i[0])   ].w;
    p[1] = pg[ int(i[0]) + 1 ].w;
    p = p + i[1];

    // compute dot products between gradients and vectors
    vec4 r;
    r[0] = dot( pg[ int(p[0]) ].xy,   f);
    r[1] = dot( pg[ int(p[1]) ].xy,   f - vec2(1.0, 0.0) );
    r[2] = dot( pg[ int(p[0]) + 1 ].xy, f - vec2(0.0, 1.0) );
    r[3] = dot( pg[ int(p[1]) + 1 ].xy, f - vec2(1.0, 1.0) );

    // interpolate
    f = s_curve(f);
    r = mix( r.xyyy, r.zwww, f[1] );
    return mix( r.x, r.y, f[0] );
}

// 1D version
float noise(float v, vec4 pg[B2])
{
    v = v + 10000.0;

    float i = fract(v * BR) * float(B);   // index between 0 and B-1
    float f = fract(v);            // fractional position

    // compute dot products between gradients and vectors
    vec2 r;
    r[0] = pg[int(i)].x * f;
    r[1] = pg[int(i) + 1].x * (f - 1.0);

    // interpolate
    f = s_curve(f);
    return mix( r[0], r[1], f);
}

uniform float Displacement;

void main()
{
    // Noise Table
	vec4 nTab[B2];   // permutation/gradient table, built locally instead of passed as a uniform
	nTab[0]=vec4(-0.569811,0.432591,-0.698699,0.0);	nTab[1]=vec4(0.78118,0.163006,0.60265,1.0);
	nTab[2]=vec4(0.436394,-0.297978,0.848982,2.0);		nTab[3]=vec4(0.843762,-0.185742,-0.503554,3.0);
	nTab[4]=vec4(0.663712,-0.68443,-0.301731,4.0);		nTab[5]=vec4(0.616757,0.768825,0.168875,5.0);
	nTab[6]=vec4(0.457153,-0.884439,-0.093694,6.0);	nTab[7]=vec4(-0.956955,0.110962,-0.268189,7.0);
	nTab[8]=vec4(0.115821,0.77523,0.620971,8.0);		nTab[9]=vec4(-0.716028,-0.477247,-0.50945,9.0);
	nTab[10]=vec4(0.819593,-0.123834,0.559404,10.0);	nTab[11]=vec4(-0.522782,-0.586534,0.618609,11.0);
	nTab[12]=vec4(-0.792328,-0.577495,-0.196765,12.0);	nTab[13]=vec4(-0.674422,0.0572986,0.736119,13.0);
	nTab[14]=vec4(-0.224769,-0.764775,-0.60382,14.0);	nTab[15]=vec4(0.492662,-0.71614,0.494396,15.0);
	nTab[16]=vec4(0.470993,-0.645816,0.600905,16.0);	nTab[17]=vec4(-0.19049,0.321113,0.927685,17.0);
	nTab[18]=vec4(0.0122118,0.946426,-0.32269,18.0);	nTab[19]=vec4(0.577419,0.408182,0.707089,19.0);
	nTab[20]=vec4(-0.0945428,0.341843,-0.934989,20.0);	nTab[21]=vec4(0.788332,-0.60845,-0.0912217,21.0);
	nTab[22]=vec4(-0.346889,0.894997,-0.280445,22.0);	nTab[23]=vec4(-0.165907,-0.649857,0.741728,23.0);
	nTab[24]=vec4(0.791885,0.124138,0.597919,24.0);	nTab[25]=vec4(-0.625952,0.73148,0.270409,25.0);
	nTab[26]=vec4(-0.556306,0.580363,0.594729,26.0);	nTab[27]=vec4(0.673523,0.719805,0.168069,27.0);
	nTab[28]=vec4(-0.420334,0.894265,0.153656,28.0);	nTab[29]=vec4(-0.141622,-0.279389,0.949676,29.0);
	nTab[30]=vec4(-0.803343,0.458278,0.380291,30.0);	nTab[31]=vec4(0.49355,-0.402088,0.77119,31.0);
	nTab[32]=vec4(-0.569811,0.432591,-0.698699,0.0);	nTab[33]=vec4(0.78118,0.163006,0.60265,1.0);
	nTab[34]=vec4(0.436394,-0.297978,0.848982,2.0);	nTab[35]=vec4(0.843762,-0.185742,-0.503554,3.0);
	nTab[36]=vec4(0.663712,-0.68443,-0.301731,4.0);	nTab[37]=vec4(0.616757,0.768825,0.168875,5.0);
	nTab[38]=vec4(0.457153,-0.884439,-0.093694,6.0);	nTab[39]=vec4(-0.956955,0.110962,-0.268189,7.0);
	nTab[40]=vec4(0.115821,0.77523,0.620971,8.0);		nTab[41]=vec4(-0.716028,-0.477247,-0.50945,9.0); 
	nTab[42]=vec4(0.819593,-0.123834,0.559404,10.0);	nTab[43]=vec4(-0.522782,-0.586534,0.618609,11.0); 
	nTab[44]=vec4(-0.792328,-0.577495,-0.196765,12.0);	nTab[45]=vec4(-0.674422,0.0572986,0.736119,13.0); 
	nTab[46]=vec4(-0.224769,-0.764775,-0.60382,14.0);	nTab[47]=vec4(0.492662,-0.71614,0.494396,15.0); 
	nTab[48]=vec4(0.470993,-0.645816,0.600905,16.0);	nTab[49]=vec4(-0.19049,0.321113,0.927685,17.0); 
	nTab[50]=vec4(0.0122118,0.946426,-0.32269,18.0);	nTab[51]=vec4(0.577419,0.408182,0.707089,19.0); 
	nTab[52]=vec4(-0.0945428,0.341843,-0.934989,20.0);	nTab[53]=vec4(0.788332,-0.60845,-0.0912217,21.0); 
	nTab[54]=vec4(-0.346889,0.894997,-0.280445,22.0);	nTab[55]=vec4(-0.165907,-0.649857,0.741728,23.0); 
	nTab[56]=vec4(0.791885,0.124138,0.597919,24.0);	nTab[57]=vec4(-0.625952,0.73148,0.270409,25.0); 
	nTab[58]=vec4(-0.556306,0.580363,0.594729,26.0);	nTab[59]=vec4(0.673523,0.719805,0.168069,27.0);
	nTab[60]=vec4(-0.420334,0.894265,0.153656,28.0);	nTab[61]=vec4(-0.141622,-0.279389,0.949676,29.0); 
	nTab[62]=vec4(-0.803343,0.458278,0.380291,30.0);	nTab[63]=vec4(0.49355,-0.402088,0.77119,31.0); 
	nTab[64]=vec4(-0.569811,0.432591,-0.698699,0.0);	nTab[65]=vec4(0.78118,0.163006,0.60265,1.0);
	
    
    
    vec4 noisePos = gl_TextureMatrix[0] * gl_Vertex;

    float i = (noise(noisePos.xyz, nTab) + 1.0) * 0.5;
    gl_FrontColor = vec4(i, i, i, 1.0);

    // displacement along normal
    vec4 position = gl_Vertex + (vec4(gl_Normal, 1.0) * i * Displacement);
    position.w = 1.0;

    gl_Position = gl_ModelViewProjectionMatrix * position;
}

I’ve modified the code so the noise table is declared inside the main() function of the vertex shader, rather than passed in as a uniform.

The idea is that each vertex’s XYZ position is used to generate a noise value, which is then used to move each vertex of a spherical mesh along its normal to create a modulated form.

Now, this seems to run quite fast, but profiling suggests it’s falling back to software rendering after a few seconds on my system (MacBook Pro, Mac OS X 10.5.7, ATI Radeon X1600 GPU with 256MB).

So my first question is: is there any way of preventing this? Is there anything obvious in the code that would prevent GPU execution, and that could be modified to allow it?

The second question is: how would I generate normals for lighting the mesh?

I’ve tried using the ‘neighbour’ technique to get tangent and bitangent points, but to do this, I have to start off with a flat grid of vertices, wrap it around into a sphere using the standard parametric sphere formula, then apply the noise to the sphere point. Then I have to do the whole thing twice more with tiny offsets on the U and V axes to get the tangent/bitangent. Whenever I try to implement this method I get a crash, even though I am able to do both discrete tasks on their own (i.e. modulate a sphere with Perlin noise via the above method, and create a spherical mesh from a flat plane mesh).

Is there some way I could get usable normals without having to start with a flat mesh, eliminating the sphere function?

I had vague thoughts about somehow exploiting the fact that the normal for a sphere is just the normalized XYZ coordinates of each vertex in object-space, but I don’t know how.
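
In case it helps explain what I mean, here’s roughly the sort of thing I had in mind (completely untested, and noise3D() is just a stand-in name for whatever noise function ends up being used): displace a sphere primitive directly along its own normalized object-space position, and build a small tangent frame around that direction so the ‘neighbour’ trick still works without the parametric sphere function.

// Untested sketch: assumes a unit-sphere primitive centred on the origin,
// and some noise3D() function (stand-in name) that returns a float.
uniform float Displacement;
const float eps = 0.01;

varying vec3 normal;

vec3 displaceOnSphere(vec3 dir)    // 'dir' is a unit vector from the centre
{
    float h = noise3D(dir);                    // hypothetical noise lookup
    return dir * (1.0 + h * Displacement);     // radial displacement
}

void main()
{
    vec3 n = normalize(gl_Vertex.xyz);         // sphere normal = normalized object-space position

    // Arbitrary tangent frame around the sphere normal
    vec3 up = (abs(n.y) < 0.99) ? vec3(0.0, 1.0, 0.0) : vec3(1.0, 0.0, 0.0);
    vec3 t  = normalize(cross(n, up));
    vec3 b  = cross(n, t);

    // 'Neighbour' technique applied on the sphere surface itself
    vec3 p0 = displaceOnSphere(n);
    vec3 p1 = displaceOnSphere(normalize(n + eps * t));
    vec3 p2 = displaceOnSphere(normalize(n + eps * b));

    normal = gl_NormalMatrix * normalize(cross(p1 - p0, p2 - p0));
    gl_Position = gl_ModelViewProjectionMatrix * vec4(p0, 1.0);
}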

Sorry to keep going on about this one, I’m sure you’re all sick of me bringing it up over and over. It’s just I find it really annoying that several people have been able to get this working using DirectX and HLSL (and working fast, too, on the GPU), and I just don’t seem to be able to get it to work in GLSL.

I’m not invested in this particular Perlin implementation; I’d be perfectly happy with any other means of doing this kind of thing:
http://www.vimeo.com/5074567
Anything like that, but with normals and lighting at a decent framerate, would be great!

Incidentally, I’d also be happy with a 2D rather than 3D noise effect, as I like the symmetry of the 2D method (as in the video clip), though the option of 3D noise would also be cool.

Anyone any ideas?

Thanks a lot guys,

a|x
http://machinesdontcare.wordpress.com

Well, I thought I’d try a slightly different approach: using Vertex Texture Fetch to look up permutation values from a pre-calculated permutation texture. The setup is basically similar to my original attempt using the NVIDIA vBomb shader, but simplified a little, because the 2D texture lookup replaces the large array (which I assumed was what was causing the crashes in the previous version).

I’ve got it kind of working now, with the code below. However, there seems to be a problem with my normal estimation.

Here’s the code:

Vertex Shader:

////////////////////////
//  2D Perlin Noise   //
////////////////////////

uniform sampler2D permTexture;
uniform vec2 nScale;
uniform vec2 nOffset;
uniform float dAmt;

/*
	To create offsets of one texel and one half texel in the
	texture lookup, we need to know the texture image size.
*/

const float permTexUnit = 1.0 / 256.0;
const float permTexUnitHalf = 0.5 / 256.0;

float fade(in float t) {
	return t*t*t*(t*(t*6.0-15.0)+10.0);
}

float pnoise2(in vec2 P)
{
	// Integer part, scaled and offset for texture lookup
	vec2 Pi = permTexUnit*floor(P)+permTexUnitHalf;
	// Fractional part for interpolation
	vec2 Pf = fract(P);
	
	// Noise contribution from lower left corner
	vec2 grad00 = texture2D(permTexture, Pi).rg * 4.0 - 1.0;
	float n00 = dot(grad00, Pf);
	
	// Noise contribution from lower right corner
	vec2 grad10 = texture2D(permTexture, Pi + vec2(permTexUnit, 0.0)).rg * 4.0 - 1.0;
	float n10 = dot(grad10, Pf - vec2(1.0, 0.0));
	
	// Noise contribution from upper left corner
	vec2 grad01 = texture2D(permTexture, Pi + vec2(0.0, permTexUnit)).rg * 4.0 - 1.0;
	float n01 = dot(grad01, Pf - vec2(0.0, 1.0));
	
	// Noise contribution from upper right corner
	vec2 grad11 = texture2D(permTexture, Pi + vec2(permTexUnit, permTexUnit)).rg * 4.0 - 1.0;
	float n11 = dot(grad11, Pf - vec2(1.0, 1.0));
	
	// Blend contributions along x
	vec2 n_x = mix(vec2(n00, n01), vec2(n10, n11), fade(Pf.x));
	
	// Blend contributions along y
	float n_xy = mix(n_x.x, n_x.y, fade(Pf.y));
	
	// We're done, return the final noise value.
	return n_xy;
}

//////////////////////
//  Sphere Function //
//////////////////////

uniform float sRadius;
const float PI = 3.14159265359;
const float TWOPI = 6.28318530718;

vec3 sphere(in vec2 uv) {
	uv[0] *= PI;
	uv[1] *= TWOPI;
	vec3 sPoint;
	sPoint.x = cos(uv[0]) * cos(uv[1]);
	sPoint.y = sin(uv[0]) * cos(uv[1]);
	sPoint.z = sin(uv[1]);
	return sPoint;
}

/////////////////////////////////////
// Calculate Noise-Modulated Point //
/////////////////////////////////////

vec3 vNoisePoint(in vec2 p) {
	vec3 pSphere = sphere(p.xy);				// Create sphere
	vec2 nVal = pSphere.xy * nScale + nOffset;	// Values for noise
	float noise = pnoise2(nVal);				// Noise value
	noise = 0.5 * noise + 0.5;					// Scale and offset to 0 > 1 range
	vec3 sNorm = normalize(pSphere);				// Fake normal
	
	// Return XYZ coordinates of point moved along fake sphere normal
	return (pSphere + (sNorm * noise * dAmt)) * sRadius;
}

///////////////////////
// Lighting & Normal //
///////////////////////

varying vec3 lightVec, view, normal;
const vec2 gridX = vec2(0.001,0.0);
const vec2 gridY = vec2(0.0,0.001);

void velvetVS(in vec3 p, in vec3 np) {
	// Calculate normal by 'neighbour' technique
	vec3 tangent	 = vNoisePoint(p.xy + gridX) - np;
	vec3 bitangent = vNoisePoint(p.xy + gridY) - np;
	normal = normalize(cross(tangent, bitangent));
	normal = gl_NormalMatrix * normal;
	normal = (normal.z < 0.0) ? -normal : normal;
	
	// Other varying to FS
	vec3 lPos = gl_LightSource[0].position.xyz;
	vec4 po = vec4(p,1.0);
	vec3 pw = (gl_ModelViewMatrix * po).xyz;
	lightVec = normalize(lPos - pw);
	vec3 eyePos = vec3(0.0,0.0,1.0);  // Quartz Composer shifts eye-position from OpenGL default vec3(0.0)
	view = normalize(eyePos - pw);
}

///////////////////
//   Main Loop   //
///////////////////

void main()
{
	vec4 vert = gl_Vertex;
	
	// Noise vertex
	vec4 nVert = vec4(vNoisePoint(vert.xy),1.0);
	
	velvetVS(vert.xyz, nVert.xyz);
			
	// Transform vertex by modelview and projection matrices
	gl_Position = gl_ModelViewProjectionMatrix * nVert;
	
	// Forward texture coordinates after applying texture matrix
	gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
}

Fragment Shader:

uniform vec4 lDiffColor;	// Diffuse Color (0.5, 0.5, 0.5, 1.0)
uniform vec4 lSpecColor;	// Specular Color (0.7, 0.7, 0.75, 1.0)
uniform vec4 lSubColor;		// Under-Color (0.2, 0.2, 1.0, 1.0)
uniform float lRollOff;		// Edge Rolloff 0.0 > 1.0

varying vec3 lightVec, view, normal;

vec4 velvetFS() {
	vec3 Ln = normalize(lightVec);
	vec3 Nn = normalize(normal);
	vec3 Vn = normalize(view);
	vec3 Hn = normalize(Vn + Ln);
	float ldn = dot(Ln,Nn);
	float diffComp = max(0.0,ldn);
	vec3 diffContrib = vec3(diffComp * lDiffColor);
	float subLamb = smoothstep(-lRollOff,1.0,ldn) - smoothstep(0.0,1.0,ldn);
	subLamb = max(0.0,subLamb);
	vec3 subContrib = vec3(subLamb * lSubColor);
	float vdn = 1.0-dot(Vn,Nn);
	vec4 vecColor = vec4(vec3(vdn),1.0);
	vec4 DiffuseContrib = vec4((subContrib+diffContrib).xyz,1.0);
	vec4 SpecularContrib = vec4((vecColor*lSpecColor).xyz,1.0);
	return DiffuseContrib + SpecularContrib;
}

///////////////////
//   Main Loop   //
///////////////////

void main()
{
	//
	gl_FragColor = velvetFS();
}

And here’s a screenshot of the result:

As you can see, there are discontinuities in the normals around the mesh. I have a nasty suspicion this is caused by rounding errors in the built-in GLSL sin() and cos() functions used in the parametric sphere function, but this may not be the root of the problem. Is there any way of fixing this while still using the current normal-estimation technique? Alternatively, is there a better way of calculating normals, maybe working out the Perlin derivatives directly (no idea how I’d go about this, sadly)? Also, would it be possible to somehow cut out the sphere function, use a sphere primitive as a base for the noise, and still calculate working normals? I guess this would make the whole thing more efficient.
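
(For reference, here’s a rough, untested sketch of what I imagine differentiating the 2D texture-lookup noise directly might look like, reusing fade(), permTexture, permTexUnit and permTexUnitHalf from the shader above. Even if it’s right, turning this noise gradient into a surface normal would still mean chaining it through the sphere mapping, which is presumably why the ‘neighbour’ technique is more common.)

// Untested sketch: analytic gradient of the 2D texture-lookup noise above.
// fadeDeriv() is the derivative of the quintic fade curve used by fade().
float fadeDeriv(in float t) {
	return 30.0*t*t*(t*(t-2.0)+1.0);   // d/dt of 6t^5 - 15t^4 + 10t^3
}

vec2 pnoise2Grad(in vec2 P)
{
	vec2 Pi = permTexUnit*floor(P) + permTexUnitHalf;
	vec2 Pf = fract(P);

	// Same four gradient lookups as the noise function itself
	vec2 g00 = texture2D(permTexture, Pi).rg * 4.0 - 1.0;
	vec2 g10 = texture2D(permTexture, Pi + vec2(permTexUnit, 0.0)).rg * 4.0 - 1.0;
	vec2 g01 = texture2D(permTexture, Pi + vec2(0.0, permTexUnit)).rg * 4.0 - 1.0;
	vec2 g11 = texture2D(permTexture, Pi + vec2(permTexUnit, permTexUnit)).rg * 4.0 - 1.0;

	float n00 = dot(g00, Pf);
	float n10 = dot(g10, Pf - vec2(1.0, 0.0));
	float n01 = dot(g01, Pf - vec2(0.0, 1.0));
	float n11 = dot(g11, Pf - vec2(1.0, 1.0));

	float u  = fade(Pf.x);
	float v  = fade(Pf.y);
	float du = fadeDeriv(Pf.x);
	float dv = fadeDeriv(Pf.y);

	// Differentiate the bilinear blend: gradient terms plus fade-weight terms
	float dx = mix(mix(g00.x, g10.x, u), mix(g01.x, g11.x, u), v)
	         + du * mix(n10 - n00, n11 - n01, v);
	float dy = mix(mix(g00.y, g10.y, u), mix(g01.y, g11.y, u), v)
	         + dv * (mix(n01, n11, u) - mix(n00, n10, u));
	return vec2(dx, dy);
}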

Frustratingly, I’ve seen normals calculated using this technique and a similar vertex noise shader in HLSL, and it seems to work flawlessly. What am I doing wrong?

I’m a relative newbie to shader-coding, so any advice on how to proceed very gratefully accepted.

Thanks again,

a|x
http://machinesdontcare.wordpress.com

Incidentally, having done some more tests, I don’t think it’s the Perlin noise that is at fault here. If I take noise out of the shader completely, so I’m just creating a spherical mesh from a flat grid using the parametric sphere function, and attempt to estimate normals in the same way, I get the same problem.

Here are a couple of examples of the lighting effect (based on the Velvet shader from the NVIDIA Shader Library, incidentally).

And here I’m simply returning the normals after transformation by the gl_NormalMatrix

You can still clearly see the seam, along with a weird error at the top of the mesh.

I think I’ve also eliminated the lighting equation itself from my enquiries: if I use a standard sphere primitive and its normals, and don’t do any vertex displacement, I don’t get the problem.

I’m beginning to suspect rounding errors in the built-in GLSL sin() and cos() functions may be to blame. Does this sound likely? Or it could be an inherent problem with this method of normal estimation. In either case, is there any way of getting around the problem, or of calculating normals in some other way, and, if possible, getting rid of the need for the sphere function and displacing a spherical mesh directly while still getting usable normals?
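
(One thing I suppose I could try, though I’m not sure it would help with the seam, is using symmetric offsets for the neighbour points rather than one-sided ones, so the tangent estimate is centred on the actual vertex. Something like this, untested, with surf() standing in for the existing sphere-plus-noise function:)

// Untested sketch: central-difference version of the 'neighbour' normal estimate.
// surf(u, v) is a stand-in for the existing sphere + noise function that
// returns the displaced surface position for parameters (u, v).
const float h = 0.01;

vec3 estimateNormal(in float u, in float v)
{
	vec3 du = surf(u + h, v) - surf(u - h, v);   // tangent along u
	vec3 dv = surf(u, v + h) - surf(u, v - h);   // tangent along v
	return normalize(cross(du, dv));
}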

Cheers,

a|x

OK, I got this semi-working, but this time using a standard Phong Directional lighting model. At least the normal-calculation part is working.

Here is my shader code as it stands:

/// VERTEX SHADER

/*
	2D Perlin-Noise in the vertex shader, based originally on
	vBomb.fx HLSL vertex noise shader, from the NVIDIA Shader Library.
	http://developer.download.nvidia.com/shaderlibrary/webpages/shader_library.html#vbomb
	
	Original Perlin function substituted for Stefan Gustavson's
	texture-lookup-based Perlin implementation.
	
	Quartz Composer setup
	toneburst 2009
	http://machinesdontcare.wordpress.com
*/

////////////////////////
//  2D Perlin Noise   //
////////////////////////

/*
	2D Perlin-Noise from example by Stefan Gustavson, found at
	http://staffwww.itn.liu.se/~stegu/simplexnoise/
*/

uniform sampler2D permTexture;			// Permutation texture
const float permTexUnit = 1.0/256.0;		// Perm texture texel-size
const float permTexUnitHalf = 0.5/256.0;	// Half perm texture texel-size

float fade(in float t) {
	return t*t*t*(t*(t*6.0-15.0)+10.0);
}

float pnoise2D(in vec2 p)
{
	// Integer part, scaled and offset for texture lookup
	vec2 pi = permTexUnit*floor(p) + permTexUnitHalf;
	// Fractional part for interpolation
	vec2 pf = fract(p);
	
	// Noise contribution from lower left corner
	vec2 grad00 = texture2D(permTexture, pi).rg * 4.0 - 1.0;
	float n00 = dot(grad00, pf);
	
	// Noise contribution from lower right corner
	vec2 grad10 = texture2D(permTexture, pi + vec2(permTexUnit, 0.0)).rg * 4.0 - 1.0;
	float n10 = dot(grad10, pf - vec2(1.0, 0.0));
	
	// Noise contribution from upper left corner
	vec2 grad01 = texture2D(permTexture, pi + vec2(0.0, permTexUnit)).rg * 4.0 - 1.0;
	float n01 = dot(grad01, pf - vec2(0.0, 1.0));
	
	// Noise contribution from upper right corner
	vec2 grad11 = texture2D(permTexture, pi + vec2(permTexUnit, permTexUnit)).rg * 4.0 - 1.0;
	float n11 = dot(grad11, pf - vec2(1.0, 1.0));
	
	// Blend contributions along x
	vec2 n_x = mix(vec2(n00, n01), vec2(n10, n11), fade(pf.x));
	
	// Blend contributions along y
	float n_xy = mix(n_x.x, n_x.y, fade(pf.y));
	
	// We're done, return the final noise value.
	return n_xy;
}

/////////////////////
// Sphere Function //
/////////////////////

const float PI = 3.14159265;
const float TWOPI = 6.28318531;
uniform float BaseRadius;

vec4 sphere(in float u, in float v) {
	u *= PI;
	v *= TWOPI;
	vec4 pSphere;
	pSphere.x = BaseRadius * cos(v) * sin(u);
	pSphere.y = BaseRadius * sin(v) * sin(u);
	pSphere.z = BaseRadius * cos(u);
	pSphere.w = 1.0;
	return pSphere;
}

///////////////////////////
// Apply 2D Perlin Noise //
///////////////////////////

uniform vec3 NoiseScale;	// Noise scale, 0.01 > 8
uniform float Sharpness;	// Displacement 'sharpness', 0.1 > 5
uniform float Displacement;	// Displacement amount, 0 > 2
uniform float Speed;		// Displacement rate, 0.01 > 1
uniform float Timer;		// Feed incrementing value, infinite

vec4 perlinSphere(in float u, in float v) {
	vec4 sPoint = sphere(u, v);
	// The rest of this function is mainly from vBomb shader from NVIDIA Shader Library
	vec4 noisePos = vec4(NoiseScale.xyz,1.0) * (sPoint + (Speed * Timer));
	float noise = (pnoise2D(noisePos.xy) + 1.0) * 0.5;
	float ni = pow(abs(noise),Sharpness) - 0.25;
	vec4 nn = vec4(normalize(sPoint.xyz),0.0);
	return (sPoint - (nn * (ni-0.5) * Displacement));
}

////////////////////////////////
// Calculate Position, Normal //
////////////////////////////////

const float grid = 0.01;	// Grid offset for normal-estimation
varying vec3 norm;			// Normal

vec4 posNorm(in float u, in float v) {
	// Vertex position
	vec4 vPosition = perlinSphere(u, v);
	// Estimate normal by 'neighbour' technique
	// with thanks to tonfilm
	vec3 tangent = (perlinSphere(u + grid, v) - vPosition).xyz;
	vec3 bitangent = (perlinSphere(u, v + grid) - vPosition).xyz;
	norm = gl_NormalMatrix * normalize(cross(tangent, bitangent));
	// Return vertex position
	return vPosition;
}

//////////////////////////
// Phong Directional VS //
//////////////////////////

// -- Lighting varyings (to Fragment Shader)
varying vec3 lightDir0, halfVector0;
varying vec4 diffuse0, ambient;

void phongDir_VS() {
	// Extract values from gl light parameters
	// and set varyings for Fragment Shader
	lightDir0 = normalize(vec3(gl_LightSource[0].position));
	halfVector0 = normalize(gl_LightSource[0].halfVector.xyz);
	diffuse0 = gl_FrontMaterial.diffuse * gl_LightSource[0].diffuse;
	ambient =  gl_FrontMaterial.ambient * gl_LightSource[0].ambient;
	ambient += gl_LightModel.ambient * gl_FrontMaterial.ambient;
}

///////////////
// Main Loop //
///////////////

uniform vec2 PreScale, PreTranslate;	// Mesh pre-transform

void main()
{
	vec2 uv = gl_Vertex.xy;
	// Offset XY mesh coords to 0 > 1 range
	uv += 0.5;
	
	// Pre-scale and transform mesh
	uv *= PreScale;
	uv += PreTranslate;
	
	// Calculate new vertex position and normal
	vec4 spherePos = posNorm(uv[0], uv[1]);
	
	// Calculate lighting varyings to be passed to fragment shader
	phongDir_VS();
	
	// Transform new vertex position by modelview and projection matrices
	gl_Position = gl_ModelViewProjectionMatrix * spherePos;
	
	// Forward current texture coordinates after applying texture matrix
	gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
}


/// FRAGMENT SHADER

/*
	Generic Fragment Shader
	with Phong Directional lighting
*/

//////////////////////////
// Phong Directional FS //
//////////////////////////

// -- Lighting varyings (from Vertex Shader)
varying vec3 norm, lightDir0, halfVector0;
varying vec4 diffuse0, ambient;

vec4 phongDir_FS()
{
	vec3 halfV;
	float NdotL, NdotHV;
	
	// The ambient term will always be present
	vec4 color = ambient;
	
	// compute the dot product between normal and ldir
	NdotL = max(dot(norm, lightDir0),0.0);
	
	if (NdotL > 0.0) {
		color += diffuse0 * NdotL;
		halfV = normalize(halfVector0);
		NdotHV = max(dot(norm, halfV), 0.0);
		color +=	gl_FrontMaterial.specular * 
				gl_LightSource[0].specular * 
				pow(NdotHV, gl_FrontMaterial.shininess);
	}	
	return color;
}

///////////////
// Main Loop //
///////////////

void main()
{
	// Call lighting function and return result
	gl_FragColor = phongDir_FS();
}

This seems to work OK, after I realised I had the U and V parameters to the sphere function swapped.

I’ve also done a 3D noise variant.

Here’s a clip of the 2D version in action:

I’d still love to know if there is any way of making the whole thing more efficient by eliminating the need to create the base sphere mesh from a flat grid, and using a sphere primitive instead. If anyone has any ideas on whether this is possible, and any tips on how to do it, I’d be really glad to hear them.

Thanks guys,

a|x
http://machinesdontcare.wordpress.com

Maybe you could use a sphere primitive with an additional texture map containing the result of your sphere() function (something like a normal map),
and do the normal generation completely in the fragment shader.

This way you could possibly use an icosahedron sphere (also known as an icosphere or polysphere) and get rid of the “poles”.

… just my ideas without delving into the matter too deeply …
:o

Hi def,

thanks for getting back to me.
I like the icosphere idea.
I’m not quite sure what you mean about the additional texture though. Do you mean I should essentially make a displacement map, with normals baked into the map? I can see how I could use the RGB channels for the normal, and embed a displacement value in the alpha channel. I’ve tried this approach before, using a dynamically-created normal map and a Sobel filter to generate normals, but have never managed to apply the normals created this way to a radially-displaced sphere. I think this is partly to do with the way the normal map is itself distorted by the stretched texture coordinates.

This is as close as I got to making this technique work:
http://vimeo.com/4969964

…not very, as you can see.

Do I need per-vertex tangent vectors to get this to work properly?

What would your approach be to getting this to work? Or is this not really what you had in mind?

Sorry to fire back lots of questions at you; I’m relatively new to all this stuff, I’m afraid, so any hints are very gratefully received.

Cheers,

a|x

OK, I think you can substitute the sphere function with a texture lookup into a pre-rendered RGB texture containing the sphere function’s output.
This can be used to calculate the normals.
To displace the vertex positions use a standard sphere or isosphere primitive.
So you need two different PerlinSphere functions, one with the sphere texture lookup as sPoint for normal generation, and one with gl_Vertex.xyz as sPoint for gl_Position.

The texture lookup should give you a good speedup AND you can use standard primitives.
Not sure if the resolution of the texture will influence the smoothness, since linear interpolation between neighboring data values comes for free (GL_LINEAR filtering).

This sounds like it should work… :o

Let me know.

Hi def,

thanks for getting back to me again.

interesting…
Sorry to be a bit slow, but when you say

“This can be used to calculate the normals”

do you mean that UV-space normals should be pre-calculated and baked into the lookup texture? Or, would you fetch neighbour values from the perlin texture in the VS and calculate the normal there? I’m a bit confused about the bit where you say you’d need two different PerlinSphere functions, I’m afraid…

a|x

I mean both, I think…

Because you are using the PerlinSphere() function for both normal and position calculation, this might cause confusion.

In your shader code the PerlinSphere() function is being called three times per vertex: once for calculating the position and twice for the tangent vectors.
For the position you should use the real geometry (the sphere primitive) and not the return value of the sphere() function.
For the tangent vectors you can keep using the sphere() function, or use the texture lookup mentioned above (because you cannot look up neighboring vertices/normals).

Let’s say in a 512x512 RGB texture you can store the geometry data of a 512x512 sphere. Let’s call that “sufficient precision” for calculating correct displaced normals.

with the texture components being:


red = cos(v) * sin(u);
green = sin(v) * sin(u);
blue = cos(u);

‘u’ and ‘v’ being the texture coordinates.

The resolution of the texture should at least match the sphere primitive mesh resolution.

Instead of sphere( u, v ) you do


texture2D(sphereTable, vec2(u,v));

only for the tangent/binormal vector calculation.
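
Roughly like this (untested; ‘sphereTable’ is the pre-rendered texture, assumed here to store the sphere positions remapped into the 0..1 range, and the other uniforms and pnoise2D() are the ones from your shader):

uniform sampler2D sphereTable;   // RGB = unit-sphere position for (u, v), stored as 0..1

// Same as perlinSphere(), but reading the base sphere point from the texture.
// Used ONLY for the tangent/bitangent offsets; the vertex position itself
// comes from displacing the real sphere-primitive vertex (gl_Vertex).
vec4 perlinSphereTex(in float u, in float v) {
	vec3 sPos = texture2D(sphereTable, vec2(u, v)).rgb * 2.0 - 1.0;  // decode from 0..1
	vec4 sPoint = vec4(BaseRadius * sPos, 1.0);
	vec4 noisePos = vec4(NoiseScale.xyz, 1.0) * (sPoint + (Speed * Timer));
	float noise = (pnoise2D(noisePos.xy) + 1.0) * 0.5;
	float ni = pow(abs(noise), Sharpness) - 0.25;
	vec4 nn = vec4(normalize(sPoint.xyz), 0.0);
	return sPoint - (nn * (ni - 0.5) * Displacement);
}

// Then, with vPosition computed by displacing gl_Vertex:
// vec3 tangent   = (perlinSphereTex(u + grid, v) - vPosition).xyz;
// vec3 bitangent = (perlinSphereTex(u, v + grid) - vPosition).xyz;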

Oh, I see what you mean. I’d still run the Perlin noise three times though: once on the value of the vertex, and twice on values from the lookup table.

Is that right?

a|x

That was the idea, yes.

Hi toneburst,
I used to calculate normals from a depth map with this function:
(cross(ddx(pos), ddy(pos)))

source: http://forum.beyond3d.com/showthread.php?t=37614

I tried it and it works like a charm. Though the calculated normals are flat-shaded, so I get the best results when calculating the noise in the fragment shader as well…
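
(In GLSL the equivalent functions are dFdx() and dFdy(), so the fragment-shader version looks something like this, with ‘pos’ being the displaced eye-space position passed down from the vertex shader as a varying:)

// Normal from screen-space derivatives (GLSL's dFdx/dFdy correspond to HLSL's ddx/ddy).
// 'pos' is the displaced eye-space position, passed from the vertex shader.
// The result is per-face, i.e. flat-shaded.
varying vec3 pos;

void main()
{
	vec3 n = normalize(cross(dFdx(pos), dFdy(pos)));
	gl_FragColor = vec4(n * 0.5 + 0.5, 1.0);   // visualise the normal
}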

I’m just a rookie in GLSL… hope this will help.

Hi martinsh,

thanks for getting back to me!
That looks intriguing. I will look into that.

Cheers,

a|x
