No exactly opengl q, but related to per-pixel-lighting

To do per-pixel lighting where bump maps are independent from the geometry one has to do the lighting in tangent space.

For a given vertex v with normal n and tangent vector t, the binormal is the normalized (nxt).

Here is where my problem lies: how do I get a tangent vector for a given vertex if my 3D model is a triangular mesh and is not derived from a parametric surface?

thanks in advance,
-Lev

#pragma once

#include <math.h>    // fabsf
#include <stdint.h>  // uint32_t
#include <string.h>  // memcpy

#include "mesh3d.h"

// MSVC-specific keywords; make the function build on other compilers too.
#ifdef _MSC_VER
#define RSQ_INLINE __forceinline __fastcall
#else
#define RSQ_INLINE inline
#endif

// Fast approximate reciprocal square root (the classic "Quake III" bit
// hack): reinterpret the float's bits as an integer, form an initial
// guess with the 0x5f3759df magic constant, then refine with one step of
// Newton's method (worst-case relative error is roughly 0.2%).
//
// number: must be a positive, finite float (the bit trick is meaningless
//         for zero, negatives, or NaN/Inf).
// returns: approximately 1.0f / sqrtf(number).
float RSQ_INLINE rsq(float number)
{
	uint32_t i;
	float x2, y;
	const float threehalfs = 1.5f;

	x2 = number * 0.5f;
	y = number;
	// Type-pun via memcpy: the original `i = *(long *)&y` was undefined
	// behavior (strict aliasing) and, on LP64 platforms where long is
	// 8 bytes, read past the 4-byte float entirely.
	memcpy(&i, &y, sizeof i);
	i = 0x5f3759df - (i >> 1);           // magic initial guess
	memcpy(&y, &i, sizeof y);
	y = y * (threehalfs - (x2 * y * y)); // one Newton-Raphson iteration

	return y;
}

#include <xmmintrin.h>

// 3D vector with a spare w component, overlaid with an SSE register so
// the same 16 bytes are reachable from scalar code and SIMD intrinsics.
struct VECTOR
{
	union
	{
		struct { float x, y, z, w; };
		__m128 SSE;
	};

	// Default constructor leaves the components uninitialized on purpose
	// (avoids redundant work when the vector is about to be overwritten).
	VECTOR( ) { }
	VECTOR( float X, float Y, float Z ) : x( X ), y( Y ), z( Z ), w( 0 ) { }

	// Scale the xyz part to (approximately) unit length; w is untouched.
	// Uses the fast reciprocal square root, so the result is accurate to
	// roughly 0.2%, not exact.
	void NORMALIZE( )
	{
		const float scale = rsq( x * x + y * y + z * z );
		x *= scale;
		y *= scale;
		z *= scale;
	}

	// Implicit conversion to a pointer at the first component, so the
	// vector can be handed to APIs expecting a float array.
	operator float* ( )
	{
		return &x;
	}
};

struct VECTOR4 : VECTOR
{
float w;
VECTOR4( float X, float Y, float Z, float W = 1 )
{
x = X;
y = Y;
z = Z;
w = W;
}
};

// Cross product of the xyz parts of v0 and v1 (w of the result is 0,
// set by the three-argument VECTOR constructor).
VECTOR CROSS( const VECTOR& v0, const VECTOR& v1 )
{
	const float cx = v0.y * v1.z - v0.z * v1.y;
	const float cy = v0.z * v1.x - v0.x * v1.z;
	const float cz = v0.x * v1.y - v0.y * v1.x;
	return VECTOR( cx, cy, cz );
}

// 3-component dot product (w is ignored).
float DP3( const VECTOR& v0, const VECTOR& v1 )
{
	float sum = v0.x * v1.x;
	sum += v0.y * v1.y;
	sum += v0.z * v1.z;
	return sum;
}

// Full 4-component dot product, including w.
float DP4( const VECTOR4& v0, const VECTOR4& v1 )
{
	float sum = v0.x * v1.x;
	sum += v0.y * v1.y;
	sum += v0.z * v1.z;
	sum += v0.w * v1.w;
	return sum;
}

// Absolute value of a float.
// The previously commented-out inline asm was wrong anyway (ADDing
// 0x7FFFFFFF does not clear the sign bit) and has been removed; fabsf
// is the standard, branch-free way to do this.
float ABS( float a )
{
	return fabsf( a );
}

// One mesh vertex plus its tangent-space basis.  The basis fields are
// zeroed by MESH::COPYFROM_mesh3d and filled in by MESH::CALC_T_SPACE.
struct VERTEX
{
VECTOR pos;  // object-space position
VECTOR nrml; // vertex normal (sum of adjacent face normals, then normalized)
VECTOR tex0; // texture coordinates: x = u, y = v, z always 0
VECTOR S;    // tangent: direction of increasing u across the surface
VECTOR T;    // bitangent: direction of increasing v across the surface
VECTOR SxT;  // CROSS(S, T), flipped if needed to match nrml's hemisphere

VERTEX( ) { }
};

// Triangle mesh that owns its vertex/index storage and can derive a
// per-vertex tangent-space basis (normal, tangent S, bitangent T, SxT)
// from nothing but positions, texture coordinates, and topology.
struct MESH
{
VERTEX* vdata;       // vertex array, vcount entries (allocated by COPYFROM_mesh3d)
unsigned int vcount; // number of vertices
unsigned int* fdata; // triangle index list, 3 * fcount entries
unsigned int fcount; // number of triangles (total index count = fcount * 3)

// Deep-copies positions, texture coordinates and the index list from the
// source mesh3d.  Normals and tangent-basis vectors are zeroed so that
// CALC_T_SPACE can accumulate per-face contributions into them.
// NOTE(review): allocates vdata/fdata with new[] without releasing any
// previous contents -- calling this twice on one MESH leaks memory.
void COPYFROM_mesh3d( mesh3d* m3d )
{
vcount = m3d->vcount;
vdata = new VERTEX[ m3d->vcount ];
fcount = m3d->fcount;
fdata = new unsigned int[ 3 * m3d->fcount ];

  memcpy( fdata, m3d->fdata, m3d->fcount * 3 * sizeof( unsigned int ) );

  for( unsigned int i = 0; i < vcount; i ++ )
  {
  	vdata[ i ].pos.x = m3d->vdata[ i ].pos.x;
  	vdata[ i ].pos.y = m3d->vdata[ i ].pos.y;
  	vdata[ i ].pos.z = m3d->vdata[ i ].pos.z;
  	vdata[ i ].tex0.x = m3d->vdata[ i ].tex0.x;
  	vdata[ i ].tex0.y = m3d->vdata[ i ].tex0.y;
  	vdata[ i ].tex0.z = 0;
  	vdata[ i ].nrml.x = 0;
  	vdata[ i ].nrml.y = 0;
  	vdata[ i ].nrml.z = 0;
  	vdata[ i ].S.x = 0;
  	vdata[ i ].S.y = 0;
  	vdata[ i ].S.z = 0;
  	vdata[ i ].T.x = 0;
  	vdata[ i ].T.y = 0;
  	vdata[ i ].T.z = 0;
  	vdata[ i ].SxT.x = 0;
  	vdata[ i ].SxT.y = 0;
  	vdata[ i ].SxT.z = 0;
  }

}

// Builds the per-vertex tangent-space basis in two passes.
//
// Pass 1, per face: accumulate into each of the face's three vertices
//  - the unnormalized face normal e1 x e2 (so larger faces contribute
//    more to the averaged vertex normal), and
//  - the tangent S and bitangent T, computed one position axis at a
//    time: treating a position component p as a function p(u,v), the
//    triangle's three (p, u, v) points span a plane with normal cp,
//    and solving cp . (dp, du, dv) = 0 gives
//        dp/du = -cp.y / cp.x   (the p-component of S)
//        dp/dv = -cp.z / cp.x   (the p-component of T)
//    (this is the plane-normal tangent trick from the NVIDIA per-pixel
//    lighting material discussed in this thread).
//
// Pass 2, per vertex: normalize the accumulated sums, compute
// SxT = CROSS(S, T), and flip SxT if it points away from the vertex
// normal so the basis keeps a consistent handedness.
void CALC_T_SPACE( )
{
for( unsigned int f = 0; f < fcount; f ++ )
{
VERTEX& v0 = vdata[ fdata[ f * 3 + 0 ] ];
VERTEX& v1 = vdata[ fdata[ f * 3 + 1 ] ];
VERTEX& v2 = vdata[ fdata[ f * 3 + 2 ] ];

  	VECTOR e1, e2;
  	VECTOR cp;

  	// Face normal from the two position edges, added (unnormalized)
  	// to each corner's vertex normal.
  	e1 = VECTOR( v1.pos.x - v0.pos.x, v1.pos.y - v0.pos.y, v1.pos.z - v0.pos.z );
  	e2 = VECTOR( v2.pos.x - v0.pos.x, v2.pos.y - v0.pos.y, v2.pos.z - v0.pos.z );
  	cp = CROSS( e1, e2 );
  	{
  		v0.nrml.x += cp.x;
  		v0.nrml.y += cp.y;
  		v0.nrml.z += cp.z;
  		v1.nrml.x += cp.x;
  		v1.nrml.y += cp.y;
  		v1.nrml.z += cp.z;
  		v2.nrml.x += cp.x;
  		v2.nrml.y += cp.y;
  		v2.nrml.z += cp.z;
  	}

  	// x axis: edges in (pos.x, u, v) space.
  	e1 = VECTOR( v1.pos.x - v0.pos.x, v1.tex0.x - v0.tex0.x, v1.tex0.y - v0.tex0.y );
  	e2 = VECTOR( v2.pos.x - v0.pos.x, v2.tex0.x - v0.tex0.x, v2.tex0.y - v0.tex0.y );
  	cp = CROSS( e1, e2 );
  	// cp.x == du1*dv2 - dv1*du2; zero means the texture mapping is
  	// degenerate on this face, so skip it to avoid dividing by zero.
  	if( cp.x )
  	{
  		v0.S.x += -cp.y / cp.x;
  		v0.T.x += -cp.z / cp.x;
  		v1.S.x += -cp.y / cp.x;
  		v1.T.x += -cp.z / cp.x;
  		v2.S.x += -cp.y / cp.x;
  		v2.T.x += -cp.z / cp.x;
  	}

  	// y axis: same construction with pos.y.
  	e1 = VECTOR( v1.pos.y - v0.pos.y, v1.tex0.x - v0.tex0.x, v1.tex0.y - v0.tex0.y );
  	e2 = VECTOR( v2.pos.y - v0.pos.y, v2.tex0.x - v0.tex0.x, v2.tex0.y - v0.tex0.y );
  	cp = CROSS( e1, e2 );
  	if( cp.x )
  	{
  		v0.S.y += -cp.y / cp.x;
  		v0.T.y += -cp.z / cp.x;
  		v1.S.y += -cp.y / cp.x;
  		v1.T.y += -cp.z / cp.x;
  		v2.S.y += -cp.y / cp.x;
  		v2.T.y += -cp.z / cp.x;
  	}

  	// z axis: same construction with pos.z.
  	e1 = VECTOR( v1.pos.z - v0.pos.z, v1.tex0.x - v0.tex0.x, v1.tex0.y - v0.tex0.y );
  	e2 = VECTOR( v2.pos.z - v0.pos.z, v2.tex0.x - v0.tex0.x, v2.tex0.y - v0.tex0.y );
  	cp = CROSS( e1, e2 );
  	if( cp.x )
  	{
  		v0.S.z += -cp.y / cp.x;
  		v0.T.z += -cp.z / cp.x;
  		v1.S.z += -cp.y / cp.x;
  		v1.T.z += -cp.z / cp.x;
  		v2.S.z += -cp.y / cp.x;
  		v2.T.z += -cp.z / cp.x;
  	}
  }

  // Pass 2: normalize the accumulated vectors and fix SxT's handedness
  // against the vertex normal.
  for( unsigned int v = 0; v < vcount; v ++ )
  {
  	VERTEX& v0 = vdata[ v ];
  	v0.nrml.NORMALIZE( );
  	v0.S.NORMALIZE( );
  	v0.T.NORMALIZE( );
  	v0.SxT = CROSS( v0.S, v0.T );
  	v0.SxT.NORMALIZE( );
  	if( DP3( v0.SxT, v0.nrml ) < 0 )
  	{
  		v0.SxT.x = -v0.SxT.x;
  		v0.SxT.y = -v0.SxT.y;
  		v0.SxT.z = -v0.SxT.z;
  	}
  }

}
};

like that…

just fill the struct with the pointers, and call CALC_T_SPACE( )…

fcount == face count
fdata == unsigned int[ 3 ] *
vcount == vertex count
vdata == struct VERTEX
{
VECTOR pos;
VECTOR nrml;
VECTOR tex0;
VECTOR S;
VECTOR T;
VECTOR SxT;

VERTEX( ) { }

};

1: Thx

2: Well since the source is not the most readable I’ve seen it’ll take some time to understand it. Whats the idea behind it? I’m more like “take-the-idea-and-write-your-own-code” so would be great if you could give any description

Cheers,
-Lev

One question (not about davepermen’s code):

given that V is a vertex and N is the normal for this vertex, can I take an arbitrary vector which lies in the plane P, where P is defined this way: ((V belongs to P) and (N is perpendicular to P))?

Cheers,
-Lev

Lev, download “per-pixel lighting” presentations\papers from NVIDIA site. They have explained a theory behind the local space basis calculation.

ok… its simple… read the specs on nvidia.com/developers… they describe the math behind detailed enough… after all you dont understand anything and copy the code out of the openglsdk and use it… working…

i do understand the math of everything but generating the tangentspace… now… i use this and have done everything else my own

Hi Lev,

I’ll try to explain what’s behind NVidia’s presentation, since the presentation treats the calculation quite shortly.

The basic idea is that if you had a function of two variables F(u,v) you could easily determine the tangent by calculating the partial derivative DF/Du (u,v). Fact is, you haven’t. But if you’ve textured your object you can use texture coordinates as the parameters (u,v). Using Taylor approximation you will find that F(u+du,v+dv)=F(u,v)+DF(u,v)(du,dv)+ h(du,dv), where DF is the derivate of F and h is the error term (this is why we’ll forget it).
Next is to transfer these ideas to the triangle with vertex positions (p0,p1,p2) and texture coordinates (u0,v0), (u1,v1) and (u2,v2). The Taylor approximation yields:
p1=p0 + D * (u1-u0, v1-v0)^T
p2=p0 + D * (u2-u0, v2-v0)^T
D is a 3x2 Matrix (the derivative from above) where the first column is the tangent vector:
D=| t_x c_x |
| t_y c_y |
| t_z c_z |
If you solve for t and c you’ll get:
b.x=(-dv1dx2+dx1dv2) /(du1dv2-dv1du2);
etc. (see presentation)
where dv1:=v1-v0, dx2:=p2.x-p0.x etc.

As you’ll want normalize the tangent, the division is unnecessary. Then you’ll get the formula from NVidia’s presentation.

A last thing remains. If you want smooth shading, you should adjust the tangent to fit to your vertex normal (and not the triangle normal). You can do this by forcing the tangent into the plane given by the vertex normal (Orthogonalisation). Therefore you calculate the dotproduct (=d) from the vertex normal (=n) and the tangent (=t): d=t.n
Final tangent= t - d*n

Hope that helps,
Stefan

Once upon a time, I thought Tangent space bump-mapping was the greatest thing ever. It was simple, made perfect sense, and seemed to be powerful and extenable to every situation.

Then, I learned the horrible truth.

The tangent and binormal are not the geometric tangent and binormal. Instead, they are the tangent and binormal as defined by the orientation of the texture at that particular vertex. This is crucial for correct tangent-space bumpmapping. This means that, for an arbitrarily textured surface that may be animating, the cross product of the normal with the tangent does not have to equal the binormal.

If you force the tangent and binormal to be orthogonal (by calculating a tangent and then crossing it with the normal to compute the binormal), then you can create some significant artifacts in your per-pixel code. This mainly happens on models that are animating.

Granted, for static geometry where tangent/binormal sheering (non-orthogonality) does not occur, this method works perfectly fine, so its still a pretty solid technique. However, correcting for tangent/binormal sheering is very difficult to do correctly (the method I’ve heard to do it requires pixel shaders and moves out of tangent space into “light-normal” space).

not the geometric tangent and binormal? If its true it hurts.
This is bad, and a misunderstanding. The terms used in all the papers I've seen are "normal, tangent, binormal (normal cross tangent)", and all of these seem to apply to a vertex, so if the tangent and binormal apply to a texture, then the normal must also be a "texture normal" — but what the heck is that? I'm really, really confused now.

Cass, can you please shed some light on the whole thing and explain a bit what is meant by normal, tangent, and binormal.

Thanks in advance,
-Lev

Hi Korval,

I’m afraid I don’t understand your concerns. The problem is that we have just a triangulated object and we’d love to find a tangent. A tangent is defined for parametric objects, e.g. if we have a function from R^2 -> R^3 to describe the surface of the object. If that representation is given it’s easy to determine the tangent by calculating the derivative. But in general we don’t have this. As a workaround we use texturing as such an approximation.

Nevertheless, if there is a parametric description for the object, it is surely a good idea to use it to calculate the tangent. This was shown in the first bump mapping demos from NVidia, where objects like a torus were used.

So I don’t know what you mean by “geometric tangent”. For a object given only by its triangles there is simply no geometric tangent defined.

So is a “geometric” tangent or not?

If it is, it's the same as with normals: vertex normals are nonsense from the mathematical point of view, since a vertex can't have a normal. But we are using per-vertex normals because our vertices are approximating the surface, and thus we are using the normal that the surface would have at this point.

Well if its the same with tangents then following should be true:

Any vector which lies in the plane P is a tangent for a given vertex. Plane P is defined as following: The vertex we are finding the tangent for lies in the plane and the vertex normal is the plane normal.

The only thing that is a problem in this case is that for per-pixel lighting the tangents for a given triangle must interpolate nicely, so ||lerp(0.5, tangent_a, tangent_b)|| should be as close to 1 as possible.

Is this all true or not?

Can someone (Cass, you authored many papers, you should know it better than many of us) help me?

Hi all.
It’s a really interesting discussion. However…

Originally posted by Korval:
The tangent and binormal are not the geometric tangent and binormal. Instead, they are the tangent and binormal as defined by the orientation of the texture at that particular vertex. This is crucial for correct tangent space bumpmapping. This means that, for an arbituarily textured surface that may be animating, the cross product of the normal with the tangent does not have to equal the binormal.

I can’t agree with this statement. Tangent-space bumpmapping takes square patch assumption, that is U and V coords must be orthogonal. So the cross product must be equal to the binormal. Why? It’s needed for fast moving object-space vectors into tangent space. It’s done by a transformation matrix obtained from tangent, binormal, and normal vectors. However such a matrix construction is correct only for the orthonormal basis, that is T,B, and N vectors must be orthogonal to each other and be of magnitude 1. If U and V direction become non-orthogonal, we can’t use the square patch assumption. To calculate a correct transformation matrix we would need to calculate an inverse matrix of the matrix composed from T, B, and N vectors, not just a transposed matrix. Such a calculation is possible but slow when many vertices. So there is a problem with deformable meshes, of course. However if deformations of a mesh don’t corrupt U-V orthogonality, the tangent-space technique should work fine.
Alexei.

“Tangent-space bumpmapping takes square patch assumption, that is U and V coords must be orthogonal. So the cross product must be equal to the binormal.”

Correction: the common implementation of tangent-space bump mapping makes this assumption. The theory behind tangent-space bump mapping, however, is not restricted to orthogonal tangents and binormals. It simply requires some significant mathematics to compute it, as you’ve stated.

And yes, the tangents and binormals expected for tangent-space bumpmapping are oriented along the texture, not the geometry. I spent almost a week trying to fix my bump mapping code before I talked to someone who explained all this to me.

The normal, however, should still be oriented with the geometry. And it should still be orthogonal to the tangent and binormal. Essentially, just choose a tangents and binormals in the tangent plane that are pointing in the directions of U and V on the texture. Otherwise your bump mapping will come out wrong.

And, while technically the tangent and binormal need not be orthogonal in theory, in practice this should be the case. Just don’t expect it to look correct on meshes where the U and V are non-orthogonal.

Hm, this is from Mark Kilgard's "A Practical and Robust Bump-mapping Technique for Today's GPUs":

We assume that each
vertex within the model has both a normal vector and tangent
vector.
For polygonal models generated from parametric surfaces, these
vectors are straightforward to generate from the parametric
representation.

If I understand this passage correctly he is definately referring to a “geometric” tangent.

-Lev

Kilgard is assuming that, for a parametric surface, the parameters U and V are used as texture coordinate. Therefore, since the tangent and binormal are defined on these surfaces partially in terms of U and V, they are already aligned with the texture.

But, if you don’t believe me, try it yourself. Rotate your tangent/binormal around the normal by 45 degrees and see what you get. They are still geometrically valid tangents and binormals, so feel free.

Korval, of course tangents and binormals are expected to be oriented along U&V coords. I didn’t mean they were not. I wanted to say, that U&V directions must be orthogonal (not in theory, but in practice). Now, I think, we understand each other.
Lev, a “geometric” tangent or not doesn’t really matter. This is an indefinite term. Just make sure that your U and V directions are orthogonal. If so, your parametrization is correct, and your tangents and binormals will be correct too.
Alexei.