Well, here’s the situation: using nVidia GeForce 6800 Ultra 512MB PCI/e x16 with the 77.77 drivers (yes I tried on a machine with up-to-date drivers too). We’re trying something a little different. We’ve discovered (as I’m sure many of you have) that there is process expense in calculating normals for lighting, especially when using volumetric data.
We’re storing a file type that contains xyz value/gradient. During the process that stores them, we have to calculate the magnitude, so we figured why not calculate the normals and store them too?
Then at load time, we’d have two 3D textures, one for the value, one for the normals, saving us the expense of calculating the normals.
First of all, I’ve gone through the normals calulated, and everything seems fine there.
On to the code. Assume that the files have been loaded and two arrays have been populated with the value(vgh_data) and normal(norm_data) data. Here’s the code for loading the textures:
if(vgh_data)
{
if (texture3D == 0) {
glGenTextures(1, &texture3D);
}
glBindTexture(GL_TEXTURE_3D, texture3D);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage3D( GL_TEXTURE_3D,
0,
GL_RGBA8,
xiSize,
yiSize,
ziSize, // depth
0, // border
GL_RGBA, // texture format
GL_UNSIGNED_BYTE, // texture type
vgh_data); // the texture
}
if(norm_data)
{
if (normals3D == 0) {
glGenTextures(1, &normals3D);
}
glBindTexture(GL_TEXTURE_3D, normals3D);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage3D( GL_TEXTURE_3D,
0, // LOD
GL_RGB, // internal format
xiSize, // width
yiSize, // height
ziSize, // depth
0, // border
GL_RGB, // texture format
GL_BYTE, // texture type
norm_data); // the texture
}
Now the code for assigning the textures (assume that my shaders have been initialized OK).
......................
initialization
.....................
// Link program varibles to the shader
texLoc_VolData = glGetUniformLocationARB(g_ShaderProgramObj, "VolData");
texLoc_NormData = glGetUniformLocationARB(g_ShaderProgramObj, "NormData");
texLoc_LUT1D = glGetUniformLocationARB(g_ShaderProgramObj, "LUT1D");
texLoc_LUT2D = glGetUniformLocationARB(g_ShaderProgramObj, "LUT2D");
LUT2D_flag = glGetUniformLocationARB(g_ShaderProgramObj, "LUT2D_flag");
LIGHT_flag = glGetUniformLocationARB(g_ShaderProgramObj, "LIGHT_flag");
.......................
rendering loop
.......................
glUseProgramObjectARB( g_ShaderProgramObj );
if(texLoc_VolData > -1)
{
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_3D, VolData->texture3D);
glUniform1iARB(texLoc_VolData, 1);
CheckGLError("RenderVolume: texLoc_VolData");
}
// change the active texture so we can bind
// the normal data (provided it exists). Lighting
// models can only be computed using these normals
// as well.
if(texLoc_NormData > -1)
{
if(VolData->norm_data)
{
glActiveTexture(GL_TEXTURE3);
glBindTexture(GL_TEXTURE_3D, VolData->normals3D);
glUniform1iARB(texLoc_NormData, 3);
CheckGLError("RenderVolume: texLoc_NormData");
// lighting only if norms data loaded
if(LIGHT_flag > -1)
{
glUniform1iARB(LIGHT_flag, lighting_model);
glGetUniformivARB(g_ShaderProgramObj, LIGHT_flag,&val1);
CheckGLError("RenderVolume: LIGHT_flag");
}
}
else
{
if(LIGHT_flag > -1)
{
glUniform1iARB(LIGHT_flag, 0);
CheckGLError("RenderVolume: LIGHT_flag");
}
}
}
Here’s my fragment shader:
// the actual volume data texture
uniform sampler3D VolData;
// the pre-computed vector normals
// in 3D texture form
uniform sampler3D NormData;
// 1D and 2D LUT
uniform sampler1D LUT1D;
uniform sampler2D LUT2D;
// should we use a 2D LUT?
uniform float LUT2D_flag;
// should we use the pre-computed
// normals for lighting?
uniform int LIGHT_flag;
void main( void )
{
vec4 outLUT1D;
vec4 outLUT2D;
vec4 outLUT;
vec4 val;
vec4 val = texture3D(VolData, gl_TexCoord[0].xyz);
if (LUT2D_flag > 0.5)
outLUT = texture2D(LUT2D, val.xy);
else
outLUT = texture1D(LUT1D, val.x);
// normal
if(LIGHT_flag == 1)
{
// simple test to see if our
// normals are being calculated correctly
vec4 norms = texture3D(NormData, gl_TexCoord[0].xyz);
outLUT.rgb = norms.xyz;
outLUT.a = texture1D(LUT1D, val.x);
}
// normal magnitude
else if(LIGHT_flag == 2)
{
}
// ambient light
else if(LIGHT_flag == 3)
{
}
gl_FragColor = outLUT;
}
I get my VolData (vgh_data) to show up just fine. As soon as I switch my LIGHT_flag to 1, it’s black. I know my normal data is loading, it doesn’t barf when I assign/load/use the texture, so what could possibly be going wrong? By using the normal data as rgb, I should be getting a pretty rainbow effect of the volume data.
Anybody know what I’m doing wrong?