I’m trying to save memory by using bytes to store texture and color information for my vertex format, however I’ve hit a performance problem when trying to use my vertex format with VBOs and a shader.
Here is my rendering code:
// Draw one chunk: bind its vertex/index buffers, point the fixed-function
// position array and the two generic byte attributes into the interleaved
// GLvertex stream, then issue a single indexed draw.
glBindBuffer( GL_ARRAY_BUFFER, chunkGeom->vertexBO );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, chunkGeom->indexBO );

// NOTE(review): these offsets are the most likely cause of the 60fps -> 11fps
// drop. Offset 15 is not 4-byte aligned, and the 2-byte texcoord pair at
// offset 12 leaves a lone byte dangling at 15. Many drivers fall back to a
// software vertex-pulling path for misaligned byte attributes. Pad each
// attribute in GLvertex to a 4-byte boundary -- e.g. pack texcoords + light
// + one spare byte into a single 4 x GL_UNSIGNED_BYTE attribute, or give the
// light value its own 4-byte slot -- and keep sizeof(GLvertex) a multiple of 4.
glVertexAttribPointer( attrTC, 2, GL_BYTE, GL_FALSE, sizeof(GLvertex), BUFFER_OFFSET(12) );
glVertexAttribPointer( attrL, 1, GL_BYTE, GL_FALSE, sizeof(GLvertex), BUFFER_OFFSET(15) );
glEnableVertexAttribArray(attrL);
glEnableVertexAttribArray(attrTC);

// Fixed-function position array; the shader reads it through gl_Vertex.
glVertexPointer( 3, GL_FLOAT, sizeof(GLvertex), BUFFER_OFFSET(0) );

// 'end' is an INCLUSIVE vertex index, so the largest valid value is
// vertexCount - 1; passing vertexCount claims one more vertex than the
// buffer holds, which some drivers punish with extra index validation.
glDrawRangeElements( GL_TRIANGLES, 0, chunkGeom->vertexCount - 1, chunkGeom->edgeCount, GL_UNSIGNED_SHORT, 0);

glDisableVertexAttribArray(attrTC);
glDisableVertexAttribArray(attrL);
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
This gets run for each of the roughly 400 objects in my scene, which are not overly complex; this code was previously running at 60 frames per second (with vsync), and is now running at around 11 frames per second.
Have I made some sort of mistake here that is resulting in poor performance?
Here are my simple shaders, although I doubt the problem lies there:
#version 120
// Vertex shader: forwards the per-vertex byte texcoords and light value to
// the fragment stage and transforms the fixed-function position.
attribute vec2 in_texcoords;
attribute float in_light;

// GLSL 1.20 spells vertex->fragment interpolants 'varying'; global 'out'
// is only legal from #version 130 onward. The original had no #version
// directive (defaulting to 110) yet used 'out', which only compiles on
// lenient drivers -- declare the version and use the matching keyword.
varying vec2 f_texcoords;
varying float f_light;

void main(void)
{
    f_texcoords = in_texcoords;
    f_light = in_light;
    // gl_ModelViewProjectionMatrix / gl_Vertex are compatibility-profile
    // built-ins, matching the glVertexPointer call on the CPU side.
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
#version 120
// Fragment shader: samples the world diffuse atlas at the interpolated
// texcoords. Declared with an explicit #version and 'varying' to match the
// vertex stage ('in' at global scope requires #version 130+).
uniform sampler2D worldDiffuse;

varying vec2 f_texcoords;
// NOTE(review): f_light is received but never applied -- presumably it was
// meant to modulate the sampled colour; confirm and either use or drop it.
varying float f_light;

void main (void)
{
    // The * 0.25 scale suggests texcoords arrive as raw (unnormalized)
    // byte values addressing a 4x4-style atlas -- TODO confirm.
    gl_FragColor = texture2D( worldDiffuse, f_texcoords.xy * 0.25 );
}