Barebones texturing shader problem

Hi folks way more knowledgeable than me,
This topic is intimately related to the one in this thread. I tried the suggested solution, but the shader failed to compile and link.
So, here I am, posting a complete code block that will compile under VC++ 6.0. I omitted the error-checking routines to make it shorter, and to many this code will seem familiar, as it very closely resembles the 3DLabs brick example code.

Both shaders are trivial, and should cause the square to be shaded red. However, it ends up black on the screen. I know the shader loading and compiling routines work, because if I change the fragment shader to explicitly set the color to red, it works.

I would be infinitely grateful for any insight into my problem.
</font><blockquote><font size="1" face="Verdana, Arial">code:</font><hr /><pre style="font-size:x-small; font-family: monospace;">#include <stdlib.h>
#include <stdio.h>
#define GLEW_STATIC 1
#include <GL/glew.h>
#include <GL/glut.h>

static GLint window;       /* GLUT window id returned by glutCreateWindow() */
GLubyte tex2d[16][16][4];  /* 16x16 RGBA texture image; filled solid red in main() */
GLuint texhandle;          /* GL texture object name from glGenTextures() */

const GLchar VertexSource={"
void main(void)
{
gl_Position = ftransform();
}
“};
const GLchar FragSource={”
uniform sampler2D texUnit0;
void main(void)
{
gl_FragColor = texture2D(texUnit0, gl_TexCoord[0].xy);
}
"};

GLuint installShaders(const GLchar *shaderVertex,
const GLchar *shaderFragment)
{
GLuint shaderVS, shaderFS, shaderProg;
GLint vertCompiled, fragCompiled;
GLint linked;

shaderVS = glCreateShader(GL_VERTEX_SHADER);
shaderFS = glCreateShader(GL_FRAGMENT_SHADER);

glShaderSource(shaderVS, 1, &shaderVertex, NULL);
glShaderSource(shaderFS, 1, &shaderFragment, NULL);

glCompileShader(shaderVS);
glGetShaderiv(shaderVS, GL_COMPILE_STATUS, &vertCompiled);

glCompileShader(shaderFS);
glGetShaderiv(shaderFS, GL_COMPILE_STATUS, &fragCompiled);

if (!vertCompiled

not all the code made it into the post, so here’s some more from where it left off:
</font><blockquote><font size=“1” face=“Verdana, Arial”>code:</font><hr /><pre style=“font-size:x-small; font-family: monospace;”> if (!vertCompiled

so something strange is going on, I can’t seem to get beyond that line.
Should be “if (!vertCompiled || !fragCompiled)”
after that,

        return 0;

    shaderProg = glCreateProgram();
    glAttachShader(shaderProg, shaderVS);
    glAttachShader(shaderProg, shaderFS);

    glLinkProgram(shaderProg);
    glGetProgramiv(shaderProg, GL_LINK_STATUS, &linked);

    if (!linked)
        return 0;

	return shaderProg;
}

/*
 * GLUT display callback: clear to the background color and draw one
 * textured unit-square quad centered at the origin.  The GL call order is
 * significant, so the body is left untouched.
 */
static void display(void)
{
	glLoadIdentity();
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	/* glEnable(GL_TEXTURE_2D) is ignored while a shader program is
	 * active (see thread discussion) but harmless. */
	glEnable(GL_TEXTURE_2D);
	glColor3ub(0,255,0);
	glBegin(GL_QUADS);
	/* Full [0,1]x[0,1] texture range across the quad, CCW winding. */
	glTexCoord2f(0.0,0.0);glVertex2f(-.5,-.5);
	glTexCoord2f(1.0,0.0);glVertex2f(.5,-.5);
	glTexCoord2f(1.0,1.0);glVertex2f(.5,.5);
	glTexCoord2f(0.0,1.0);glVertex2f(-.5,.5);
	glEnd();
	glDisable(GL_TEXTURE_2D);

	/* glFlush is redundant before a buffer swap but kept as written. */
	glFlush();
	glutSwapBuffers();
}

/*
 * GLUT keyboard callback: 'q' or ESC exits the program, any other key is
 * echoed to stdout.  (x, y) are the mouse coordinates GLUT supplies;
 * they are unused here.
 *
 * FIX: the forum paste split the printf format string across two physical
 * lines (the "\n" became a literal newline), leaving an unterminated
 * string literal; restored to "key: %d\n".
 */
static void key(unsigned char keyPressed, int x, int y)
{
	(void)x;
	(void)y;
	switch (keyPressed)
	{
	case 'q':
	case 27:	/* ESC */
		exit(0);
		break;
	default:
		printf("key: %d\n", keyPressed);
		break;
	}
}

/*
 * GLUT reshape callback: set the viewport to the new window size and
 * rebuild an orthographic projection spanning x in [-2,2], with the
 * vertical extent scaled by the inverse aspect ratio so the square is
 * not distorted.
 *
 * FIX: guard height == 0 (GLUT can report a zero-height client area,
 * e.g. on minimize), which would otherwise divide by zero.
 */
static void reshape(int width, int height)
{
	float aspect;

	if (height == 0)
		height = 1;
	aspect = (float) width / (float) height;

	glViewport(0, 0, width, height);
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();

	glOrtho(-2.0, 2.0, -2.0/aspect, 2.0/aspect, -10.0, 10.0);

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
}
int main( int argc, char **argv )
{
	int success = 0;

	glutInit( &argc, argv );
	glutInitDisplayMode( GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE);
	glutInitWindowSize(500, 500);
	window = glutCreateWindow( "Shader test");
	
	glutDisplayFunc(display);
	glutKeyboardFunc(key);
	glutReshapeFunc(reshape);
	
	glewInit();
	
	glDepthFunc(GL_LESS);
	glEnable(GL_DEPTH_TEST);
	glClearColor(1.0,1.0,1.0,0.0);

	for(int a=0;a<16;a++)
	{
		for(int b=0;b<16;b++)
		{
			tex2d[a][b][0]=255;
			tex2d[a][b][1]=0;
			tex2d[a][b][2]=0;
			tex2d[a][b][3]=255;
		}
	}
	glPixelStorei(GL_UNPACK_ALIGNMENT,1);
	glGenTextures(1,&texhandle);
	glActiveTexture(GL_TEXTURE0);
	glBindTexture(GL_TEXTURE_2D,texhandle);
	glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,16,16,0,GL_RGBA,GL_UNSIGNED_BYTE,tex2d);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,GL_REPEAT);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,GL_REPEAT);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,GL_NEAREST);
	glTexEnvf(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_REPLACE);

	GLuint progObject = installShaders(VertexSource, FragSource);
	glUseProgram(progObject);
	glUniform1i(glGetUniformLocation(progObject,"texUnit0"),texhandle);

	glutMainLoop();
	
	return 0;
}

In

glUniform1i(glGetUniformLocation(progObject, "texUnit0"), texhandle);

you have to set the texture unit where the texture is bound not the texture id! So, in your case simply pass 0 instead of texhandle. Btw, you only have to bind the texture, enabling isn’t needed for the shader to work.

It works!
Amazing, a whole day of struggling and it was as simple as changing one parameter.

Ok, so now I’m a little confused: when you say, “you have to set the texture unit where the texture is bound”, is the reason that I’m supposed to put 0 there because I enabled GL_TEXTURE0? If I had enabled GL_TEXTURE5 instead, would I then put 5 in there?
If I were to be using multiple textures, is it true that I would have to do a glActiveTexture for each?

Yes, it’s just as simple as

glActiveTexture(GL_TEXTUREi);
glBindTexture(GL_TEXTURE_whatever, texid);

and then set the uniform of this texture to i.

Excellent, I think I’ve got it. Thanks for your help!

Can anyone explain to me, WHY it is not necessary to enable the texture units? I never heard of that before. It kind of makes sense, but then is it better (speed-wise) NOT to enable texture units at all, if you are using shaders anyway?

Thanks,
Jan.

glEnable and the texture function settings (glTexEnv) are ignored by the shader and can be omitted.
And as always the less state settings/changes the better.

Originally posted by Jan:
Can anyone explain to me, WHY it is not necessary to enable the texture units? I never heard of that before. It kind of makes sense, but then is it better (speed-wise) NOT to enable texture units at all, if you are using shaders anyway?
I don’t know the technical reason for it but it’s useless to enable them if you think about it. Don’t sample the texture in your shader if you don’t want to.

This is a fixed function vs programmable pipeline difference.

Also, you can’t sample 2 different textures on the same unit as well, even though you can bind a 1D, 2D, CUBE, 3D, RECT to the same tex unit.
In fixed pipe, there is the texture hierarchy thing but in PP, it doesn’t exist.

This topic was automatically closed 183 days after the last reply. New replies are no longer allowed.