The black screen

Hi all.

Having completed my D3D renderers for my engine, I turned my attention to OpenGL. I say that because it is relevant a bit later on when I set up the various matrices.

So, I’ve completed all the code that I thought I needed, but I’m getting a black screen, so I must be missing something. I check all my errors, but nothing is coming up. This is for the Windows platform, and I’m not using GLEW or GLUT or any of that kind of stuff, just core functionality here.

I’m taking out all the error checking in this code to keep it as brief as possible. Can someone take a look through it and see if they see any issues?

I will start after I have set up the context. I think it’s working fine up to that point. (BTW, I print out the OGL version both after setting up the dummy context and after setting up the real context, and I get 4.0.0 first, then 3.3.0 second, which is correct, since I request a 3.3.0 context from wglCreateContextAttribsARB().)

The test geometry vertex and index data:


	//geometry data
	GLfloat BoxVerts[] = {-0.6f,  0.8f, -0.6f,   0.6f,  0.8f, -0.6f,   0.6f,  0.8f,  0.6f,  -0.6f,  0.8f,  0.6f,
						  -0.8f, -0.8f, -0.8f,   0.8f, -0.8f, -0.8f,   0.8f, -0.8f,  0.8f,  -0.8f, -0.8f,  0.8f};
	GLfloat BoxColors[] = {1.0f, 1.0f, 1.0f,   0.0f, 1.0f, 1.0f,   1.0f, 0.0f, 1.0f,   1.0f, 1.0f, 0.0f,
						   0.0f, 0.0f, 1.0f,   0.0f, 1.0f, 0.0f,   1.0f, 0.0f, 0.0f,   0.0f, 1.0f, 0.0f};
	unsigned int BoxIndices[] = {3,1,0, 2,1,3, 0,5,4, 1,5,0, 3,4,7, 0,4,3, 1,6,5, 2,6,1, 2,7,6, 3,7,2, 6,4,5, 7,4,6};

The shader setup


	//create the shader objects, compile and link them
	GLuint VertexShader;
	GLuint FragmentShader;
	GLuint ShaderProgram;
	char *Log;
	GLint Size;

	VertexShader = glCreateShader(GL_VERTEX_SHADER);
	glShaderSource(VertexShader, 1, &vString, NULL);
	glCompileShader(VertexShader);
	glGetShaderiv(VertexShader, GL_INFO_LOG_LENGTH, &Size);
	Log = new char[Size];
	glGetShaderInfoLog(VertexShader, Size, nullptr, Log); 
	OutputDebugStringA(Log);
	delete[] Log;

	FragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
	glShaderSource(FragmentShader, 1, &fString, NULL);
	glCompileShader(FragmentShader);
	glGetShaderiv(FragmentShader, GL_INFO_LOG_LENGTH, &Size);
	Log = new char[Size];
	glGetShaderInfoLog(FragmentShader, Size, nullptr, Log); 
	OutputDebugStringA(Log);
	delete[] Log;

	//create the program
	ShaderProgram = glCreateProgram();
	glAttachShader(ShaderProgram, VertexShader);
	glAttachShader(ShaderProgram, FragmentShader);

	//link and use the program
	glLinkProgram(ShaderProgram);
	glUseProgram(ShaderProgram);
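
One gap in the listing above, worth flagging because a silently failed shader also yields a black screen: the info logs get printed, but the compile and link status bits are never actually tested. A minimal sketch of those checks, reusing the handles from the code above (Status is a new variable, not in the original):


	//verify that compilation and linking actually succeeded
	GLint Status;
	glGetShaderiv(VertexShader, GL_COMPILE_STATUS, &Status);
	if (Status != GL_TRUE)
		OutputDebugStringA("vertex shader failed to compile\n");
	glGetShaderiv(FragmentShader, GL_COMPILE_STATUS, &Status);
	if (Status != GL_TRUE)
		OutputDebugStringA("fragment shader failed to compile\n");
	glGetProgramiv(ShaderProgram, GL_LINK_STATUS, &Status);
	if (Status != GL_TRUE)
		OutputDebugStringA("program failed to link\n");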

Setting up the attribute buffers


	//find the attribute locations
	GLint PositionLocation;
	GLint ColorLocation;
	PositionLocation = glGetAttribLocation(ShaderProgram, "Position");
	ColorLocation = glGetAttribLocation(ShaderProgram, "Color");

	//create and bind a vertex array object
	GLuint vao;
	glGenVertexArrays(1, &vao);
	glBindVertexArray(vao);

	//create the attribute buffers
	GLuint AttributeBuffer;
	glGenBuffers(1, &AttributeBuffer);
	glBindBuffer(GL_ARRAY_BUFFER, AttributeBuffer);

	//fill the buffer
	glBufferData(GL_ARRAY_BUFFER, sizeof(BoxVerts) + sizeof(BoxColors), nullptr, GL_STATIC_DRAW);
	glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(BoxVerts), BoxVerts);
	glBufferSubData(GL_ARRAY_BUFFER, sizeof(BoxVerts), sizeof(BoxColors), BoxColors);

	//relate the buffer data to the attributes in the shaders
	glEnableVertexAttribArray(PositionLocation);
	glVertexAttribPointer(PositionLocation, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid *)0);
	glEnableVertexAttribArray(ColorLocation);
	glVertexAttribPointer(ColorLocation, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid *)sizeof(BoxVerts));

	//create the index buffer
	GLuint IndexBuffer;
	glGenBuffers(1, &IndexBuffer);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexBuffer);

	//fill the buffer
	glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(BoxIndices), BoxIndices, GL_STATIC_DRAW);
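
Looking back at the glGetAttribLocation() calls at the top of this listing: one guard worth having is a check for -1, which is what that call returns when the name is misspelled or the attribute was optimized out of the shader; passing -1 to glEnableVertexAttribArray() then raises GL_INVALID_VALUE. A two-line sketch using the names above:


	//guard against attributes that were not found (name mismatch or optimized out)
	if (PositionLocation == -1 || ColorLocation == -1)
		OutputDebugStringA("attribute lookup failed: check the names in the vertex shader\n");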

Setting the uniforms


	//set the matrices
	GLint UniformWorldMatrix;
	GLint UniformViewMatrix;
	GLint UniformProjectionMatrix;

	float WorldMatrix[] = {0.707f, 0.0f, 0.707f, 0.0f,
						   0.0f, 1.0f, 0.0f, 0.0f,
						   -0.707f, 0.0f, 0.707f, 0.0f,
						   0.0f, 0.0f, 0.0f, 1.0f};
	float ViewMatrix[] = {1.0f, 0.0f, 0.0f, 0.0f,
						  0.0f, 1.0f, 0.0f, 0.0f,
						  0.0f, 0.0f, 1.0f, 10.0f,
						  0.0f, 0.0f, 0.0f, 1.0f};
	float ProjectionMatrix[] = {4.0f, 0.0f, 0.0f, 0.0f,
						        0.0f, 4.0f, 0.0f, 0.0f,
						        0.0f, 0.0f, 1.0f, -1.0f,
						        0.0f, 0.0f, 1.0f, 0.0f};

	UniformWorldMatrix = glGetUniformLocation(ShaderProgram, "WorldMatrix");
	UniformViewMatrix = glGetUniformLocation(ShaderProgram, "ViewMatrix");
	UniformProjectionMatrix = glGetUniformLocation(ShaderProgram, "ProjectionMatrix");
	glUniformMatrix4fv(UniformWorldMatrix, 1, GL_FALSE, WorldMatrix);
	glUniformMatrix4fv(UniformViewMatrix, 1, GL_FALSE, ViewMatrix);
	glUniformMatrix4fv(UniformProjectionMatrix, 1, GL_FALSE, ProjectionMatrix);

Windows code


    //pop up the window
    ShowWindow(ControlWindowHandle, SW_SHOW);

	while (true)
        {
        if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
            {
            if (msg.message == WM_QUIT)
                {
                break;
                }
            TranslateMessage(&msg);
            DispatchMessage(&msg);
            }
        else
            {
            Sleep(1);   //do not max out processor
            glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, 0);
            }
        }

	return 0;
	}

[EDIT: I broke up the wall of code a bit for easier reading. I also changed the last parameter of glDrawElements to 0 (I’ve seen conflicting reports on what to put in here, but I think 0 is correct, let me know if that’s wrong).]

Okay, a lot of code, I know, but hopefully it’s readable.

Alright, the hard-coded matrices are the same ones I use in my D3D renderer (they are not hard-coded there, but the values I use here are what they end up being). It occurs to me that this may not be right (there are handedness differences between OGL and D3D, right? which would change the perspective matrix?). I can mess with that (although if anyone has a link somewhere that explains the differences and how they affect the matrices, that would be good).

The values for the box vertices/indices are the same as from D3D, so I’m not concerned those are wrong.

The shaders are as such:


	const char *vString = "																\
		#version 330\n																	\
																						\
		in vec3 Position;																\
		in vec3 Color;																	\
		out vec3 FragColor;																\
		uniform mat4 WorldMatrix;														\
		uniform mat4 ViewMatrix;														\
		uniform mat4 ProjectionMatrix;													\
																						\
		void main ()																	\
			{																			\
			vec4 Vertex = vec4(Position, 1.0);											\
			gl_Position = ProjectionMatrix * ViewMatrix * WorldMatrix * Vertex;		\
			FragColor = Color;															\
			}";
	
	const char *fString = "																\
		#version 330\n																	\
																						\
		in vec3 FragColor;																\
		out vec4 OutColor;																\
																						\
		void main ()																	\
			{																			\
			OutColor = vec4(FragColor, 1.0);											\
			}";

So, does anyone see what it is that I’m missing or messing up?

Silly question, but are you using SwapBuffers to flip your scene to the screen?

Believe me, there are no silly questions :slight_smile: I’m sure whatever I’m doing wrong, it’s something silly like that. I’m a complete newbie when it comes to OGL, I’m going off my references and what I can study on the internet (and unfortunately, just about everything assumes use of GLUT, GLEW etc, which I’m not using, so it’s rough going).

No, I wasn’t using SwapBuffers, but that was because I wasn’t requesting a double-buffered pixel format. Since that seems like a very good idea, I changed that. My pixel format selector now looks like this (this happens before all the code I posted above):


	//get a real pixel format
	unsigned int PixelCount;
	int PixAttribs[] = {
		WGL_SUPPORT_OPENGL_ARB, 1,
		WGL_DOUBLE_BUFFER_ARB, 1,
		WGL_DRAW_TO_WINDOW_ARB, 1,
		0};
	int ContextAttribs[] = {
		WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
		WGL_CONTEXT_MINOR_VERSION_ARB, 3,
		0};
	int Format;

	wglChoosePixelFormatARB(DC, PixAttribs, nullptr, 1, &Format, &PixelCount);

	if (Format == -1)
		OutputDebugString(L"ChoosePixelFormat (OGL): Unable to locate available Pixel Format\n");

	//delete the old context
	wglMakeCurrent(DC, nullptr);	
	wglDeleteContext(RenderingContext);

	//set up the real context
	RenderingContext = wglCreateContextAttribsARB(DC, nullptr, ContextAttribs);
	wglMakeCurrent(DC, RenderingContext);	

And then the draw call (in the message pump) is now


        else
            {
            Sleep(1);   //do not max out processor
            glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, 0);
            OGLPrintError("glDrawElements()");
            SwapBuffers(DC);
            }

Still no luck with getting geometry on the screen though.

I’ve also changed my perspective matrix after doing a bit of research. It is slightly different from what is used with D3D. Hopefully this is right, but let me know if you see anything suspicious:


	float ViewportWidth = 0.5f;
	float ViewportHeight = 0.5f;
	float ViewportNear = -1.0f;
	float ViewportFar = -1000.0f;
	float ProjectionMatrix[] = {1.0f, 0.0f, 0.0f, 0.0f,
						        0.0f, 1.0f, 0.0f, 0.0f,
						        0.0f, 0.0f, 1.0f, 0.0f,
						        0.0f, 0.0f, -1.0f, 0.0f};
	ProjectionMatrix[0] = (2.0f*ViewportNear)/ViewportWidth;
	ProjectionMatrix[5] = (2.0f*ViewportNear)/ViewportHeight;
	ProjectionMatrix[10] = (ViewportNear - ViewportFar)/(ViewportFar - ViewportNear);
	ProjectionMatrix[11] = (-2.0f*ViewportFar*ViewportNear)/(ViewportFar - ViewportNear);

Notice I set my near and far parameters to be negative, which I’m doing based on the idea that -z should go into the screen. But it is possible the math already accounts for that and they should be positive (I couldn’t really tell to be honest), so I’ve tried them both ways. But still, no geometry.

Hi, I think your projection matrix is incorrect. It should be this:


ProjectionMatrix[0] = (2.0f * zMin) / (xMax - xMin);
ProjectionMatrix[5] = (2.0f * zMin) / (yMax - yMin);
ProjectionMatrix[8] = (xMax + xMin) / (xMax - xMin);
ProjectionMatrix[9] = (yMax + yMin) / (yMax - yMin);
ProjectionMatrix[10] = -((zMax + zMin) / (zMax - zMin));
ProjectionMatrix[11] = -1.0f;
ProjectionMatrix[14] = -((2.0f * (zMax*zMin))/(zMax - zMin));
ProjectionMatrix[15] = 0.0f;

source: http://www.songho.ca/opengl/gl_projectionmatrix.html

Thanks Mobeen. My matrix assumes a symmetric viewing volume, therefore:

xMax - xMin = 2xMax = ViewportWidth
yMax - yMin = 2yMax = ViewportHeight
xMax + xMin = 0
yMax + yMin = 0

So I think all my equations simplify to exactly what you have written down, if I did the math right :slight_smile: [and actually, if you scroll down one more matrix on the link you gave me, he shows the same equations for a symmetric viewing volume].

However, I do notice you are specifying your arrays into the matrix in column-major order (so that the -1.0 ends up at [11]), while I was putting them in in row order (so it ends up at [14]). Should I be specifying them in column-major? I’ll try that, maybe there is my problem (or I guess I could just let OGL transpose them for me in the glUniformMatrix4fv() call). I’ll give it a try and let you know.

[EDIT: No, that still doesn’t give me any geometry. I noticed your [10] array member does not match mine, so I changed mine to agree with yours (I must have made an error in my calculations).]

I’m at a total loss here. Is there any sort of diagnostic program I can run (similar to PIX for D3D?) or any way to find out what are in the attributes/uniforms as each vertex is processed through the shader, and (even better) what the out values end up being? I just don’t even know what to look at here, at this point.
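
For reference, the symmetric case really does collapse those equations down to four interesting entries, and the transpose question can be settled either by writing the array in column-major order or by letting glUniformMatrix4fv() transpose it. A sketch, assuming positive near/far distances n and f and a frustum w wide and h high at the near plane (the variable names here are mine; UniformProjectionMatrix is the location fetched earlier):


	//symmetric frustum: r = -l = w/2 and t = -b = h/2, so the
	//(r+l)/(r-l) and (t+b)/(t-b) terms vanish
	float n = 1.0f, f = 1000.0f, w = 0.5f, h = 0.5f;
	float Projection[16] = {0};
	Projection[0]  = (2.0f * n) / w;            //column 0, row 0
	Projection[5]  = (2.0f * n) / h;            //column 1, row 1
	Projection[10] = -(f + n) / (f - n);        //column 2, row 2
	Projection[11] = -1.0f;                     //column 2, row 3
	Projection[14] = (-2.0f * f * n) / (f - n); //column 3, row 2

	//array above is column-major; pass GL_TRUE instead if yours is row-major
	glUniformMatrix4fv(UniformProjectionMatrix, 1, GL_FALSE, Projection);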

Ahem, how can you say xMax - xMin = ViewportWidth?

You should have written WindowWidth IMO. You need to derive this width (and height) based on either HFOV or VFOV for symmetric frustums. Here’s my code for a 90 degree hfov:

frustum[1] = std::tan(.5 * M_HALFPI) * GetN(frustum);
frustum[0] = -frustum[1];
frustum[3] = aspect_ratio * frustum[1];
frustum[2] = -frustum[3];

frustum is the array {l, r, b, t, n, f}.

Best solution to your problem IMO would be to take an existing demo from the Internet, which works, and then adapt it to your use. Also check for OpenGL errors if you aren’t already! Also for shader compile and link errors! I think there’s a tool that checks for errors on Windows automatically, but I don’t use it, since I’m a Linux user.

#ifndef NDEBUG

#define GL_DEBUG(x)                                       \
do {                                                      \
    GLenum error(glGetError());                           \
    if (GL_NO_ERROR != error)                             \
    {                                                     \
        std::cerr << gl_error_string(error) << std::endl; \
        BOOST_VERIFY(0);                                  \
    }                                                     \
} while(0)

#else

#define GL_DEBUG(x)

#endif // NDEBUG

Please be aware that people in these forums generally frown upon this macro of mine (but hey, it compiles away in release builds and is portable). I’ve compiled stuff with this macro for FreeBSD, Linux, and Windows.
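
As reconstructed above, the macro just reads glGetError() after the fact; the argument is only a breadcrumb for the reader. A hypothetical use right after a GL call might look like:

glBindVertexArray(vao);
GL_DEBUG(glBindVertexArray);   //aborts (via BOOST_VERIFY) in debug builds if the call left an error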

Okay, I’m going to remove this from the problem set, because whether it is correct or not, I’m about 95% sure it’s not the underlying cause of my lack of geometry, heh. But I’ll get to that in a while, I’m retooling the code at the moment.

Best solution to your problem IMO would be to take an existing demo from the Internet, which works, and then adapt it to your use.

Well, I would, if one existed that was even remotely useful to me. Sadly, 99.9% of the stuff out there on the internets uses GLU and/or GLUT and/or GLEW, and is therefore mostly useless to me. And I know you guys don’t understand the whys of this, but take it as a given I won’t be using any of those libraries.

Also check for OpenGL errors if you aren’t already!

I do. Every line in my code that calls an OGL function is followed by another that goes something like:
OGLPrintError("glBindVertexArray()");

Which just checks for and prints out any errors. I took all those lines out of the code I posted for brevity, but they are there. And they report nothing amiss.
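
For readers following along: OGLPrintError() isn’t shown anywhere in the thread, so here is a minimal sketch of what such a helper could look like. This is an assumption about the helper, not the poster’s actual code; the loop matters because the GL can queue several error flags and glGetError() pops one per call (sprintf_s is the MSVC-safe printf):


	void OGLPrintError(const char *Caller)
		{
		GLenum Error;
		while ((Error = glGetError()) != GL_NO_ERROR)
			{
			char Buffer[128];
			sprintf_s(Buffer, "%s: GL error 0x%04X\n", Caller, Error);
			OutputDebugStringA(Buffer);
			}
		}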

Also for shader compile and link errors!

Already done, look at the code.

I like your macro. When (if EVER) I get this working and start converting it to actual production code, I’ll probably adapt it for my uses rather than make a function call like I’m doing now. Thanks.

I’m going to repost my code in a minute, a bit simplified to remove the uniforms. I just want to see $%&^&^ triangles on the screen, I can deal with the matrices later once I have everything else working.

There are folks here who provide modern OpenGL samples/tutorials. There’s also a GL3 sample in the GL wiki. I’m sure you can find something. I’ve seen GL4+ context creation posts in these forums, just google for them.

About my macro: maybe we like it, because we are both noobs.

http://www.opengl.org/wiki/Tutorial:_OpenGL_3.0_Context_Creation_(GLX)

BTW:
//////////////////////////////////////////////////////////////////////////////
GLubyte const* gl_error_string(GLenum error)
{
	return gluErrorString(error);
}

Here I use GLU, but the wrapper is there precisely so I don’t rely on GLU on platforms where it is not present. You can copy Mesa’s implementation of gluErrorString(), if you don’t like GLU.

First, thanks very much to everyone that has offered suggestions. It is much appreciated. I know how frustrating it can be dealing with a newbie, and I appreciate the patience you are showing with me :slight_smile:

I still have no geometry on screen, which is baffling me. I feel like I must be missing a step somewhere. And it’s probably something really obvious and stupid that is simply getting missed in the vastness of the rest of the code.

Okay, I’m going to post nearly the entirety of my test program. This is simplified from what I posted before, it uses no uniforms at all, so there are no matrix calculations to deal with. The code goes in order from top to bottom, though I’m breaking it into chunks here so it’s not overwhelming. Also, I DO CHECK ALL OGL ERRORS (that code not shown). None are reported.

Okay, so the definition of the vertices:


	GLfloat BoxVerts[] = {20.0f,  50.0f, -20.0f,   40.0f,  50.0f, -20.0f,   40.0f,  50.0f, -40.0f,   20.0f,  50.0f, -40.0f,
						  10.0f, 10.0f, -10.0f,   50.0f, 10.0f, -10.0f,   50.0f, 10.0f, -60.0f,   10.0f, 10.0f, -60.0f};
	GLfloat BoxColors[] = {1.0f, 1.0f, 1.0f,   0.0f, 1.0f, 1.0f,   1.0f, 0.0f, 1.0f,   1.0f, 1.0f, 0.0f,
						   0.0f, 0.0f, 1.0f,   0.0f, 1.0f, 0.0f,   1.0f, 0.0f, 0.0f,   0.0f, 1.0f, 0.0f};
	unsigned int BoxIndices[] = {3,1,0, 2,1,3, 0,5,4, 1,5,0, 3,4,7, 0,4,3, 1,6,5, 2,6,1, 2,7,6, 3,7,2, 6,4,5, 7,4,6};

Since I’m not using matrices at all, I’ve defined the locations in window coords.

Okay, creation of the window and setup of the dummy rendering context:


    //fill in the class attributes for the main window
    wc.cbSize = sizeof(WNDCLASSEX);
    wc.style = CS_HREDRAW | CS_VREDRAW | CS_DBLCLKS | CS_OWNDC;
    wc.lpfnWndProc = WndProc;
    wc.cbClsExtra = 0;
    wc.cbWndExtra = 0;
    wc.hInstance = Instance;
    wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    wc.hCursor = LoadCursor(NULL, IDC_ARROW);
    wc.hbrBackground = (HBRUSH)GetStockObject(DKGRAY_BRUSH);
    wc.lpszMenuName = nullptr;
    wc.lpszClassName = L"OGL Test";
    wc.hIconSm = LoadIcon(NULL, IDI_APPLICATION);

    //register
    RegisterClassEx(&wc);

    //create and center the window on screen
    ControlWindowHandle = CreateWindowEx(NULL, L"OGL Test", L"OGL Test", WS_THICKFRAME | WS_MINIMIZEBOX | WS_SYSMENU, 0, 0, 800, 600, NULL, NULL, Instance, NULL);

	//grab the device context
	DC = GetDC(ControlWindowHandle);

	//we can't use wglGetProcAddress without a context first, so set up a dummy context
	PixelFormatDesc.iPixelType = PFD_TYPE_RGBA;
	PixelFormatDesc.cColorBits = 24;
	PixelFormatDesc.cAlphaBits = 8;
	PixelFormatDesc.cDepthBits = 24;
	PixelFormatDesc.cStencilBits = 8;
	PixelFormatDesc.iLayerType = PFD_MAIN_PLANE;

	SetPixelFormat(DC, 1, &PixelFormatDesc);
	RenderingContext = wglCreateContext(DC);
	wglMakeCurrent(DC, RenderingContext);	

	//show the version of OGL that is available
	OutputDebugStringA((char*)glGetString(GL_VERSION));
	OutputDebugStringA("
");

Okay, now I grab pointers to all the functions I am going to use. I’m going to abbreviate this, as it’s a lot of lines of essentially the same code, but it goes like such:


	//query for the functions we are going to need
	bool (WINAPI *wglChoosePixelFormatARB)(HDC hdc, const int *AttributeIntList, const float *AttributeFloatList, unsigned int MaxFormats, int *PixelFormat, unsigned int *PixelCount);
	HGLRC (WINAPI *wglCreateContextAttribsARB)(HDC hdc, HGLRC ShareContext, const int *AttributeList);
...and so forth...

	*reinterpret_cast<PROC*>(&wglChoosePixelFormatARB) = wglGetProcAddress("wglChoosePixelFormatARB");
	*reinterpret_cast<PROC*>(&wglCreateContextAttribsARB) = wglGetProcAddress("wglCreateContextAttribsARB");
...and so forth...
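
A caution on this pattern, since a silent failure here also ends in a black screen (or a crash): wglGetProcAddress() returns NULL for any entry point the driver doesn’t export, and calling through a null pointer gives no GL error to catch. A minimal guard covering just the two pointers shown (a sketch; the early return assumes this runs in WinMain):


	if (wglChoosePixelFormatARB == nullptr || wglCreateContextAttribsARB == nullptr)
		{
		OutputDebugString(L"wglGetProcAddress failed: WGL_ARB_pixel_format / WGL_ARB_create_context unavailable\n");
		return 1;
		}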

Now I delete the dummy context and set up the real context


	//get a real pixel format
	unsigned int PixelCount;
	int PixAttribs[] = {
		WGL_SUPPORT_OPENGL_ARB, 1,
		WGL_DOUBLE_BUFFER_ARB, 1,
		WGL_DRAW_TO_WINDOW_ARB, 1,
		0};
	int ContextAttribs[] = {
		WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
		WGL_CONTEXT_MINOR_VERSION_ARB, 3,
		0};
	int Format;

	wglChoosePixelFormatARB(DC, PixAttribs, nullptr, 1, &Format, &PixelCount);

	if (Format == -1)
		OutputDebugString(L"ChoosePixelFormat (OGL): Unable to locate available Pixel Format\n");

	//delete the old context
	wglMakeCurrent(DC, nullptr);	
	wglDeleteContext(RenderingContext);

	//set up the real context
	RenderingContext = wglCreateContextAttribsARB(DC, nullptr, ContextAttribs);
	wglMakeCurrent(DC, RenderingContext);	

	//show the version of OGL we are using (should be 3.3.0)
	OutputDebugStringA((char*)glGetString(GL_VERSION));
	OutputDebugStringA("
");


Okay, these are the new shader strings


	const char *vString = "																\
		#version 330\n																	\
																						\
		in vec3 Position;																\
		in vec3 Color;																	\
		out vec3 FragColor;																\
																						\
		void main ()																	\
			{																			\
			gl_Position = vec4(Position, 1.0);											\
			FragColor = Color;															\
			}";

	const char *fString = "																\
		#version 330\n																	\
																						\
		in vec3 FragColor;																\
		out vec4 OutColor;																\
																						\
		void main ()																	\
			{																			\
			OutColor = vec4(FragColor, 1.0);											\
			}";

Those are used to create, link, and use the shaders, like such:


	GLuint VertexShader;
	GLuint FragmentShader;
	GLuint ShaderProgram;
	char *Log;
	GLint Size;

	VertexShader = glCreateShader(GL_VERTEX_SHADER);
	glShaderSource(VertexShader, 1, &vString, NULL);
	glCompileShader(VertexShader);
	glGetShaderiv(VertexShader, GL_INFO_LOG_LENGTH, &Size);
	Log = new char[Size];
	glGetShaderInfoLog(VertexShader, Size, nullptr, Log); 
	OutputDebugStringA(Log);
	delete[] Log;

	FragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
	glShaderSource(FragmentShader, 1, &fString, NULL);
	glCompileShader(FragmentShader);
	glGetShaderiv(FragmentShader, GL_INFO_LOG_LENGTH, &Size);
	Log = new char[Size];
	glGetShaderInfoLog(FragmentShader, Size, nullptr, Log); 
	OutputDebugStringA(Log);
	delete[] Log;

	//create the program
	ShaderProgram = glCreateProgram();
	glAttachShader(ShaderProgram, VertexShader);
	glAttachShader(ShaderProgram, FragmentShader);

	//link and use the program
	glLinkProgram(ShaderProgram);
	glUseProgram(ShaderProgram);

Grabbing the attribute locations


	//find the attribute locations
	GLint PositionLocation;
	GLint ColorLocation;
	PositionLocation = glGetAttribLocation(ShaderProgram, "Position");
	ColorLocation = glGetAttribLocation(ShaderProgram, "Color");

Set up the vertex array object


	//create and bind a vertex array object
	GLuint vao;
	glGenVertexArrays(1, &vao);
	glBindVertexArray(vao);

Create the attribute buffers, fill them, and link them to the shader attributes


	//create the attribute buffers
	GLuint AttributeBuffer;
	glGenBuffers(1, &AttributeBuffer);
	glBindBuffer(GL_ARRAY_BUFFER, AttributeBuffer);

	//fill the buffer
	glBufferData(GL_ARRAY_BUFFER, sizeof(BoxVerts) + sizeof(BoxColors), nullptr, GL_STATIC_DRAW);
	glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(BoxVerts), BoxVerts);
	glBufferSubData(GL_ARRAY_BUFFER, sizeof(BoxVerts), sizeof(BoxColors), BoxColors);

	//relate the buffer data to the attributes in the shaders
	glEnableVertexAttribArray(PositionLocation);
	glVertexAttribPointer(PositionLocation, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid *)0);
	glEnableVertexAttribArray(ColorLocation);
	glVertexAttribPointer(ColorLocation, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid *)sizeof(BoxVerts));

Create and fill the index buffer


	//create the index buffer
	GLuint IndexBuffer;
	glGenBuffers(1, &IndexBuffer);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexBuffer);

	//fill the buffer
	glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(BoxIndices), BoxIndices, GL_STATIC_DRAW);

Is this possibly where I am missing a step? Do I need to enable that buffer somehow?

And then finally the windows message pump


    //pop up the window
    ShowWindow(ControlWindowHandle, SW_SHOW);

	while (true)
        {
        if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
            {
            if (msg.message == WM_QUIT)
                {
                break;
                }
            TranslateMessage(&msg);
            DispatchMessage(&msg);
            }
        else
            {
            Sleep(1);   //do not max out processor
            glClear(GL_COLOR_BUFFER_BIT);
            glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, BoxIndices);
            glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, 0);
            SwapBuffers(DC);
            }
        }

	return 0;

Notice I’ve used glDrawElements() twice. There is some seriously conflicting information out there on what to put in that last parameter. Some people (who claim their code works) put in a nullptr (0, to refer to the starting point of the bound index buffer). Others, who make the same claim, put in the address of their index array. I’m thoroughly confused as to which is right (I suspect the former because the index array is already bound to the pipeline, so I see no reason why we need to pass it into the draw call at all, but I could be wrong).
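
For what it’s worth, the rule is: when a buffer is bound to GL_ELEMENT_ARRAY_BUFFER (and that binding is recorded in the VAO), the last argument to glDrawElements() is reinterpreted as a byte offset into that buffer, so 0 means "start at the first index". Passing the array’s address only works in the old client-side-array path, which a 3.3 core context doesn’t even allow. A sketch of the offset form:


	//index buffer bound (stored in the VAO): last argument is a byte offset
	glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, 0);

	//to skip the first triangle, offset by 3 indices:
	glDrawElements(GL_TRIANGLES, 33, GL_UNSIGNED_INT, (const GLvoid *)(3 * sizeof(unsigned int)));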

Alright, just in case, I’ll also post my message handler. There is nothing special here.


//---------------------------------------------------------------------------
LRESULT CALLBACK WndProc(HWND Wnd, UINT Message, WPARAM wParam, LPARAM lParam)
    {
	PAINTSTRUCT ps;
	HDC hdc;

	switch (Message)
		{
		case WM_DESTROY:
			{
			PostQuitMessage(0);
			return 0;
			}
		case WM_KEYDOWN:
			{
			if (wParam == VK_ESCAPE)
				{
				DestroyWindow(Wnd);
				return 0;
				}
			break;   //without this, other keys fall through into WM_PAINT
			}
		case WM_PAINT:
			{
			hdc = BeginPaint(Wnd, &ps);
			EndPaint(Wnd, &ps);
			break;
			}
		}
	return DefWindowProc(Wnd, Message, wParam, lParam);
    }

Sorry for posting so much code. I hope it becomes evident from what I’ve posted what I’m missing.

Success guys!

See, I TOLD you it was something really stupid.

glFlush();

that’s all it took. I now have geometry on the screen. Wooh!

Although the fact that I HAVE to call glFlush to see geometry on the screen suggests to me there might be something amiss with my double buffer, as my understanding was that glFlush shouldn’t have to be called at all. I’ll have to look into what is going on with that.

Also I’m not getting the results I expect, it’s almost like the buffers are reading interleaved instead of one after the other like I intended.

But I have something on the screen, so I can work with it and get all this stuff figured out. Which is a huge relief, heh.

Thanks again everyone. I know I was a PITA. Hopefully I can take it from here.

PixelFormatDesc.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;

It’s already set, I believe, via this code:


	//get a real pixel format
	unsigned int PixelCount;
	int PixAttribs[] = {
		WGL_SUPPORT_OPENGL_ARB, 1,
		WGL_DOUBLE_BUFFER_ARB, 1,
		WGL_DRAW_TO_WINDOW_ARB, 1,
		0};
	int ContextAttribs[] = {
		WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
		WGL_CONTEXT_MINOR_VERSION_ARB, 3,
		0};
	int Format;

	wglChoosePixelFormatARB(DC, PixAttribs, nullptr, 1, &Format, &PixelCount);

	if (Format == -1)
		OutputDebugString(L"ChoosePixelFormat (OGL): Unable to locate available Pixel Format
");

	//delete the old context
	wglMakeCurrent(DC, nullptr);	
	wglDeleteContext(RenderingContext);

	//set up the real context
	RenderingContext = wglCreateContextAttribsARB(DC, nullptr, ContextAttribs);
	wglMakeCurrent(DC, RenderingContext);	

The other one (using the PixelFormatDesc structure) was just a dummy context I needed in order to use the wglGetProcAddress() function. Once I had the pointers to my OGL functions, it was deleted in favor of the new context. I went ahead and added your flag line to it anyway (just in case), but it didn’t do anything different (I didn’t expect that it would).

Am I not setting up the double buffer properly through the wglChoosePixelFormatARB() function?

wglChoosePixelFormatARB is for p-buffers. Why are you using a p-buffer? I would create a window, make it invisible, set up the pixel format, and create a render target aka FBO. p-buffers are old and should not be used today.

I’m not creating a p-buffer. I’m following the advice given in the OpenGL Superbible (5th ed), Chapter 13 in section “Pixel Formats”.

In this section, it is stated that “You need to find a pixel format that has the characteristics and capabilities that match the needs of your application. This pixel format is then used to create an OpenGL rendering context. There are two ways to go about looking for a pixel format. The first method is the more preferred and capable mechanism exposed by OpenGL directly. The second method uses the original Windows interfaces, which have been around for as long as OpenGL has been supported on Windows.”

He then goes on to describe the use of both methods. The first (the “preferred method”) uses wglChoosePixelFormatARB(), which is what I set up. The second, less preferred method, uses your PixelFormatDescriptor and ChoosePixelFormat(). He even says in the beginning of the section “But these methods are limited and do not expose all formats or attributes. We show you how to use these for completeness. If you are writing a new OpenGL app, you are better off using the [wglChoosePixelFormatARB()] method we just described”.

All that to say, I think I’m doing it right :slight_smile: Having said that, my double buffer is apparently malfunctioning, so perhaps I will give your method a try and see what happens.

[EDIT: Actually, your post made me realize why my double buffer is failing. Although I selected a pixel format as needed by my app, I never set it! Oops. I’m fixing that, and it should clear up the problem (and possibly more than one). So thanks!]

[EDIT 2: Yep, that did it! glFlush() is now gone. Closer and closer :slight_smile: ]
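
For completeness, the missing step was roughly the following, between choosing the format and creating the real context. This is a sketch, not the poster’s actual fix; note also that Windows only lets SetPixelFormat() succeed once per window, which is why many setups create the dummy context on a throwaway window:


	//actually commit the chosen format to the window's DC
	PIXELFORMATDESCRIPTOR PixelFormatDesc;
	DescribePixelFormat(DC, Format, sizeof(PIXELFORMATDESCRIPTOR), &PixelFormatDesc);
	SetPixelFormat(DC, Format, &PixelFormatDesc);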

Ooops, never mind.
wglGetPixelFormatAttribivARB and wglChoosePixelFormatARB are a good way to do it. I use them for getting a multisample format, but they can be used for any pixel format.