PDA

View Full Version : Inheritance + Pre-Transforming Mesh Vertices



ParagonArcade
12-08-2014, 10:04 PM
I am trying to use a derived class of Mesh to create a new object SolidRectangle(bad name), I also am trying to transform my vertices before sending them to the shader.

Every time I load a vertex, I save two copies: one in vertices, one in vertlets.

My plan is to multiply the position of each vertex by the transformation matrix and store these values in vertlets, so that I can translate, rotate, and scale my objects, and then pass them to the shader, which multiplies by the projection matrix to get the final position.

I traced my code and the matrix-vector multiplication is coming out right, but it doesn't work. It's as if the transformation isn't being applied. I already tried making my std::vector into <Vertex*> with pointers and it broke everything, even after I changed "." to "->" on everything. Do I need to bind to the vertex buffer object every frame when I update my vertices? I was under the impression it was saving the location in memory of the Vertex array and I could update these values each frame?




// Renderable mesh.
// Owns a GL vertex array object (vao), vertex buffer (vbo) and index buffer
// (ibo), plus two CPU-side vertex arrays: "vertices" keeps the untransformed
// originals and "vertlets" keeps per-frame transformed copies (rebuilt in
// Update()).
class Mesh
{
private:
// Source file for this mesh; not used in the code visible here.
string filename;

// GL object names; 0 means "not yet created".
GLuint ibo = 0;
GLuint vbo = 0;
GLuint vao = 0;

vec_int elements;    // index data consumed by glDrawElements
vec_vertex vertices; // original, untransformed vertices
vec_vertex vertlets; // transformed working copies, overwritten each Update()

// NOTE(review): raw owning pointer? Allocation/release is not visible in
// this chunk — confirm the ctor/dtor manage it (rule-of-five concerns).
Transform* transform;

void addElements(); // upload "elements" into ibo
void addVertices(); // upload "vertices" into vbo + set attrib layout
void addVertlets(); // upload "vertlets" into vbo + set attrib layout
public:
Mesh();
~Mesh();

void addElement(int value);
void addVertex(Vertex value);

// Creates/uploads the GL buffer state into the VAO; call once after all
// vertices/elements have been added.
void Compile();

// Recomputes "vertlets" from "vertices" using the current transform.
// NOTE(review): as written elsewhere in this file, this only touches CPU
// memory — the VBO is not refreshed, so the GPU keeps stale data.
void Update(float dt);
void Render();

int getNumOfElements() { return elements.size(); }
int getNumOfVertices() { return vertices.size(); }
// NOTE(review): both getters return by value — each call copies the whole
// vector; consider const references if callers only read.
std::vector<int> getElements(){ return elements; }
std::vector<Vertex> getVertices(){ return vertices; }

Transform* getTransform() { return transform; }
};

...

void Mesh::addVertlets()
{
if (vbo)
{
//Bind
glBindBuffer(GL_ARRAY_BUFFER, vbo);

//Load Vertices
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)* vertlets.size(), &vertlets[0], GL_STATIC_DRAW);

glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)sizeof(Vector3f));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(Vector3f)+sizeof(Vector3f)));
glEnableVertexAttribArray(2);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(Vector3f)+sizeof(Vector3f)+sizeof (Vector3f)));
glEnableVertexAttribArray(3);
}
}

...

// Records the mesh's buffer state into its vertex array object.
// Everything addElements()/addVertlets() bind and configure while the VAO
// is bound is captured by it, so Render() later only rebinds the VAO.
void Mesh::Compile()
{
glBindVertexArray(vao);

//Add Object
addElements();
addVertlets();

// Unbind so later buffer calls can't accidentally modify this VAO.
glBindVertexArray(0);
}

void Mesh::Update(float dt)
{
Matrix4f transformation = getTransform()->getTransformation();

for (int i = 0; i < getNumOfVertices(); i++)
{
vertlets[i].setPosition(transformation * vertices[i].getPosition());
}
}

// Draws the mesh using the state captured in its VAO.
//
// Fixes vs. original:
//  * removed the glDisableVertexAttribArray calls that ran AFTER
//    glBindVertexArray(0): attribute enable/disable state lives inside the
//    VAO, so those calls were modifying the default vertex-array state
//    instead (an error in a core profile) — and they only disabled 3 of
//    the 4 attributes enabled at compile time anyway.
//  * removed the per-frame debug cout (it printed the first vertlet
//    position every frame for any mesh with exactly 36 indices).
void Mesh::Render()
{
    //Bind
    glBindVertexArray(vao);

    glDrawElements(GL_TRIANGLES, elements.size(), GL_UNSIGNED_INT, 0);

    //Unbind — the VAO retains all attribute state for the next frame.
    glBindVertexArray(0);
}

// Appends one index to the element list.
void Mesh::addElement(int value)
{
    elements.push_back(value);
}

// Stores the vertex twice: a pristine copy in "vertices" and a working
// copy in "vertlets" that Update() later overwrites with the transformed
// position.
void Mesh::addVertex(Vertex value)
{
    vertices.push_back(value);
    vertlets.push_back(value);
}

...

#ifndef RECTANGLE_H
#define RECTANGLE_H

#include "Mesh.h"

// Axis-aligned box mesh ("bad name" per the author — it is a 3D box).
// The constructors generate the faces and compile the GL buffers.
class SolidRectangle : public Mesh
{
private:
// Requested dimensions; stored but only the second ctor feeds them into
// the transform's scale (as half-extents).
float width;
float height;
float depth;
public:
SolidRectangle();
SolidRectangle(float width, float height, float depth);

// NOTE(review): Mesh::Update/Render are not declared virtual in Mesh, so
// these declarations HIDE the base versions rather than override them —
// any code holding a Mesh* will call Mesh::Update/Render, never these.
// Confirm whether virtual dispatch is intended.
void Update(float dt);
void Render();
};

#endif

...

SolidRectangle::SolidRectangle()
{
width = 1;
height = 1;
depth = 1;

generateFaceTop(this);
generateFaceBot(this);
generateFaceFront(this);
generateFaceBack(this);

//Compile Mesh
Compile();

getTransform()->setPosition(0, 0, 0);
getTransform()->setRotation(0, 0, 0);
getTransform()->setScale(1.0f, 1.0f, 1.0f);
}
SolidRectangle::SolidRectangle(float width, float height, float depth)
{
this->width = width;
this->height = height;
this->depth = depth;

generateFaceTop(this);
generateFaceBot(this);
generateFaceFront(this);
generateFaceBack(this);

//Compile Mesh
Compile();

getTransform()->setPosition(0, 0, 0);
getTransform()->setRotation(0, 0, 0);
getTransform()->setScale(width/2, height/2, depth/2);
}

...

// Appends the z = -10 face of the box to the mesh: four new vertices and
// two triangles whose indices are relative to the mesh's current vertex
// count. (The original comment said "Back Plane" — the geometry is the
// face at -Z with its 3rd vertex attribute set to (0, 0, -1).)
static void generateFaceFront(Mesh* mesh)
{
    const int base = mesh->getNumOfVertices();

    // Triangle 1: (0, 2, 1); triangle 2: (0, 3, 2) — offsets from "base".
    mesh->addElement(base + 0); mesh->addElement(base + 2); mesh->addElement(base + 1);
    mesh->addElement(base + 0); mesh->addElement(base + 3); mesh->addElement(base + 2);

    // Shared attributes: (1,1,1) for attr 1 and (0,0,-1) for attr 2 —
    // presumably colour and normal; confirm against the Vertex ctor.
    const Vector3f ones(1, 1, 1);
    const Vector3f minusZ(0, 0, -1);

    mesh->addVertex(Vertex(Vector3f( 10,  10, -10), ones, minusZ, Vector2f(1, 1)));
    mesh->addVertex(Vertex(Vector3f(-10,  10, -10), ones, minusZ, Vector2f(0, 1)));
    mesh->addVertex(Vertex(Vector3f(-10, -10, -10), ones, minusZ, Vector2f(0, 0)));
    mesh->addVertex(Vertex(Vector3f( 10, -10, -10), ones, minusZ, Vector2f(1, 0)));
}

...

//Setup Player
player = new Player();

player->setMesh(new SolidRectangle(50.0f, 100.0f, 30.0f));
player->setPosition(Vector3f(0, 100.0f, 0));

...

//Update Player Position in Transform
mesh->getTransform()->setPosition(Position);

//Update Mesh Transformation
mesh->Update(dt);

...

void Game::Update(float dt)
{
temp += dt;

player->setPosition(Vector3f(10.0f * sin(temp), 0, 10.0f * cos(temp)));
player->Update(dt);
}



The width of that hallway is 200.0f and the faces start out at 20 width and should be position so its above the floor and scaled. It should be much larger...

1537

When I switch everything over to Vertex*, the line that fails is:



glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)* vertlets.size(), &vertlets[0], GL_STATIC_DRAW);


When I use std::vector<Vertex>, then passing &vertlets[0] works fine. But for std::vector<Vertex*> this won't work. How do I fix this?

On the StackOverflow they suggest:



glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)* vertlets.size(), vertlets.data(), GL_STATIC_DRAW);


But this does not work for me either.


Edit:
Changed to GL_DYNAMIC_DRAW, still not working. I have verified through cout'ing the position that the data is being updated properly the line before glDrawElements() is called. Still not working.


glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * vertlets.size(), &vertlets[0], GL_DYNAMIC_DRAW);

carsten neumann
12-09-2014, 01:49 PM
My plan is to multiple the position of the vertex by the transformation matrix and store these values in verlets. So that I can translate, rotate, scale my objects, and then pass them to the shader which multiples by the projection matrix to get the final position.


So the GPU still has to perform a matrix times vector multiplication for each vertex - in other words you've not saved any work on the GPU. You can keep things more flexible without any loss of performance if you just send the shader the combined model view projection matrix and have the GPU do the transformation. GPUs are good at this sort of number crunching, take advantage of it ;)

GClements
12-09-2014, 08:57 PM
Do I need to bind to the vertex buffer object every frame when I update my vertices?
Yes

I was on the impression it was saving the location in memory of the Vertex array and I could update these values each frame?
No.

If you use client-side vertex arrays (which are deprecated in modern OpenGL), you can just modify the contents of the array and expect the new values to automatically be used on subsequent draw calls.

But a naive implementation is inefficient (the implementation must copy the contents of the vertex array from client memory to video memory on each draw call), and more advanced implementations require low-level hacks such as memory snooping, which enable the implementation to track which parts of the array have been modified — but this only partially solves the problem, as it can only report modifications at the granularity of entire pages, typically 4096 bytes.

Buffer objects solve this problem by requiring buffer access to be explicit. Either you replace the buffer store (glBufferData), modify specific portions of it (glBufferSubData), read portions of it (glGetBufferSubData), or map and unmap it (glMapBuffer etc). Mapping has a range of variants and flags which allow you to minimise both the amount of data which needs to be transferred between client memory and video memory.


So the GPU still has to perform a matrix times vector multiplication for each vertex - in other words you've not saved any work on the GPU.
I assumed that the intention is to avoid splitting the rendering into as many draw calls as there are object matrices. Sending pre-transformed vertices allows many objects to be sent in one draw call, without also having to send the object matrices and the information regarding which vertices use which matrix.

ParagonArcade
12-11-2014, 09:46 PM
So the GPU still has to perform a matrix times vector multiplication for each vertex - in other words you've not saved any work on the GPU. You can keep things more flexible without any loss of performance if you just send the shader the combined model view projection matrix and have the GPU do the transformation. GPUs are good at this sort of number crunching, take advantage of it ;)

I'm still figuring out the whole passing the matrices to the shaders thing. I have a function: getProjectionMatrix() which returns the product of the modelview transformation matrix, camera transforms, etc. Right now I am passing only one to my shader program for the world / fps camera. But to rotate and place each individual object in my world, I believe I would need to use a "varying" type for that projection matrix?

ParagonArcade
12-11-2014, 09:53 PM
If you use client-side vertex arrays (which are deprecated in modern OpenGL), you can just modify the contents of the array and expect the new values to automatically be used on subsequent draw calls.

I don't want to use any deprecated code. I am aiming for code compliant with version 440.

I expected it to act as you state above, but it did the opposite. It didn't update the vertices' values even though "client-side" I did modify them. Was that "can" a typo for "can't"...?



Buffer objects solve this problem by requiring buffer access to be explicit. Either you replace the buffer store (glBufferData), modify specific portions of it (glBufferSubData), read portions of it (glGetBufferSubData), or map and unmap it (glMapBuffer etc). Mapping has a range of variants and flags which allow you to minimise both the amount of data which needs to be transferred between client memory and video memory.


I assumed that the intention is to avoid splitting the rendering into as many draw calls as there are object matrices. Sending pre-transformed vertices allows many objects to be sent in one draw call, without also having to send the object matrices and the information regarding which vertices use which matrix.
Yes. I was thinking I could save myself a lot of work by just pre-transforming my modelview but that won't work as I see now.

But I am using buffer objects right? Isn't this a buffer object?


// Uploads the untransformed vertex array into the VBO and records the
// interleaved attribute layout into the currently bound VAO.
//
// Fix vs. original: early-out when the vector is empty — &vertices[0] on
// an empty std::vector is undefined behaviour.
void Mesh::addVertices()
{
    if (vbo == 0 || vertices.empty())
        return;

    glBindBuffer(GL_ARRAY_BUFFER, vbo);

    // GL_STATIC_DRAW is appropriate here: "vertices" holds the original,
    // never-modified data (per-frame changes go through "vertlets").
    glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * vertices.size(), vertices.data(), GL_STATIC_DRAW);

    // Attribute layout; offsets follow the member order of Vertex
    // (position, then two further Vector3f fields, then a Vector2f).
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)0);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)sizeof(Vector3f));
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(2 * sizeof(Vector3f)));
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(3 * sizeof(Vector3f)));
    glEnableVertexAttribArray(3);
}

GClements
12-12-2014, 12:10 AM
I expected it to act as you state above, but it did the opposite. It didn't update the vertices values even though "client-side" I did modify them. Was that "can" a type for "can't"...?
No.

A client-side vertex array means that you're passing a pointer to client (system, CPU) memory to glVertexAttribPointer(), rather than an offset into a buffer object (which is typically in video memory).

If you're binding a buffer to GL_ARRAY_BUFFER and passing an offset to glVertexAttribPointer(), then you have to update the buffer's contents explicitly, either with glBufferSubData() or by mapping the buffer and updating the contents of the mapped range.

Unless you request a coherent mapping by passing the GL_MAP_COHERENT_BIT to glBufferStorage or glMapBufferRange, you have to either unmap the buffer or call glFlushMappedBufferRange or glMemoryBarrier before the changes are guaranteed to take effect. Also, unless you request a persistent mapping (GL_MAP_PERSISTENT_BIT), it is an error to issue GL commands which read from the buffer while it is mapped. Note that coherent and persistent mappings were added in OpenGL 4.4, so using those features will significantly limit compatibility.


But I am using buffer objects right? Isn't this a buffer object?
Yes. Which is why you can't simply modify the client-side array and expect it to have an effect.

glBufferData() allocates the buffer's storage and (if the data argument is non-null) copies the supplied data into it. It doesn't remember the data pointer and won't attempt to automatically synchronise the buffer's contents with the client-side array.

GClements
12-12-2014, 12:34 AM
I'm still figuring out the whole passing the matrices to the shaders thing. I have a function: getProjectionMatrix() which returns the product of the modelview transformation matrix, camera transforms, etc. Right now I am passing only one to my shader program for the world / fps camera. But to rotate and place each individual object in my world, I believe I would need to use a "varying" type for that projection matrix?

Historically, OpenGL maintained separate model-view and projection matrices. The model-view matrix transforms vertices from object space to eye space, then the projection matrix transforms the vertices from eye space to clip space.

Certain fixed-function operations (primarily lighting) are performed in eye space. For these operations to work correctly, the model-view matrix must have no perspective projection component (i.e. the bottom row of the matrix must be [0,0,0,1]), and the projection matrix must have no translation component (i.e. the viewpoint must be [0,0,0] in eye space).

If a vertex shader is used, it can implement whatever transformations it wants; the implementation simply takes the result from gl_Position, which is in clip coordinates. However, for reasons of practicality, you typically still want to perform calculations such as lighting in an affine space, which means keeping perspective projection separate from other transformations.

Transformations which are constant for an entire draw call would be passed in a uniform variable. If you want to specify a different transformation for different objects within a single draw call, you could either


Make it a vertex attribute (or rather, 4 vertex attributes, one for each column). This will waste memory, as you'll be specifying the complete matrix for each vertex of an object.
Pass a uniform containing an array of matrices, which is indexed by an integer vertex attribute. This would only require one or two bytes per vertex (depending upon how many transformations you have).


Incidentally: the "varying" qualifier is deprecated. It was originally used to refer to variables passed from the vertex shader to the fragment shader (i.e. vertex shader outputs and fragment shader inputs). Vertex shader inputs used the "attribute" qualifier.

Modern syntax uses the "in" and "out" qualifiers instead. So vertex attributes are specified in the vertex shader with the "in" qualifier. The variables which are passed to the fragment shader use "out" in the vertex shader and "in" in the fragment shader (or the geometry shader, if present).