Transition from the old opengl to the modern opengl

Hi forum,

I am trying to update an old opengl code to the modern opengl one and I am not getting the expected output. I believe that I am confused with the matrix manipulation . Please check the old code snippet:


/*
 * Draws a Hilbert curve from an L-system string using the legacy
 * fixed-function pipeline.
 *
 * Interpretation of the string:
 *   'F'       - draw a line segment of LINE_LENGTH along the current +X axis
 *   '-' / '+' - turn left / right by 90 degrees about the Z axis
 *
 * (x, y) is the starting pen position in the current model space;
 * lrep is the generated L-system string to interpret.
 */
void drawHilbertCurve(float x, float y, std::string lrep)
{
  glClear(GL_COLOR_BUFFER_BIT);
  //save the current model-view matrix; restored by glPopMatrix() at the end
  glPushMatrix();

  /*
    browse through the grammar of the hilbert curve that is generated
    for a number of iterations
   */
  for(std::string::iterator it = lrep.begin(); it != lrep.end(); it++)
    {
      if(*it == 'F')
	{
	  //define the color of the primitive to be drawn
	  glColor4f(0.5f, 1.0f, 0.0f, 1.0f);
	  //define the line width
	  glLineWidth(4.0f);

	  //two 2D vertices: segment start and end, LINE_LENGTH apart in +X
	  GLfloat vertices[] = {x, y, x + LINE_LENGTH, y};

	  //advance the pen to the end of the segment just defined
	  x = x + LINE_LENGTH;

	  glEnableClientState(GL_VERTEX_ARRAY);
	  glVertexPointer(2, GL_FLOAT, 0, vertices);
	  //BUG FIX: the count parameter of glDrawArrays() is the number of
	  //VERTICES, not floats; the array holds 2 vertices (4 floats).
	  //Passing 4 here read 2 vertices past the end of the array.
	  glDrawArrays(GL_LINES, 0, 2);
	  glDisableClientState(GL_VERTEX_ARRAY);

	}
      if(*it == '-' || *it == '+')
	{
	  //move the model-space origin to the current pen position so the
	  //upcoming rotation pivots around it
	  glTranslatef(x, y, 0.0f);
	  //the pen is now at the (new) origin
	  x = 0.0f;
	  y = 0.0f;
	  //'-' turns left (+90 deg), '+' turns right (-90 deg)
	  float angle = ((*it == '-') ? 90.0f : -90.0f);
	  glRotatef(angle, 0.0f, 0.0f, 1.0f);
	}
    }
  glPopMatrix();
}


The corresponding code in modern opengl as follows:


/*
 * Builds mVertices (a line-strip vertex list) by interpreting the Hilbert
 * L-system string: 'F' is meant to advance one unit, '-'/'+' to turn by
 * +/-90 degrees about Z.
 *
 * NOTE(review): this version pushes the UNTRANSFORMED point for every 'F'
 * (always stepping along the raw X axis) and only keeps the LAST rotation
 * in mHilbertOrientation, so the turns never affect the stored vertices --
 * this is the defect discussed in the thread below.
 */
void Hilbert2DScene::populateHilbertLSystem(const std::string &LString)
{

    float x = 0.0f;
    float y = 0.0f;
    float z = 0.0f;   // never read below

    //the vertex to start from
    glm::vec3 p1(x,y,0.0);

    mVertices.push_back(p1);

    // running end point of the previous segment (zero-initialized by glm)
    glm::vec3 p2;

    for(std::string::const_iterator it = LString.begin(); it != LString.end();it++)
    {
        if(*it == 'F')
        {
            // step one unit along the (untransformed) X axis
            p2.x += 1.0;

            mVertices.push_back(p2);
        }
        if(*it == '-' || *it == '+')
        {
            // translation to the current end point, mirroring glTranslatef()
            glm::mat4 trans = glm::translate(glm::mat4(1.0),p2);
            x = 0.0f;
            y = 0.0f;

            // '-' turns left (+90 deg), '+' turns right (-90 deg)
            float angle = ((*it == '-') ? 90.0f : -90.0f);

            // overwritten on every turn; only the final value survives
            mHilbertOrientation = glm::rotate(trans,glm::radians(angle),glm::vec3(0.0,0.0,1.0));
        }
    }



    //normalize the inserted value inside the vector
    //normalize the points data
    // N = 2^mIteration grid cells per side; scale x,y towards [0,1].
    // NOTE(review): divides by zero when mIteration == 0 (N - 1 == 0).
    unsigned long N = 1 << mIteration;


    for(std::vector<glm::vec3>::iterator it = mVertices.begin(), itend = mVertices.end(); it != itend;++it)
    {
        (*it).x /= (N - 1);
        (*it).y /= (N - 1);
        (*it).z /= 0.5;   // NOTE(review): this DOUBLES z rather than scaling it down
    }
}



void Hilbert2DScene::render()
{
    if(!mInitialized)
        return;

    mShader->Use();

    GL_CHECK_ERRORS;

    //allocate the buffer if the buffer is not allocated yet
    if(!mIsBufferAllocated)
    {

       populateHilbertLSystem(hilbertWithLSystem(mIteration));


        //specify the amount of storage we want to use for the buffer
        /*
         * 1st - the target the buffer I want to allocate storage for is bound to
         * 2nd - how big the buffer should be
         * 3rd - pointer to some initial data for the buffer
         * */
        glBufferData(GL_ARRAY_BUFFER,sizeof(glm::vec3) * mVertices.size(),&mVertices[0],GL_STATIC_DRAW);

        mIsBufferAllocated = true;

        mShowPoints = getNumberOfPointsForIteration(mIteration,mDim);


    static const GLfloat color[] = {0.0f,0.0f,0.0f,1.0f};
    static const GLfloat depth = 1.0f;

    glClearBufferfv(GL_COLOR,0,color);
    glClearBufferfv(GL_DEPTH,0,&depth);


    //draw stuff

    //transfer the scene along the z-axis
    glm::mat4 T = glm::translate(glm::mat4(1.0f),glm::vec3(0.0f,0.0f,mDist));

    //centralize the whole scene
    glm::mat4 centralizeTranslation = glm::translate(T,glm::vec3(-0.5,-0.5,-0.5));

    centralizeTranslation = centralizeTranslation * mHilbertOrientation;

    //the rotation matrix along X concatenated with the translation matrix
    glm::mat4 Rx = glm::rotate(centralizeTranslation, glm::radians(static_cast<float>(mRX)),glm::vec3(1.0f,0.0f,0.0f));

    //rotation matrix along Y is concatenated with the rotation matrix along X
    glm::mat4 MV = glm::rotate(Rx,glm::radians(static_cast<float>(mRY)),glm::vec3(0.0f,1.0f,0.0f));


    mModelViewMatrix = mProjectionMatrix * MV;

    /*
     * Bind the vertex array object and vertex buffer object
     * */
    glBindVertexArray(mVaoID);
    /*
     * Now bind it to the context using the GL_ARRAY_BUFFER binding point
     * */
    glBindBuffer(GL_ARRAY_BUFFER,mVboVerticesID);

    /*
     * Tell OpenGL to use the data in the buffer to fill the vertex attribute rather than
     * using the data we give it using one of the functions as glVertexAttrib*() functions
     * */
    glEnableVertexAttribArray((GLuint)0);


    //set the uniform matrix value that will be passed to the shader
    glUniformMatrix4fv(mShader->getUniform("MVP"),1,GL_FALSE,glm::value_ptr(mModelViewMatrix));

    glUniform4fv(mShader->getUniform("modelColor"),1,glm::value_ptr(glm::vec4(1.0,1.0,0.0,1.0)));

    if(mShowPoints < getNumberOfPointsForIteration(mIteration,mDim))
        ++mShowPoints;

    /*
     * OpenGL draw command
     * Sends vertices to the OpenGL pipeline
     *
     * 1st - what type of graphics primitive we want to render
     * 2nd -
     * 3rd - the number of vertices we want to render
     * */
    glDrawArrays(GL_LINE_STRIP,0,mShowPoints);

    glBindBuffer(GL_ARRAY_BUFFER,0);

    glDisableVertexAttribArray(0);
    glBindVertexArray(0);

    GL_CHECK_ERRORS;

    mShader->UnUse();
}


Please note that hilbertWithLSystem() just generates the pool of characters represented as a string. If I include mHilbertOrientation in the render function I see a blank screen, and if I do not include it I just see a single line — in either case this is not the expected output. I am supposed to get a Hilbert curve here for a certain number of iterations. I guess I am having trouble mapping the glTranslate or glRotate functions. What do you think?
Some references would be of great help.

Thanks

In the original version, you render the vertices as you generate them (more or less), so the vertex positions are transformed by the current rotation.

In the new version, you accumulate a list of untransformed vertices, and accumulate the transformations, then render all of the vertices with the final transformation. You need to transform each vertex by the current transformation before storing it.

Thanks for the hint!

Now I see something more on the screen, but not exactly I am expecting. So I have more to do here with more hints of course. Here goes the code I updated since the last tips I got:


/*
 * Second attempt: interprets the Hilbert L-system string into mVertices,
 * now transforming a tracking point p on each turn.
 *
 * NOTE(review): 'F' still steps along the raw X axis (p2.x += 1.0 before
 * any transform is applied), so the accumulated rotations never change the
 * direction of travel -- the defect pointed out in the reply below.
 */
void Hilbert2DScene::populateHilbertLSystem(const std::string &LString)
{

    float x = 0.0f;
    float y = 0.0f;
    float z = 0.0f;   // never read below

    //the vertex to start from
    glm::vec3 p1(x,y,0.0);

    mVertices.push_back(p1);

    // point actually pushed into mVertices (zero-initialized by glm)
    glm::vec3 p2;

    // homogeneous tracking point (w = 1 so translations apply)
    glm::vec4 p(0.0,0.0,0.0,1.0);

    for(std::string::const_iterator it = LString.begin(); it != LString.end();it++)
    {
        if(*it == 'F')
        {

            //update the positions that
            //will be stored
            p2.x = p.x;
            p2.y = p.y;
            p2.z = p.z;

            // step one unit along the (untransformed) X axis
            p2.x += 1.0;

            mVertices.push_back(p2);
        }
        if(*it == '-' || *it == '+')
        {
            //create a translation matrix to the current end point
            glm::mat4 trans = glm::translate(glm::mat4(1.0),p2);

            //update the point to be stored
            p = trans * p;

            //re-initialize the points
            x = 0.0f;
            y = 0.0f;

            //define the angle: '-' left (+90 deg), '+' right (-90 deg)
            float angle = ((*it == '-') ? 90.0f : -90.0f);

            // overwritten on every turn; only the final value survives
            mHilbertOrientation = glm::rotate(trans,glm::radians(angle),glm::vec3(0.0,0.0,1.0));

            p = mHilbertOrientation * p;
        }
    }



    //normalize the inserted value inside the vector
    //normalize the points data
    // N = 2^mIteration; NOTE(review): divides by zero when mIteration == 0
    unsigned long N = 1 << mIteration;


    for(std::vector<glm::vec3>::iterator it = mVertices.begin(), itend = mVertices.end(); it != itend;++it)
    {
        (*it).x /= (N - 1);
        (*it).y /= (N - 1);
        (*it).z /= 0.5;   // NOTE(review): this DOUBLES z rather than scaling it down
    }
}


And I am getting the following image of a hilbert curve for a single iteration.

http://i.imgur.com/KGeqoOv.png

The correct image should look like :

http://i.imgur.com/FdQdbMF.png

Looking forward to more hints , guys!

Thanks

You need to transform each vertex before appending it to the vector.

Transforming only when you change rotation won’t work, because each “F” is moving the vertex along the untransformed X axis.

[QUOTE=GClements;1280148]You need to transform each vertex before appending it the vector.

Transforming only when you change rotation won’t work, because each “F” is moving the vertex along the untransformed X axis.[/QUOTE]

Did you mean the following to be done ?


...............................
...............................
        if(*it == 'F')
        {

            p.x += 1;

            p = mHilbertOrientation * p;

            //update the positions that
            //will be stored
            p2.x = p.x;
            p2.y = p.y;
            p2.z = p.z;

            mVertices.push_back(p2);
        }
......................
......................

Thanks

Hello forum,

Lets get down to basics to understand the modelling transformation better. The following will contain code snippet written in old OpenGL pipeline and comments within the snippet to make sure that I have understood the concept. You are most welcome to correct or complement any of them.


  for(std::string::iterator it = lrep.begin(); it != lrep.end(); it++)
    {
      if(*it == 'F')
	{
	  //define the color of the primitive to be drawn
	  glColor4f(0.5f, 1.0f, 0.0f, 1.0f);
	  //define the line width
	  glLineWidth(4.0f);

	  /*define the vertex array
	    that define the start vertex and the end vertex for the line
	    the end vertex is differed by LINE_LENGTH  in the x-direction
	   */
	  GLfloat vertices[] = {x, y, x + LINE_LENGTH, y};

	  //the following fucked up every thing
	  //GLfloat vertices[] = {x, y, x , y + LINE_LENGTH };

	  //increment the x - position - update the x-position
	  x = x + LINE_LENGTH;
	  
	  glEnableClientState(GL_VERTEX_ARRAY);
	  glVertexPointer(2, GL_FLOAT, 0, vertices);

	  /*
	    draw the line as defined by the vertices array
	   */
	  glDrawArrays(GL_LINES, 0, 4);


	  glDisableClientState(GL_VERTEX_ARRAY);

	}
      if(*it == '-' || *it == '+')
	{
	  /*
	    x-value is probably updated in the previous condition
	    so the translation matrix updates the current model-view matrix

	    What would you do here in case of modern opengl conversion ? 
	    1.declare a translation matrix
            2.multiply the translation matrix by the vertex and insert the vertex into the vector 
               which will be rendered later.
	   */
	  glTranslatef(x, y, 0.0f);


	  /*
	    re-initialize the x and y value
	   */
	  x = 0.0f;
	  y = 0.0f;

	  /*
	    define the angle based on the character we read from the string
	   */
	  float angle = ((*it == '-') ? 90.0f : -90.0f);


	  /*
	    multiplies the current matrix with the matrix that rotates the object 
	    by the angle we have just defined.


	    What would you do here in case of modern opengl conversion ? 
	    1.declare a rotation matrix
            2.multiply the rotation matrix with the translation matrix, the resulted matrix will be the matrix
               will be used to transform all the points
	   */
	  glRotatef(angle, 0.0f, 0.0f, 1.0f);
	}
    }
  glPopMatrix();
}


What will be correspondent functionalities for the old OpenGL command glPushMatrix()/glPopMatrix() ?

Now I tried to map the above function in the Modern OpenGL context as follows:


/*
 * Third attempt: interprets the Hilbert L-system string into mVertices,
 * transforming the tracking point p on every 'F'.
 *
 * NOTE(review): p = mHilbertOrientation * p re-applies the FULL accumulated
 * transform to the already-transformed position on every 'F', compounding
 * the transformation instead of applying it once per vertex -- so the
 * output is still wrong, as the reply below explains.
 */
void Hilbert2DScene::populateHilbertLSystem(const std::string &LString)
{

    float x = 0.0f;
    float y = 0.0f;
    float z = 0.0f;   // never read below

    //the vertex to start from
    glm::vec3 p1(x,y,0.0);

    //insert the vertex into the container
    mVertices.push_back(p1);

    // point actually pushed into mVertices (zero-initialized by glm)
    glm::vec3 p2;

    // homogeneous tracking point (w = 1 so translations apply)
    glm::vec4 p(0.0,0.0,0.0,1.0);

    for(std::string::const_iterator it = LString.begin(); it != LString.end();it++)
    {
        if(*it == 'F')
        {
            //increment the x-component
            p.x += 1;

            //transform the vertex with the matrix that is initialized as
            //identity at the class constructor (re-applied every 'F')
            p = mHilbertOrientation * p;

            //update the positions that
            //will be stored
            p2.x = p.x;
            p2.y = p.y;
            p2.z = p.z;

            //insert the vertex into the container
            mVertices.push_back(p2);
        }
        if(*it == '-' || *it == '+')
        {
            //create a translation matrix to the current end point
            glm::mat4 trans = glm::translate(glm::mat4(1.0),p2);

            //re-initialize the points
            x = 0.0f;
            y = 0.0f;

            //define the angle: '-' left (+90 deg), '+' right (-90 deg)
            float angle = ((*it == '-') ? 90.0f : -90.0f);

            // overwritten on every turn; only the final value survives
            mHilbertOrientation = glm::rotate(trans,glm::radians(angle),glm::vec3(0.0,0.0,1.0));

        }
    }



    //normalize the inserted value inside the vector
    //normalize the points data
    // N = 2^mIteration; NOTE(review): divides by zero when mIteration == 0
    unsigned long N = 1 << mIteration;


    for(std::vector<glm::vec3>::iterator it = mVertices.begin(), itend = mVertices.end(); it != itend;++it)
    {
        (*it).x /= (N - 1);
        (*it).y /= (N - 1);
        (*it).z /= 0.5;   // NOTE(review): this DOUBLES z rather than scaling it down
    }
}

Please do mention if any issue is missing while mapping to the new standard .

Thanks

Those aren’t relevant here.

Your problems arise from trying to merge multiple glDrawArrays() commands into a single command. You’d have exactly the same problems if you tried to use the new approach with legacy OpenGL. Conversely, you could translate the old program structure directly to modern OpenGL and it would work fine.

The legacy OpenGL matrix transformations are equivalent to having a vertex shader which does (amongst many other things)


    gl_Position = gl_ProjectionMatrix * gl_ModelViewMatrix * gl_Vertex;

I.e. the vertex position used for rendering is obtained by transforming the supplied object-space vertex position by the model-view and projection matrices at the time of the draw call.

Your original program uses many draw calls, and changes the model-view matrix between them. Your new program accumulates the vertices and draws them all with a single call.

Thus, you need to transform each vertex as it is added to the array, so that it uses the transformation’s value at that point in time, not the final value.

[QUOTE=GClements;1280159]Those aren’t relevant here.

Conversely, you could translate the old program structure directly to modern OpenGL and it would work fine.

[/QUOTE]

Could you please help me to formulate the modern OpenGL structure directly as you have mentioned ?

I think I am doing it as follows:


p = mHilbertOrientation * p;


I am multiplying the vertices by the cumulative model matrix before inserting into the container. Did you mean to update and insert the position after each of the affine transformation ?

This is a direct conversion of your code:


  GLint a_position = glGetAttribLocation(program, "position");
  GLint u_color = glGetUniformLocation(program, "color");
  GLint u_matrix = glGetUniformLocation(program, "matrix");
  glm::mat4 mHilbertOrientation(1.0f);

  GLuint vertex_buffer;
  glGenBuffers(1, &vertex_buffer);
  glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
  glBufferData(GL_ARRAY_BUFFER, 4*sizeof(GLfloat), (const GLvoid *)0, GL_DYNAMIC_DRAW);

  glVertexAttribPointer(a_position, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)0);

  glUseProgram(program);

  for(std::string::iterator it = lrep.begin(); it != lrep.end(); it++)
    {
      if(*it == 'F')
	{
	  //define the color of the primitive to be drawn
	  glUniform4f(u_color, 0.5f, 1.0f, 0.0f, 1.0f);
	  //define the line width
	  glLineWidth(4.0f);
 
	  /*define the vertex array
	    that define the start vertex and the end vertex for the line
	    the end vertex is differed by LINE_LENGTH  in the x-direction
	   */
	  GLfloat vertices[] = {x, y, x + LINE_LENGTH, y};
 
	  //increment the x - position - update the x-position
	  x = x + LINE_LENGTH;
 
	  glEnableVertexAttribArray(a_position);
          glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(vertices), vertices);
 
	  /*
	    draw the line as defined by the vertices array
	   */
          glUniformMatrix4fv(u_matrix, 1, GL_FALSE, glm::value_ptr(mHilbertOrientation));
	  glDrawArrays(GL_LINES, 0, 4);
 
 
	  glDisableVertexAttribArray(a_position);
 
	}
      if(*it == '-' || *it == '+')
	{
	  /*
	    x-value is probably updated in the previous condition
	    so the translation matrix updates the current model-view matrix
 
	    What would you do here in case of modern opengl conversion ? 
	    1.declare a translation matrix
            2.multiply the translation matrix by the vertex and insert the vertex into the vector 
               which will be rendered later.
	   */
	  mHilbertOrientation = mHilbertOrientation * glm::translate(x, y, 0.0f);
 
 
	  /*
	    re-initialize the x and y value
	   */
	  x = 0.0f;
	  y = 0.0f;
 
	  /*
	    define the angle based on the character we read from the string
	   */
	  float angle = ((*it == '-') ? 90.0f : -90.0f);
 
 
	  /*
	    multiplies the current matrix with the matrix that rotates the object 
	    by the angle we have just defined.
 
 
	    What would you do here in case of modern opengl conversion ? 
	    1.declare a rotation matrix
            2.multiply the rotation matrix with the translation matrix, the resulted matrix will be the matrix
               will be used to transform all the points
	   */
	  mHilbertOrientation = mHilbertOrientation * glm::rotate(angle, 0.0f, 0.0f, 1.0f);
	}
    }
  glPopMatrix();
}

The above assumes a vertex shader along the lines of


uniform vec4 color;
uniform mat4 matrix;
in vec2 position;
out vec4 vcolor;

void main()
{
    vcolor = color;
    // BUG FIX: `position` is a vec2 and a mat4 can only multiply a vec4,
    // so `matrix * position` does not compile. Promote the 2D point to
    // homogeneous coordinates (z = 0, w = 1) before transforming.
    gl_Position = matrix * vec4(position, 0.0, 1.0);
}

That overwrites the untransformed position.

With your existing code, you need to retain the untransformed position, because your handling of “F” involves adding an untransformed offset vector.

There are several other ways by which you could implement this.

One option is to eliminate the current position entirely, and just keep a current matrix. “F” would append a translation, while “+” and “-” append a rotation. The vertices which are added to the array would be obtained by transforming the origin position (0,0,0,1) by the current matrix.

Another option is to maintain a current position and a current direction, both vectors. “F” would add the current direction to the current position, while “+” and “-” rotate the current direction vector. In this case, there’s no need to store a current matrix (you might store the matrices for the + and - rotations rather than generating them each time).

Thanks for the detailed hint with code snippet!

I read somewhere that it is not good call glDrawArrays(…) several times in loop. It is always better to pile up positional values in a container and render them all for once with one glDrawArrays() command. So in this post I am trying to break down in steps , one of the first options that you have suggested:

“One option is to eliminate the current position entirely, and just keep a current matrix. “F” would append a translation, while “+” and “-” append a rotation. The vertices which are added to the array would be obtained by transforming the origin position (0,0,0,1) by the current matrix.”

  1. Declare a matrix that will contain all the modelling transformation for the hilbert curve as follows:

     glm::mat4 currentMatrix = glm::mat4(1.0f); // class member variable, initialized inside the class constructor and has the class scope
   
  1. Update the current matrix with the translation vector, one we encounter ‘F’ in the string array, as follows:

        glm::vec3 translationVector = glm::vec3(1.0,0.0,0.0);
        currentMatrix = glm::translate(currentMatrix,translationVector);
   
  1. Insert the position into the container updated by the current matrix as follows:

      glm::vec4 pos(0.0,0.0,0.0,1.0); // local variable
      pos = currentMatrix * pos;
      mVertices.push_back(glm::vec3(pos.x,pos.y,pos.z));

      //re-initialize the positional value declared as local variable
      pos.x = 0.0;
      pos.y = 0.0;
      pos.z = 0.0;
   
  1. Update the current matrix with the rotational value once the iterator encounter ‘-’ / ‘+’ character inside the strings array:

      ..................
      ..................
      float angle = ((*it == '-') ? 90.0f : -90.0f);

      currentMatrix = glm::rotate(currentMatrix,glm::radians(angle),glm::vec3(0.0,0.0,1.0));


Please note that the immediate above snippet does also concatenate the previous translation with the translation vector. Now do I have to insert the positional value here as well after being updated by the current matrix ? I ask this because we are dealing with the two conditions as follows:



        if(*it == 'F')
        {
                //current matrix updated here

               // Should the position be entered here inside the container ????
        }

        if(*it == '-' || *it == '+')
        {
              //current matrix updated here

               // OR Should the position be entered here inside the container ????
        }



Please do comment if any of my explanation is not clear enough.

Thanks

This isn’t wrong, but it’s unnecessarily verbose compared to e.g.


      mVertices.push_back(glm::vec3(currentMatrix * glm::vec4(0.0,0.0,0.0,1.0)));

which does exactly the same thing.

There is no translation vector any more, only a matrix.

You can either append a vertex after each translation, or before or after each rotation; there’s no need to do both. It only makes a difference if you have multiple consecutive translations or multiple consecutive rotations. If the commands alternate between translation and rotation, either approach will generate one vertex for each pair.

If you want to avoid adding unnecessary vertices in the case of consecutive translations or consecutive rotations, one way to do it is to generate a vertex whenever a rotation follows a translation (and also one at the end, if the last command was a translation). E.g.


        // add the starting position
        bool need_vertex = false;
        if(*it == 'F')
        {
                //current matrix updated here
                need_vertex = true;
        }

        if(*it == '-' || *it == '+')
        {
              //current matrix updated here
              if (need_vertex) {
                      // add_vertex
                      need_vertex = false;
              }
        }
...
// at end of loop
if (need_vertex)
      // add_vertex

Thanks !

Now I get the expected image. The snippet that made it possible is as follows:



/*
 * Final (working) version: interprets the Hilbert L-system string into
 * mVertices by accumulating ALL modelling transforms into a single current
 * matrix and transforming the origin (0,0,0,1) by it for each emitted
 * vertex -- the modern-GL equivalent of the legacy matrix-stack approach.
 */
void Hilbert2DScene::populateHilbertLSystem(const std::string &LString)
{

    /*
     * Store the start position of the hilbert curve; later in the loop the
     * consecutive updates of the current matrix will produce all the
     * following positions.
     * */
    mVertices.push_back(glm::vec3(mHilbertCurrentMatrix * glm::vec4(0.0,0.0,0.0,1.0)));

    //unit step along the local +X axis, appended to the current matrix on 'F'
    glm::vec3 translationVector = glm::vec3(1.0,0.0,0.0);


    for(std::string::const_iterator it = LString.begin(); it != LString.end();it++)
    {
        if(*it == 'F')
        {

            /*
             * update the current matrix with the translation vector
             * (equivalent of glTranslatef in the legacy version)
             * */
            mHilbertCurrentMatrix = glm::translate(mHilbertCurrentMatrix,translationVector);

            /*
             * F means draw forward. Since we render in batch mode, store the
             * vertex position (origin transformed by the current matrix) to
             * be rendered later.
             *
             * At the beginning of the loop the matrix is an identity matrix.
             * */
            mVertices.push_back(glm::vec3(mHilbertCurrentMatrix * glm::vec4(0.0,0.0,0.0,1.0)));

        }
        if(*it == '-' || *it == '+')
        {
            /*
             * "−" means "turn left 90°", "+" means "turn right 90°"
             *
             * update the current matrix with a rotation about the z-axis
             * (equivalent of glRotatef in the legacy version)
             * */
            float angle = ((*it == '-') ? 90.0f : -90.0f);

            mHilbertCurrentMatrix = glm::rotate(mHilbertCurrentMatrix,glm::radians(angle),glm::vec3(0.0,0.0,1.0));

        }
    }



    //normalize the inserted value inside the vector
    //normalize the points data
    // N = 2^mIteration grid cells per side; scale x,y into [0,1].
    // NOTE(review): divides by zero when mIteration == 0 (N - 1 == 0).
    unsigned long N = 1 << mIteration;


    for(std::vector<glm::vec3>::iterator it = mVertices.begin(), itend = mVertices.end(); it != itend;++it)
    {
        (*it).x /= (N - 1);
        (*it).y /= (N - 1);
        (*it).z /= 0.5;   // NOTE(review): this DOUBLES z; harmless here since z stays 0
    }
}


Please do mention if there is any more improvement possible over the above snippet !

Now I encounter another issue, the program crashes with the following output:



terminate called after throwing an instance of 'std::bad_alloc'
  what():  std::bad_alloc



once I go up to iteration level 16. I guess this particular issue is not directly related to OpenGL. While running the debugger, I found that the function actually crashes while generating the string that will be traversed to generate the Hilbert curve.
This is the level I must try in this simulation so that the hilbert curve with iteration level 16 works in my other thesis project. Some reference would be helpful here as well.

The following link shows some of the images generated with the iteration level 1, 6, and 10.

http://i.imgur.com/plliLcy.png

http://i.imgur.com/XG8wgPE.png

http://i.imgur.com/OZzbyN0.png

Thanks

The length of the string increases four-fold with each extra level. I would expect the string for level 16 to occupy at least 4 gigabytes of memory. But I’d also expect the vertex array to be even larger. Storing each vertex as a pair of 16-bit integers (i.e. GL_SHORT or GL_UNSIGNED_SHORT) would improve matters, but it would probably still be too large to store the entire curve as a single array. Also, 16 bits is the absolute minimum, so you’d need to take some care with the conversion to avoid adjacent floating-point values being rounded to the same integer.

I would expect that the string will need to be generated lazily, i.e. consuming the characters as they are generated rather than trying to store the entire string in memory.

One way to do this would be to have distinct processes which read the string for one level from stdin and write the string for the next level to stdout. The renderer would read characters from a file or pipe rather than from a string stored in memory.

Another way would be to remove the “while” loop from the rendering code. Instead, the generator would call a function to process each character as it is generated.

Something similar will need to be done for the rendering. You aren’t going to be able to construct a vertex array of 2^32 vertices and render it in a single draw call. You’ll need to limit the size of the array, and whenever it gets full, render the data then copy the last element to the first and reset the size to 1.

But I believe that you’ll also need at least a 131072x131072 framebuffer in order to be able to see the lines (that will allow a one-pixel gap between lines). At one byte per pixel, that would be 16 GiB, and I’m not sure whether any existing OpenGL implementation supports that. So you’d need to render the image in tiles, i.e. perform the rendering process multiple times with different transformations.

Hi ,

Generating and simulating a hilbert curve for a grid size of 2^16 X 2^16 is impossible , which I was trying to do. Thanks for the explanation. Even I try to save it as an image file in the hard drive the corresponding image size will be 4 gigabytes. Doing range query within this size of image will be quite a resource intensive I think. And this is the resolution of the frame I have to work with here.

No, it’s not impossible. But a “naive” approach which would work for e.g. 8 levels isn’t going to work for 16 levels.

For the specific case of a Hilbert curve, you can use a recursive formulation to generate arbitrary portions on demand. I.e you can generate the result of zooming in on a very small region of a million-level curve, using a realistic amount of time and memory. In theory, you can do this for any L-system, but the Hilbert curve is a particularly straightforward case (each quarter of the curve occupies one quadrant of the total area, with this property applying recursively).

The idea is to generate the fractal toolpath for 3D printing and as a preparation I was attempting to simulate on screen first. The initial version of the application divide the printing region in to a grid mask and the span of the region is calculated as follows:


double span = static_cast<double>(std::numeric_limits<short int>::max()) - static_cast<double>(std::numeric_limits<short int>::min());

The above returns you a grid of [0,65536] in each dimension. The software initially generated the parallel scan tool path and it was not that complicated scenario since you start a separate line at distinct points with a constant scan distance and you also aware of the bounding box of the print region of a particular layer. As a result you can trim your scan region. But in the case of fractal (Hilbert), we start at a point and end at another point for the whole square printing region and recursive formulation of an arbitrary trimmed region seems to be complicated to me. It will be nice if you share with me the algorithm, or at least some references that already does it. I am thinking over one solution, but it seems to be very resource intensive.

After slicing a 3D model , we get a contour that we print layer by layer. Lets consider one of the layers here. I already know the bounding area of the outer-most layer, so I am sure that the printing region will be inside the this bounding area. Then to deal with arbitrary geometry I calculate the intersection points between the outer contour (represented as a pool of edge structures) and hilbert curve. Initially, I am thinking to generate an image of 65536 X 65536 pixels and store the hilbert curve there only for once. The image size will be huge (4GB), as you have mentioned in your last post. The generated image will be used to find the hilbert curves within the printing range and find the hilbert edges and outer contour intersection.
Eventually, those intersection points will be defining the scan line for printing each layer.

I believe this what I am looking and looking for a technique to accomplish it. Any reference materials to strive into this direction ?

I have tried to generate the hilbert curve both recursively and iteratively (using the L-system) for the region 65536 X 65536, and in both cases it crashed. The reason behind it is well understood. Now I am looking forward some better technique to address the problem. Even though, it may sound a bit off-topic here, but I believe that the basics do still remain within the domain of computer graphics, so please have some more patience and guide me further.

Thanks

Well, starting from the production rules of the L-system:


A -> - B F + A F A + F B -
B -> + A F - B F B - F A +

A and B both behave like F, in that the cumulative effect is to move in the current direction, leaving the orientation unchanged (both production rules have an equal number of + and -). But whereas F moves one step, A and B move 2^N − 1 steps.

A level-N curve is bounded by a square of size 2^N − 1 units. The start and end positions share an edge, specifically the edge corresponding to the current direction; 2^N − 1 “F” commands would move the current point by the same amount as a level-N curve. This is true for both A and B, the only difference being that they fill on opposite sides of the path (A to the left, B to the right).

The key point is that because you can determine the change to the current position and orientation resulting from A or B without having to actually draw them, you can determine the position and orientation of each A, B and F directly from the position and orientation of the parent.

So a recursive formulation would look like:


directions = [[1,0],[0,-1],[-1,0],[0,1]]

def AB(N, s, a, x, y):
    """Emit a level-N Hilbert sub-curve via the module-level `line` callback.

    s == 1 draws an 'A' production, s == -1 a 'B' production (its mirror
    image).  a is the current orientation, used as an index (mod 4) into
    the module-level `directions` table, and (x, y) is the sub-curve's
    starting point.  The recursion bottoms out at N <= 0 without drawing.
    """
    if N <= 0:
        return
    half = 1 << (N - 1)  # side length of each child sub-square
    # Basis vectors of the local frame: "forward" and "sideways".
    fx, fy = directions[a % 4]
    sx, sy = directions[(a - 1) % 4]

    def at(du, dv):
        # Map local offsets (du forward, dv sideways) to absolute
        # coordinates; the sideways axis is mirrored for a B curve.
        dv *= s
        return (x + du * fx + dv * sx,
                y + du * fy + dv * sy)

    # Key points along the curve, in drawing order.
    x0, y0 = at(0, 0)
    x1, y1 = at(0, half - 1)
    x2, y2 = at(0, half)
    x3, y3 = at(half - 1, half)
    x4, y4 = at(half, half)
    x5, y5 = at(2 * half - 1, half)
    x6, y6 = at(2 * half - 1, half - 1)
    x7, y7 = at(2 * half - 1, 0)
    xm, ym = at(2 * half - 1, 2 * half - 1)
    # Bounding box of this sub-curve (not used here; available for
    # clip-region tests to prune the recursion).
    xmin, xmax = min(x0, xm), max(x0, xm)
    ymin, ymax = min(y0, ym), max(y0, ym)

    # Four child curves joined by three connecting segments.
    AB(N - 1, -s, a - s, x0, y0)
    line(x1, y1, x2, y2)
    AB(N - 1, s, a, x2, y2)
    line(x3, y3, x4, y4)
    AB(N - 1, s, a, x4, y4)
    line(x5, y5, x6, y6)
    AB(N - 1, -s, a + s, x6, y6)

def hilbert(N):
    """Draw a complete level-N Hilbert curve from the origin.

    Equivalent to expanding a single 'A' production (s=1) at
    orientation 0, starting at (0, 0).
    """
    AB(N, 1, 0, 0, 0)

Note that this computes the bounding box of the curve being drawn in xmin,xmax,ymin,ymax (although they aren’t used here). If you are drawing a small region of a much larger curve, you can test this against the clip region, and if there’s no intersection you can just return at that point, avoiding further recursion.

I am yet to understand the python snippet you attached, I am working on it now. Till then, I have the following questions from the rest of the explanations from your previous post

I do understand that with the encounter of ‘F’ character we proceed one step and I am trying to decode how many steps we move when we encounter ‘A’ or ‘B’

For level-1 curve, A and B, move 2¹ - 1 steps = 1 step
For level-2 curve, A and B, move 2² - 1 steps = 3 steps.
For level-3 curve, A and B, move 2³ - 1 steps = 7 steps.

As I can visualize, for a level-1 curve, if the starting string is ‘A’, then we are moving 3 steps in total. Isn’t that so? Please check the following Hilbert curve of level-1:

http://i.imgur.com/plliLcy.png

But according to the mathematical formulation, it is only 1 step.

For example ,
A level-1 curve is bounded by a square of size 2¹ - 1 units = 1 unit (does that mean only one square cell?)
A level-2 curve is bounded by a square of size 2² - 1 units = 3 units.

I did not get it properly. Check the following image. How does your formulation get mapped onto the image in the link?

http://i.imgur.com/OFtyDeD.png

Thanks

The total length of the curve is 4^N − 1 steps. But the offset between the start and end points is 2^N − 1 steps.

Each level increases the width of the square by a factor of two, and its area (and the length of the curve which fills it) by a factor of 4.

[QUOTE=sajis997;1280194]
I did not get it properly. Check the following image. How does your formulation gets mapped into the image in the link [/QUOTE]
Like this:

Where green = A, red = B, blue = F.

The numbers correspond to the x,y pairs.

Hi

I hope that things have started to unfold. I still suspect that I may not have explained the issue properly. To make sure that the real goal of the project does not get lost in translation, I am re-iterating it with an image now. Please check the following attached image.

http://i.imgur.com/jUGgTSp.png

As you can see from the image the outer most rectangle in the image has to be imagined as the printing bed region. In the current setup it is physically 125 mm sidewise and the mapping is done in the following manner:


    // Span of the signed 16-bit range, computed in double:
    // 32767 - (-32768) = 65535 (i.e. 2^16 - 1).
    double span = static_cast<double>(std::numeric_limits<short int>::max()) - static_cast<double>(std::numeric_limits<short int>::min());

    /*
     * sideLength_ is given in mm.
     * mmToStep_ is the conversion factor from mm to steps [steps/mm]:
     *
     *   sideLength_ mm = 2^16 steps, or in other words
     *        1      mm = (2^16 / sideLength_) steps.
     *
     * NOTE(review): `span` above is actually 2^16 - 1 (65535), not 2^16 —
     * confirm whether this off-by-one matters for the mapping.
     */
    mmToStep_ = span / sideLength_;

I have to maintain the above setup for the remaining work-flow of the project. As you can see and as I have mentioned in my last post that the printing bed region is going through the grid mask mapping of 2¹⁶ X 2¹⁶. The parallel scanning is already implemented and the following image depicts the pattern

To follow the above pattern, the bounding area is calculated for the outermost contour loop of the sliced data, which ought to fill a square area containing the whole outer loop. So the vertices of the outer loop are first sorted and a minimum square is ascertained, then adjusted according to the diameter of the laser beam used and the rules of the printing process. Once the bounding area is decided, the computation gets simpler: we only need to generate lines and calculate the intersections between the parallel scan lines, spaced at a certain scan step, and the edges of the contour loop which reside inside the bounding area (rectangle region R). This bounding area does not necessarily have to be square all the time — that is not required for the parallel scan lines — whereas it is ABSOLUTELY required for the Hilbert scan path; more specifically, it has to be a 2^n x 2^n region, with the curve starting at one corner and ending at an adjacent corner. My question is: "Is it possible to generate the Hilbert curve only for the region bounded by the trimmed bounding area (a rectangular region which may not be 2^n x 2^n), or are you only focusing on extracting the trimmed bounding area from the much larger (2¹⁶ x 2¹⁶ steps) Hilbert curve?" I hope that your last python snippet addresses this issue; I am still working on it. If it does not, then you now know specifically the issue that I am trying to solve.

Thanks