
I've been trying to implement Morph Target animation in OpenGL with Facial Blendshapes by following this tutorial. The vertex shader for the animation looks something like this:

#version 400 core

in vec3 vNeutral;
in vec3 vSmile_L;
in vec3 nNeutral;
in vec3 nSmile_L;
in vec3 vSmile_R;
in vec3 nSmile_R;

uniform float left;
uniform float right;
uniform float top;
uniform float bottom;
uniform float near;
uniform float far;

uniform vec3 cameraPosition;
uniform vec3 lookAtPosition;
uniform vec3 upVector;

uniform vec4 lightPosition;

out vec3 lPos;
out vec3 vPos;
out vec3 vNorm;

uniform vec3 pos;
uniform vec3 size;
uniform mat4 quaternion;

uniform float smile_w;

void main(){

    //float smile_l_w = 0.9;
    float neutral_w = 1 - 2 * smile_w;
    clamp(neutral_w, 0.0, 1.0);

    vec3 vPosition = neutral_w * vNeutral + smile_w * vSmile_L + smile_w * vSmile_R;
    vec3 vNormal = neutral_w * nNeutral + smile_w * nSmile_L + smile_w * nSmile_R;
    //vec3 vPosition = vNeutral + (vSmile_L - vNeutral) * smile_w;
    //vec3 vNormal = nNeutral + (nSmile_L - nNeutral) * smile_w;

    normalize(vPosition);
    normalize(vNormal);

    mat4 translate = mat4(1.0, 0.0, 0.0, 0.0,
                          0.0, 1.0, 0.0, 0.0,
                          0.0, 0.0, 1.0, 0.0,
                          pos.x, pos.y, pos.z, 1.0);

    mat4 scale = mat4(size.x, 0.0, 0.0, 0.0,
                      0.0, size.y, 0.0, 0.0,
                      0.0, 0.0, size.z, 0.0,
                      0.0, 0.0, 0.0, 1.0);

    mat4 model = translate * scale * quaternion;

    vec3 n = normalize(cameraPosition - lookAtPosition);
    vec3 u = normalize(cross(upVector, n));
    vec3 v = cross(n, u);

    mat4 view=mat4(u.x,v.x,n.x,0,
                    u.y,v.y,n.y,0,
                    u.z,v.z,n.z,0,
                    dot(-u,cameraPosition),dot(-v,cameraPosition),dot(-n,cameraPosition),1);

    mat4 modelView = view * model;

    float p11=((2.0*near)/(right-left));
    float p31=((right+left)/(right-left));
    float p22=((2.0*near)/(top-bottom));
    float p32=((top+bottom)/(top-bottom));
    float p33=-((far+near)/(far-near));
    float p43=-((2.0*far*near)/(far-near));

    mat4 projection = mat4(p11, 0, 0, 0,
                           0, p22, 0, 0,
                           p31, p32, p33, -1,
                           0, 0, p43, 0);


    //lighting calculation
    vec4 vertexInEye = modelView * vec4(vPosition, 1.0);
    vec4 lightInEye = view * lightPosition;
    vec4 normalInEye = normalize(modelView * vec4(vNormal, 0.0));


    lPos = lightInEye.xyz;
    vPos = vertexInEye.xyz;
    vNorm = normalInEye.xyz;


    gl_Position = projection * modelView * vec4(vPosition, 1.0);
}

Although the algorithm for morph target animation works, I get missing faces on the final calculated blend shape. The animation looks somewhat like the following GIF.

Morph Target Facial Animation

The blendshapes are exported from FaceShift, a markerless facial animation software.

But also, the algorithm works perfectly on a normal cuboid with its twisted blend shape created in Blender:

Cube Twist Morph Target Animation

Could it be something wrong with the blendshapes I am using for the facial animation? Or am I doing something wrong in the vertex shader?

Update:

So as suggested, I made the required changes to the vertex shader and made a new animation, and I am still getting the same results.

Here's the updated vertex shader code:

#version 400 core

in vec3 vNeutral;
in vec3 vSmile_L;
in vec3 nNeutral;
in vec3 nSmile_L;
in vec3 vSmile_R;
in vec3 nSmile_R;

uniform float left;
uniform float right;
uniform float top;
uniform float bottom;
uniform float near;
uniform float far;

uniform vec3 cameraPosition;
uniform vec3 lookAtPosition;
uniform vec3 upVector;

uniform vec4 lightPosition;

out vec3 lPos;
out vec3 vPos;
out vec3 vNorm;

uniform vec3 pos;
uniform vec3 size;
uniform mat4 quaternion;

uniform float smile_w;

void main(){

    float neutral_w = 1.0 - smile_w;
    float neutral_f = clamp(neutral_w, 0.0, 1.0);

    vec3 vPosition = neutral_f * vNeutral + smile_w/2 * vSmile_L + smile_w/2 * vSmile_R;
    vec3 vNormal = neutral_f * nNeutral + smile_w/2 * nSmile_L + smile_w/2 * nSmile_R;

    mat4 translate = mat4(1.0, 0.0, 0.0, 0.0,
                          0.0, 1.0, 0.0, 0.0,
                          0.0, 0.0, 1.0, 0.0,
                          pos.x, pos.y, pos.z, 1.0);

    mat4 scale = mat4(size.x, 0.0, 0.0, 0.0,
                      0.0, size.y, 0.0, 0.0,
                      0.0, 0.0, size.z, 0.0,
                      0.0, 0.0, 0.0, 1.0);

    mat4 model = translate * scale * quaternion;

    vec3 n = normalize(cameraPosition - lookAtPosition);
    vec3 u = normalize(cross(upVector, n));
    vec3 v = cross(n, u);

    mat4 view=mat4(u.x,v.x,n.x,0,
                    u.y,v.y,n.y,0,
                    u.z,v.z,n.z,0,
                    dot(-u,cameraPosition),dot(-v,cameraPosition),dot(-n,cameraPosition),1);

    mat4 modelView = view * model;

    float p11=((2.0*near)/(right-left));
    float p31=((right+left)/(right-left));
    float p22=((2.0*near)/(top-bottom));
    float p32=((top+bottom)/(top-bottom));
    float p33=-((far+near)/(far-near));
    float p43=-((2.0*far*near)/(far-near));

    mat4 projection = mat4(p11, 0, 0, 0,
                           0, p22, 0, 0,
                           p31, p32, p33, -1,
                           0, 0, p43, 0);


    //lighting calculation
    vec4 vertexInEye = modelView * vec4(vPosition, 1.0);
    vec4 lightInEye = view * lightPosition;
    vec4 normalInEye = normalize(modelView * vec4(vNormal, 0.0));


    lPos = lightInEye.xyz;
    vPos = vertexInEye.xyz;
    vNorm = normalInEye.xyz;


    gl_Position = projection * modelView * vec4(vPosition, 1.0);
}

Also, my fragment shader looks something like this (I just added new material settings compared to earlier):

#version 400 core
uniform vec4 lightColor;
uniform vec4 diffuseColor;

in vec3 lPos;
in vec3 vPos;
in vec3 vNorm;

out vec4 fragColor;   // #version 400 core has no built-in gl_FragColor

void main(){
    //copper like material light settings
    vec4 ambient = vec4(0.19125, 0.0735, 0.0225, 1.0);
    vec4 diff = vec4(0.7038,    0.27048, 0.0828, 1.0);
    vec4 spec = vec4(0.256777, 0.137622, 0.086014, 1.0);

    vec3 L = normalize (lPos - vPos);
    vec3 N = normalize (vNorm);
    vec3 Emissive = normalize(-vPos);
    vec3 R = reflect(-L, N);
    float dotProd = max(dot(R, Emissive), 0.0);
    vec4 specColor = lightColor*spec*pow(dotProd,0.1 * 128);
    vec4 diffuse = lightColor * diff * max(dot(N, L), 0.0);  // clamp N·L so back-facing light doesn't subtract color
    fragColor = ambient + diffuse + specColor;
}

And finally the animation I got from updating the code:

Updated Morph animation

As you can see, I am still getting some missing triangles/faces in the morph target animation. Any more suggestions/comments regarding the issue would be really helpful. Thanks again in advance. :)

Update:

So as suggested, I flipped the normals where dot(vSmile_R, nSmile_R) < 0, and I got the following image result.

Also, instead of getting the normals from the obj files, I tried calculating my own (face and vertex normals) and still I got the same result.
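
For reference, the flip pass looks essentially like this (a minimal sketch assuming the positions and normals live in std::vector<glm::vec3> containers; not my exact buffer code):

// Flip a blendshape normal when it points "inward", i.e. when the
// inner product of the vertex position and its normal is negative.
// This assumes the mesh's local origin lies inside the head.
#include <vector>
#include <glm/glm.hpp>

void flipInwardNormals(const std::vector<glm::vec3>& positions,
                       std::vector<glm::vec3>& normals)
{
    for (std::size_t i = 0; i < positions.size(); ++i)
        if (glm::dot(positions[i], normals[i]) < 0.0f)
            normals[i] = -normals[i];
}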

Flipped normals result

pslayer89
  • Your `view` matrix appears to be transposed, at least the top-left 3x3 sub-matrix is. Each set of 4 scalars in the constructor of `mat4` represents a single column in the matrix; you have your basis vectors spanning each ***row*** of the constructed `mat4` for some reason. – Andon M. Coleman Jun 12 '14 at 02:45
  • Umm, are you sure? Because I've been using the same view matrix for almost all my projects and it seems to work fine with all the camera movements. I tried changing the view matrix as you suggested, but there was no display. I think it's supposed to be like this, considering GLSL uses column-major matrix representation? – pslayer89 Jun 12 '14 at 02:54
  • The spatial axes are supposed to exist as the first three columns of your view matrix and the translation as the fourth. You have the translation correct, but the vector `u` is occupying the first *row* of your view matrix, the vector `v` is occupying the second *row* and the vector `n` is occupying the third *row*. – Andon M. Coleman Jun 12 '14 at 02:56
  • But as I recall from my basic GLSL tutorial, since GLSL is column major, each row in the `mat4` is actually a column. So, by that logic, `u` looks like the first _row_ but is actually the first _column_, and so on. Please feel free to correct me if I'm wrong. – pslayer89 Jun 12 '14 at 03:08
  • Yes, each row in your `mat4` constructor is a column. Your first column is this: `u.x`, `v.x`, `n.x`, **0.0**. – Andon M. Coleman Jun 12 '14 at 03:10
  • I see your point. But when I do that in my code, nothing gets displayed. I also tried inverting my forward vector and changing the translation from last row to last column. But still nothing. This is the only view matrix which displays the shapes. Also, I found [this](http://schabby.de/view-matrix/) tutorial which has `u`, `v` and `n` as rows in the matrix. But this is in C++ so I think it's row major, and when this gets passed to the shader, each row becomes a column, which is the view matrix in my case. Isn't it? Sorry, I am a bit confused. – pslayer89 Jun 12 '14 at 04:00
  • You know what; nevermind. I was thinking the wrong way. Your shader is effectively implementing `LookAt (...)`, which does translation followed by inverse rotation. The inverse of a rotation matrix in Euclidean space is the same as its transpose and that is why the top-left 3x3 matrix (rotation) is transposed. Sorry if I confused you ;) – Andon M. Coleman Jun 12 '14 at 04:04
  • Ah I see! Thanks for clearing that up! :) But any suggestions/ideas on what might be causing the missing faces? Thanks again for your help. – pslayer89 Jun 12 '14 at 13:54
  • Probably not causing your issue, but your `clamp` call and first two `normalize` calls won't do anything (they don't modify their arguments). You don't want to normalize `vPosition` anyway. – GuyRT Jun 12 '14 at 14:53
  • Is it definitely whole faces going missing at a time (it's hard to see in the animation)? I did wonder if you were not clearing the depth buffer between frames, but I don't think it's likely. – GuyRT Jun 12 '14 at 14:55
  • Oh, the normalize functions were commented; I uncommented them to see if there's any difference and forgot to comment them again. But the output was definitely recorded with the normalize functions commented. I put the clamp in just in case the neutral weight goes below zero, as a sanity check. Also, I am calling `glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)` in my display function, so I think it does get cleared between frames. Could it be that the blendshape mesh is too dense and that's what's causing the missing triangles in the interpolation? – pslayer89 Jun 12 '14 at 15:04
  • I don't think the mesh being too dense can cause this. If you are using the same indices each time and the mesh is "water-tight" (i.e. adjacent triangles are using the same vertices or at least vertices with exactly the same position), I can't fathom how triangles can go missing. Possibly some vertices are being transformed outside the near or far clipping planes? – GuyRT Jun 12 '14 at 15:54
  • I don't think anything's going out of far or the near plane. I double checked by moving the camera towards the model. Also, I am using `glDrawArrays` instead of `glDrawElements`. That's because I have a flag to set which normals (vertex for smooth shading or face for flat shading) the user wants to use for lighting. So as a result of that, I send the vertices and the normals of all the blendshapes in the expanded form, i.e. without any element buffer attached to it. Should that make a difference? – pslayer89 Jun 12 '14 at 21:00
  • What is the range of `smile_w` supposed to be in the first place? `1 - 2 * smile_w` looks a lot like something you would do if you wanted to re-scale [**0**,**1**] to the range [**-1**,**1**] (though it reverses the direction; that is, **1** becomes **-1** and **0** becomes **1**). – Andon M. Coleman Jun 13 '14 at 00:36
  • Umm so I have a blendshape for "_Left Smile_" and a blendshape for "_Right Smile_" and I figured if a person would smile, the weight for both left and right should be the same. So that's why I subtract smile_w (weight of each smile component, 2 in this case) twice from the neutral component. – pslayer89 Jun 13 '14 at 00:50
  • That clamping indeed looks dubious. – Solkar Jun 20 '14 at 10:04
  • Especially because you discard the clamped value - the first parameter of "clamp" is not [inout] but only [in], so to actually clamp it, you have to assign the return value of "clamp" to some variable. – Solkar Jun 20 '14 at 10:12
  • Ah I get it! I thought the value was supposed to be _passed by reference_ but I guess I was wrong! I'll try assigning the clamped value to `neutral_w` and see if the output is any different. – pslayer89 Jun 20 '14 at 15:52
  • As @Solkar suggested, I tried doing `neutral_w = clamp(neutral_w, 0.0, 1.0)` before calculating `vPosition` and `vNormal` but still I am getting the same output. :( Any other suggestions might be really helpful. Thanks. – pslayer89 Jun 21 '14 at 22:24
  • I hope you have looked at the OpenGL docs meanwhile and understand "other" in terms of "additional"; the way you had tried to clamp was just plainly without effect. The same, btw, holds for the normalize statement for vNormal. Normalizing "vPosition" should be scratched anyway, because normalizing the position vectors would project the points onto the unit sphere, and you would not want to turn the face into a plain balloon. – Solkar Jun 22 '14 at 11:08
  • Fix all that please, and then re-post the fixed code and the anim or a significant frame you get with that fixed code, and one COMPLETE sample for that frame - the values of all uniforms and one complete set of attributes which refers to a mesh vector with three non-vanishing components. – Solkar Jun 22 '14 at 11:10
  • I updated the post with the new code and animation. At the start I set the uniform `smile_w` to 0.8, but when I animate the scene, its value ranges from 0.0 to 1.0. I'm sorry, I didn't get the _one complete set of attributes which refers to a mesh vector with three non-vanishing components_ part from your last comment; do you want to look at the C++ code where I am passing the attributes into the vbo's, or the actual values of the attributes? Fyi, I am reading data from the Obj files which I exported from Faceshift (a facial expression generation software). – pslayer89 Jun 23 '14 at 23:55
  • Well... What are the "attributes" used in your shader? Obviously in vec3 vNeutral; in vec3 vSmile_L; in vec3 nNeutral; in vec3 nSmile_L; in vec3 vSmile_R; in vec3 nSmile_R; What is a "complete set" of those attributes? Obviously a set of 6 x vec3 values used to render one vertex. What is a "mesh vector with three non-vanishing components"? Obviously a vector (x, y, z) for which x,y,z ≠ 0 holds. – Solkar Jun 24 '14 at 07:34
  • Do you have backface culling enabled? I suspect that the morphing of the vertices might turn counterclockwise triangles clockwise. – Reto Koradi Nov 05 '14 at 06:11

2 Answers


Not an answer attempt, I just need more formatting than available for comments.

I cannot tell which data was actually exported from Faceshift and how that was put into the custom ADTs of the app; my crystal ball is currently busy with predicting the FIFA World Cup results.

But generally, a linear morph is a very simple thing:

There is one vector "I" of data for the initial mesh and a vector "F" of equal size for the position data of the final mesh; their count and ordering must match for the tessellation to remain intact.

Given j ∈ [0, count), corresponding vectors initial = I[j], final = F[j] and a morph factor λ ∈ [0, 1], the j-th (zero-based) current vector current(λ) is given by

current(λ) = initial + λ · (final − initial) = (1 − λ) · initial + λ · final.
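
In code, that is a one-liner per vertex. A minimal C++ sketch (glm types and the function name are my illustrative assumptions, not code from the question):

// Linear morph between two matched position arrays.
// I and F must have the same size and vertex ordering for the
// tessellation to remain intact.
#include <vector>
#include <glm/glm.hpp>

std::vector<glm::vec3> morph(const std::vector<glm::vec3>& I,
                             const std::vector<glm::vec3>& F,
                             float lambda)   // morph factor λ in [0, 1]
{
    std::vector<glm::vec3> current(I.size());
    for (std::size_t j = 0; j < I.size(); ++j)
        current[j] = (1.0f - lambda) * I[j] + lambda * F[j];
    return current;
}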


From this perspective, this

vec3 vPosition = neutral_w * vNeutral + 
                 smile_w/2 * vSmile_L + smile_w/2 * vSmile_R;

looks dubious at best.

As I said, my crystal ball is currently defunct, but the naming would imply that, given the OpenGL standard reference frame,

vSmile_L = vSmile_R * (-1,1,1),

this "*" denoting component-wise multiplication, and that in turn would imply cancelling out the morph x-component by above addition.

But apparently, the face does not degenerate into a plane (a line from the projected pov), so the meaning of those attributes is unclear.

That's the reason why I want to look at the effective data, as stated in the comments.


Another thing, not related to the effect in question, but to the shading algorithm.

As stated in the answer to this

Can OpenGL shader compilers optimize expressions on uniforms?,

the shader optimizer could well optimize pure uniform expressions like the M/V/P calculations done with

uniform float left;
uniform float right;
uniform float top;
uniform float bottom;
uniform float near;
uniform float far;

uniform vec3 cameraPosition;
uniform vec3 lookAtPosition;
uniform vec3 upVector;

/* */   

uniform vec3 pos;
uniform vec3 size;
uniform mat4 quaternion;

but I find it highly optimistic to rely on such assumed optimizations.

If it is not optimized accordingly, this means doing those calculations once per vertex per frame; for a human face with a LOD of 1000 vertices at 60 Hz, that would be done 60,000 times per second by the GPU, instead of once and for all by the CPU.

No modern CPU would give up its soul if these calculations were put once on its shoulders, so passing the common trinity of M/V/P matrices as uniforms seems appropriate instead of constructing those matrices in the shader.

For reusing the code from the shaders, glm provides a very GLSL-ish way to do GL-related maths in C++.
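
A minimal sketch of that CPU-side setup with glm (the uniform names, parameter list and GL loader are my assumptions, not code from the question):

// Build model/view/projection once per frame on the CPU and upload
// them as uniforms, instead of reconstructing them per vertex.
#include <GL/glew.h>   // or whichever GL loader the app already uses
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

void uploadMVP(GLuint program,
               const glm::vec3& pos, const glm::vec3& size, const glm::mat4& rotation,
               const glm::vec3& cameraPosition, const glm::vec3& lookAtPosition,
               const glm::vec3& upVector,
               float left, float right, float bottom, float top,
               float zNear, float zFar)
{
    glm::mat4 model = glm::translate(glm::mat4(1.0f), pos)
                    * glm::scale(glm::mat4(1.0f), size)
                    * rotation;                                    // quaternion as mat4
    glm::mat4 view = glm::lookAt(cameraPosition, lookAtPosition, upVector);
    glm::mat4 projection = glm::frustum(left, right, bottom, top, zNear, zFar);

    glUniformMatrix4fv(glGetUniformLocation(program, "model"), 1, GL_FALSE, glm::value_ptr(model));
    glUniformMatrix4fv(glGetUniformLocation(program, "view"), 1, GL_FALSE, glm::value_ptr(view));
    glUniformMatrix4fv(glGetUniformLocation(program, "projection"), 1, GL_FALSE, glm::value_ptr(projection));
}

The vertex shader would then just declare uniform mat4 model, view, projection; and multiply them, leaving the morph blend as the only per-vertex work.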

Solkar
  • Firstly, apologies for replying so late; this has been a rough week for me. That being said, I exported all the blendshapes (47 of them, for 47 different facial expressions) as obj files from Faceshift. Each blendshape contains an equal number of vertices too. – pslayer89 Jun 27 '14 at 15:01
  • As for the attributes I am using, I have one single vertex array object (vao) and I have 2 vbos for each blendshape in that single vao. 1 vbo is for the vertex data and the other one for the normal. I don't know if it matters, but I prefer separate vbos for separate data. So `vNeutral` is the vertex data contained in the vbo[0] of the vao, and `nNeutral` is the normal data contained in vbo[1]. Similarly `vSmile_L` is the vertex data for the _left smile_ blendshape stored in the vbo[2] and `nSmile_L` is the normal data for the same blendshape stored in vbo[3], and so on. – pslayer89 Jun 27 '14 at 15:02
  • Also, I have 2 blendshapes for a smile, `Left Smile` and `Right Smile`, so I figured if I split the smile weight in two and multiply each half with each of the "sub-smile blendshape" vertices it should work, which it does, except for the same glitch. But ignoring that, even if I do it with one blendshape, say "Left smile", I still get the same glitch. – pslayer89 Jun 27 '14 at 15:04
  • Again, apologies with the burst of comments, but I've been aware of the matrix issue for a while now and been meaning to port it to the CPU side as soon as this glitch problem gets resolved. Thanks for pointing it out though. :) – pslayer89 Jun 27 '14 at 15:06
  • Thanks, now I got the picture. I'll contemplate on that and get back to you. – Solkar Jun 28 '14 at 05:27
  • I think it's good to eliminate the usual suspect first - the normals. Just iterate once over both the normals and the position vectors of each of the morphed attribute data and just try and flip the normal if the std::inner_product of position and normal is < 0. If then the cheeks and chin are ok, but e.g. the ears start to fade, we'll at least know that the normal data IS flawed and we can go and refine the re-factoring then. – Solkar Jun 28 '14 at 11:32
  • Just to be clear, I should check the inner product of each vertex attribute with its corresponding vertex normal for the < 0 condition, and if the condition is true then I should do `normal[i] = -normal[i]`, where `normal[i]` is a `vec3` and is the corresponding normal for the `vertex[i]`. – pslayer89 Jun 29 '14 at 03:53
  • Yup. Esp for vSmile_L vs. nSmile_L and vSmile_R vs. nSmile_R; the base mesh data looks ok. The human head is similar to an ellipsoid (skip the ears), thus n and v should point along each other (given the frame is at the center of the head, as I would assume). – Solkar Jun 29 '14 at 16:28
  • Firstly, apologies for a delayed update; it has been a rough week. I updated the content as suggested in your last comment and I get some weird highlighted areas when I flip the normals. But the ears remain intact, and as a matter of fact do not fade completely, though the inner part gets darkened (because of the normal flipping?). As mentioned in the update, I tried with both the normals coming from the obj file and the ones I calculated using the triangle vertices, and I get the same results. So does it mean that the normal data is fine and something else might be the root of the problem? – pslayer89 Jul 06 '14 at 02:18
  • Also, I only flipped the normals just for the target blendshape and the neutral face's normals still remain untouched. – pslayer89 Jul 06 '14 at 02:19
  • Hmmm... What about exporting the final frame's mesh to an obj file and having a look at it in Blender or some other app with solid mesh editing support? – Solkar Jul 11 '14 at 22:41
  • That's actually one of the first things I tried when I got the obj files. I used blender to see if the morphing between the starting and the ending target is smooth. It works pretty good on Blender. – pslayer89 Jul 12 '14 at 03:38
  • Where is the origin of that mesh when you import it into Blender? – Solkar Jul 12 '14 at 11:34
  • I got all the blendshapes from the software called faceshift. It generates all the blendshapes for each expression for your face and then you can export all the blendshapes as objs from the software itself. Would it help if I uploaded them somewhere and provided you a download link for the obj files? – pslayer89 Jul 12 '14 at 18:00
  • ;) With "origin" I meant the origin of its local frame of reference; its local vec3(0.,0.,0.). – Solkar Jul 12 '14 at 18:04
  • Umm sorry for the dumb question, but how exactly do I find that out from the obj file? Should I look in the data and figure out by analyzing the vertex positions or is there a way to find out by loading it in Blender or any other 3d viewing software? – pslayer89 Jul 18 '14 at 19:22
  • import the obj into Blender, set the 3D cursor to the origin; screenshot. – Solkar Jul 18 '14 at 19:37
  • Okay, firstly, I apologize it took me so long to reply. But I think I've found the root of the problem, which I think lies in the obj file itself. The original obj file I got exported from Faceshift contains quads mixed with some triangle faces (obj file with mixed faces?), and when I triangulated all the faces in Blender/3ds Max, I think the order of the vertices got changed, causing the glitch. Now another problem arises: how do I parse an obj file which has both quad and triangle faces? – pslayer89 Aug 01 '14 at 03:59
  • For an example, here is a [link](https://gist.github.com/pxv8270/af361a1adc531145fae8) to the original obj file that I got from the software. Note that at line # 22187, the faces become triangulated instead of quadrangular. Also, I tried creating shape keys in Blender with both the original obj files and the ones with the triangulated vertices; I found that the originals work fine but the triangulated ones show all sorts of weird outputs. Then I tried triangulating the mesh in 3ds Max, and used those to create shape keys in Blender. The jaggedness was still there but it was much less. – pslayer89 Aug 01 '14 at 04:01

I had a very similar problem some time ago. As you eventually noticed, your problem most probably lies in the mesh itself. In my case, it was inconsistent mesh triangulation. Using the Triangulate Modifier in Blender solved the problem for me. Perhaps you should give it a try too.
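
If you end up triangulating the OBJ data in code instead, a simple fan split applied uniformly to every face keeps the vertex ordering consistent across all blendshapes. A minimal sketch (assuming the corner indices of one `f` line are already parsed; `Tri` and `triangulateFace` are illustrative names):

// Fan-triangulate one OBJ face. Handles triangles (3 corners) and
// quads (4 corners) uniformly, preserving the original corner order.
#include <vector>

struct Tri { int a, b, c; };

std::vector<Tri> triangulateFace(const std::vector<int>& face)
{
    std::vector<Tri> tris;
    for (std::size_t i = 1; i + 1 < face.size(); ++i)
        tris.push_back({ face[0], face[i], face[i + 1] });
    return tris;
}

Because every blendshape OBJ shares the same face list, running the same split over each file preserves matching vertex order between the shapes, which is exactly what the morph requires.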

d0c
  • I played around with the mesh itself in Blender and figured that triangular version of the mesh was producing those missing triangle artifacts, but when I used the quad mesh from the software, it worked like a charm for me. I will definitely give the Triangulate Modifier a try and see what result I get. Thanks for pointing that out. :) – pslayer89 Nov 06 '14 at 20:59
  • Let me know if it works. Please note that you'll probably have to start with one single base mesh, add the triangulate modifier to it (but not apply it) and then proceed to add the various shapekeys and keyframes yourself. Afterwards, you can export the animation as separate OBJ files (check 'animation', 'apply modifiers' but NOT 'triangulate faces' in the OBJ exporter options) that you can use in your engine. – d0c Nov 06 '14 at 22:20