I'm rendering a texture to the screen with this code:
if (beganDraw)
{
    beganDraw = false;
    // Bind the default framebuffer so we draw to the screen
    GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
    if (CameraMaterial != null)
    {
        GL.BindBuffer(BufferTarget.ArrayBuffer, screenMesh.VBO);
        GL.BindVertexArray(VAO);
        GL.BindBuffer(BufferTarget.ElementArrayBuffer, screenMesh.VEO);
        CameraMaterial.Use();
        screenMesh.ApplyDrawHints(CameraMaterial.Shader);
        GL.DrawElements(PrimitiveType.Triangles, 6, DrawElementsType.UnsignedInt, 0);
        GL.BindBuffer(BufferTarget.ElementArrayBuffer, 0);
        GL.BindVertexArray(0);
        GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
        GL.UseProgram(0);
    }
}
As you can see, there is no transformation matrix. I create the mesh for the screen surface like this:
screenMesh = new Mesh();
screenMesh.SetVertices(new float[] {
    -1, -1,
     1, -1,
     1,  1,
    -1,  1
});
screenMesh.SetIndices(new uint[] {
    2, 3, 0,
    0, 1, 2
});
My question is: why do I have to go from -1 to 1 in order to fill the screen? Shouldn't it default to 0 to 1? Also, how can I make it go from 0 to 1 (see the sketch after the shader for what I mean), and is that even advisable?
This is the shader:
[Shader vertex]
#version 150 core
in vec2 pos;
out vec2 texCoord;
uniform float _time;
uniform sampler2D tex;
void main() {
    gl_Position = vec4(pos, 0, 1);
    texCoord = pos / 2 + vec2(0.5, 0.5);
}
[Shader fragment]
#version 150 core
#define PI 3.1415926535897932384626433832795
out vec4 outColor;
uniform float _time;
uniform sampler2D tex;
in vec2 texCoord;
void main() {
    // texture() replaces the deprecated texture2D() in #version 150 core
    outColor = texture(tex, texCoord);
}
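
For clarity, this is the kind of change I mean by going from 0 to 1. It is only a sketch, assuming I also rebuilt the mesh with vertices from (0,0) to (1,1):

screenMesh.SetVertices(new float[] {
    0, 0,
    1, 0,
    1, 1,
    0, 1
});

and then had the vertex shader remap those positions to the -1..1 range itself:

[Shader vertex]
#version 150 core
in vec2 pos;       // now assumed to be in [0,1]
out vec2 texCoord;
void main() {
    texCoord = pos;                            // 0..1 positions double as texture coordinates
    gl_Position = vec4(pos * 2.0 - 1.0, 0, 1); // remap [0,1] to [-1,1]
}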