3

I'm making a game in C# / XNA. I'm currently working on the shader I'll be using for the terrain. I'm using a texture atlas for speed and efficiency but I'm experiencing texture/color bleeding between tiles: https://i.stack.imgur.com/249Ha.png

I get this effect in both FX Composer and my game itself. Here is my shader:

//-----------------------------------------------------------------------------
// InstancedModel.fx
//
// Microsoft XNA Community Game Platform
// Copyright (C) Microsoft Corporation. All rights reserved.
//-----------------------------------------------------------------------------


// Camera settings.
float4x4 World : World < string UIWidget="None"; >;
float4x4 View : View < string UIWidget="None"; >;
float4x4 Projection : Projection < string UIWidget="None"; >;

// Parameters for a simple Lambert lighting model (constants, not a sampler).
float3 LightDirection = normalize(float3(-1, -1, -1));
float3 DiffuseLight = 1.25;   // scalar initializer is splatted across all three channels
float3 AmbientLight = 0.25;   // scalar initializer is splatted across all three channels

float TextureSide = 0; //0 = top, 1 = side, 2 = bottom
float2 TextureCoord;          // atlas tile origin used by the NoInstancing path
texture Texture;              // the texture atlas
float2 TextureSize = 2.0;     // atlas dimensions measured in tiles per side

// Atlas sampler. NOTE(review): Clamp addressing clamps at the edges of the
// WHOLE atlas, not at tile boundaries, so Linear min/mag filtering and
// mipmap generation can still blend texels from neighbouring tiles -- a
// likely contributor to the bleeding described above.
sampler Sampler = sampler_state
{
    Texture = (Texture);
    MinFilter = Linear;
    MipFilter = Linear;   // downsampled mip levels average across tile borders
    MagFilter = Linear;
    AddressU = Clamp;
    AddressV = Clamp;
};


// Per-vertex data fed to both vertex shaders.
struct VertexShaderInput
{
    float4 Position : POSITION0;          // object-space position
    float3 Normal : NORMAL0;              // object-space normal
    float2 TextureCoordinate : TEXCOORD0; // tile-local UV, before the atlas offset is added
};


// Data interpolated from the vertex shader to the pixel shader.
struct VertexShaderOutput
{
    float4 Position : POSITION0;          // clip-space position
    float4 Color : COLOR0;                // lit colour * per-instance tint
    float2 TextureCoordinate : TEXCOORD0; // tile-local UV + atlas tile origin (still in tile units)
};

// Vertex shader helper function shared between the two techniques.
VertexShaderOutput VertexShaderCommon(VertexShaderInput input, float4x4 instanceTransform, float2 atlasCoord, float4 colour)
{
    VertexShaderOutput output;

    // Apply the world and camera matrices to compute the output position.
    float4 worldPosition = mul(input.Position, instanceTransform);
    float4 viewPosition = mul(worldPosition, View);
    output.Position = mul(viewPosition, Projection);

    // Compute lighting, using a simple Lambert model.
    float3 worldNormal = mul(input.Normal, instanceTransform);    
    float diffuseAmount = max(-dot(worldNormal, LightDirection), 0);    
    float3 lightingResult = saturate(diffuseAmount * DiffuseLight + AmbientLight);    
    output.Color = float4(lightingResult, 1);
    output.Color = output.Color * colour;

    //calculate texture coords  
    float2 InputTextureCoords = input.TextureCoordinate;// / TextureSize;
    float2 InputAtlasCoords = atlasCoord;// / TextureSize;  

    float2 textCoordsActual = InputTextureCoords + InputAtlasCoords;

    output.TextureCoordinate = textCoordsActual;

    return output;
}


// Hardware instancing reads the per-instance world transform from a secondary vertex stream.
// Hardware instancing reads the per-instance world transform from a secondary vertex stream.
VertexShaderOutput HardwareInstancingVertexShader(VertexShaderInput input,
                                                  float4x4 instanceTransform : BLENDWEIGHT,
                                                  float2 atlasCoord1 : TEXCOORD1, float2 atlasCoord2 : TEXCOORD2, float2 atlasCoord3 : TEXCOORD3, 
                                                  float4 colour : COLOR1)
{
    // Select the atlas tile for the requested face, following the declared
    // convention for TextureSide (0 = top, 1 = side, 2 = bottom).
    // Fix: the original tested values 1/2/3 (and assigned atlasCoord1 twice),
    // so "side" reused the top tile and 3 fell outside the convention.
    float2 atlasCoord = atlasCoord1;  // 0 = top (also the fallback)
    if (TextureSide == 1)             // 1 = side
    {
        atlasCoord = atlasCoord2;
    }
    else if (TextureSide == 2)        // 2 = bottom
    {
        atlasCoord = atlasCoord3;
    }
    // XNA delivers instance matrices transposed in the vertex stream.
    return VertexShaderCommon(input, mul(World, transpose(instanceTransform)), atlasCoord, colour);
}


// When instancing is disabled we take the world transform from an effect parameter.
// When instancing is disabled we take the world transform from an effect parameter.
// The per-instance inputs (instanceTransform, atlasCoord1-3, colour) are
// declared only so the signature matches the instanced path; they are ignored
// here in favour of the World and TextureCoord effect parameters.
VertexShaderOutput NoInstancingVertexShader(VertexShaderInput input,
                                                  float4x4 instanceTransform : BLENDWEIGHT,
                                                  float2 atlasCoord1 : TEXCOORD1, float2 atlasCoord2 : TEXCOORD2, float2 atlasCoord3 : TEXCOORD3, 
                                                  float4 colour : COLOR1)
{
    float4 neutralTint = float4(1, 1, 1, 1);
    return VertexShaderCommon(input, World, TextureCoord, neutralTint);
}

// NOTE(review): despite the name, this performs no half-pixel (or half-texel)
// offset -- it only rescales atlas-space tile coordinates into [0,1] UV space
// by dividing by TextureSize (the atlas measured in tiles per side).
float2 HalfPixileCorrectedCoords(float2 coords)
{
    return coords / TextureSize;  // component-wise divide of u and v
}

// Both techniques share this same pixel shader.
// Both techniques share this same pixel shader.
// Fix: the output semantic was misspelled "COLOR00"; the valid pixel shader
// output semantic is COLOR0.
// NOTE(review): atlasCoord1 is declared but unused here -- confirm whether it
// can be dropped (the VS output struct does not write TEXCOORD1).
float4 PixelShaderFunction(VertexShaderOutput input, 
                            float2 atlasCoord1 : TEXCOORD1) : COLOR0
{
    // Rescale (tile-local UV + atlas tile origin) into [0,1] atlas UV space,
    // then modulate the sampled texel by the interpolated lit colour.
    float2 outputTextureCoords = HalfPixileCorrectedCoords(input.TextureCoordinate);    
    return tex2D(Sampler, outputTextureCoords) * input.Color;
}


// Hardware instancing technique. Requires vs_3_0/ps_3_0 because the instance
// transform arrives through a secondary vertex stream (BLENDWEIGHT semantic).
technique HardwareInstancing
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 HardwareInstancingVertexShader();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}

// For rendering without instancing: world transform and atlas offset come
// from the World / TextureCoord effect parameters instead of vertex data.
technique NoInstancing
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 NoInstancingVertexShader();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}

My FX Composer HLSL profile: https://i.stack.imgur.com/HZCNI.png

and the test atlas I'm using: (can't post because I need more reputation; I can perhaps post it in a follow-up?)

I've done a lot of reading about this, and it seems that I either need to do a "half pixel correction" or wrap the pixels at the edges of the specified texture within the atlas. I've tried both of these with no success.

Question: How do I solve the pixel bleeding issue I'm experiencing?

Vadim Kotov
  • 8,084
  • 8
  • 48
  • 62
user2002287
  • 51
  • 1
  • 6

1 Answer

5

If you want to get nice seamless tiling textures using an atlas, you have to create a texture that's 4 times bigger than you'd expect it to be (that is (2 x width) x (2 x height)).

More specifically, each tile in the atlas should look like this: image

The whole tile should be repeated twice, starting with its center at (u,v).

(u,v) are the coordinates of the tile in the atlas texture.

However, the coordinates which you should use for this tile while texturing an object are:

(u0, v0) <---> (u1, v1)

You can calculate them as follows:

rw = tile_width / atlas_width
rh = tile_height / atlas_height
u0 = u + 0.5 * rw
v0 = v + 0.5 * rh
u1 = u0 + rw
v1 = v0 + rh

One of the main problems with color bleeding when using texture atlas is mipmapping. When the mipmaps are created the texture is downsampled and the adjacent tiles are being blended together which causes artifacts. The method I described above prevents it by providing sufficient reserve of texture's area.

Another reason you get artifacts when sampling the texture is texture filtering. The above method also helps with it, since there is always sufficient area covered by tile's texture in the proximity of the samples in the range (u0, v0) - (u1, v1).

miloszmaki
  • 1,635
  • 15
  • 21
  • Thank you very much for your reply! What you suggested should work, but is terribly inefficient. I may have to resort to something along these lines, but I was really hoping for a more efficient solution. One problem this introduces is a limit to the number of textures I can use. I was going to use a max atlas size of 2048x2048, with textures of 128*128 (for a total of 256 textures). This would limit me to 128 textures. Which may be something I have to work within, but I was really hoping to avoid. – user2002287 Apr 17 '13 at 22:40
  • I've been doing some more reading, it does look like your solution is the most robust.. I guess I'll have to limit my textures then! Or perhaps add two textures in the shader if I reach that 128 limit... – user2002287 Apr 17 '13 at 22:48
  • Actually, for 2048x2048 atlas and 128x128 textures, the limit is 64. That's because you need a 256x256 subset of the atlas for each texture which gives (2048/256) * (2048/256) = 8 * 8 = 64 available textures. – miloszmaki Apr 18 '13 at 08:12
  • 1
    So I've implemented your solution and it's working perfectly. Thanks again for the tip! http://i.imgur.com/W4zclNe.jpg – user2002287 Apr 18 '13 at 09:41