
Take the following example for a row that is 2 pixels wide:

  • 4-bit: 1-byte row (2 pixels = 1 byte) (currently I get 2 instead of 1)
  • 8-bit: 2-byte row (1 pixel = 1 byte)
  • 16-bit: 4-byte row (1 pixel = 2 bytes)
  • 24-bit: 6-byte row (1 pixel = 3 bytes)
  • 32-bit: 8-byte row (1 pixel = 4 bytes)

Something's definitely wrong in the formula, but I can't figure out what:

const int rowWidthPixels = 2;

foreach (var bitsPerPixel in new[] { 4, 8, 16, 24, 32 })
{
    // Round each pixel's bit count up to a whole number of bytes (integer ceiling).
    var bytesPerPixel = (bitsPerPixel + 7) / 8;

    var message1 = $"{nameof(bytesPerPixel)}: {bytesPerPixel}";

    switch (bitsPerPixel)
    {
        case 4:
            Assert.AreEqual(1, bytesPerPixel, message1);
            break;
        case 8:
            Assert.AreEqual(1, bytesPerPixel, message1);
            break;
        case 16:
            Assert.AreEqual(2, bytesPerPixel, message1);
            break;
        case 24:
            Assert.AreEqual(3, bytesPerPixel, message1);
            break;
        case 32:
            Assert.AreEqual(4, bytesPerPixel, message1);
            break;
    }

    // Multiply the per-pixel byte count by the row width in pixels.
    var bytesPerRow = rowWidthPixels * bytesPerPixel;

    var message2 = $"{nameof(bytesPerRow)}: {bytesPerRow}";

    switch (bitsPerPixel)
    {
        case 4:
            Assert.AreEqual(1, bytesPerRow, message2); // BUG this should be 1 but currently is 2
            break;
        case 8:
            Assert.AreEqual(2, bytesPerRow, message2);
            break;
        case 16:
            Assert.AreEqual(4, bytesPerRow, message2);
            break;
        case 24:
            Assert.AreEqual(6, bytesPerRow, message2);
            break;
        case 32:
            Assert.AreEqual(8, bytesPerRow, message2);
            break;
    }
}
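
The list above implies the row size should come from the row's total bit count rounded up to whole bytes, rather than from rounding each pixel up to whole bytes first. A minimal sketch of that calculation, using the same constants as above and assuming no extra stride or alignment padding, would be:

const int rowWidthPixels = 2;

foreach (var bitsPerPixel in new[] { 4, 8, 16, 24, 32 })
{
    // Total bits in the row, rounded up to whole bytes (integer ceiling).
    var bytesPerRow = (rowWidthPixels * bitsPerPixel + 7) / 8;

    // Yields 1, 2, 4, 6, 8 for 4, 8, 16, 24 and 32 bits per pixel respectively.
}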
