I'm developing a WebGL-based renderer and, as the title says, I need to pack four 8-bit unsigned integers into a 32-bit float. I wrote the following code:
//pack four 8-bit unsigned integers into a float
function packIntsToFloat(i1, i2, i3, i4) {
    //ensure 32-bit unsigned storage
    var ints = new Uint32Array(4);
    ints[0] = i1;
    ints[1] = i2;
    ints[2] = i3;
    ints[3] = i4;
    //shift each byte to its position and merge them all into ints[3]
    ints[0] <<= 24;
    ints[1] <<= 16;
    ints[2] <<= 8;
    ints[3] |= ints[0] | ints[1] | ints[2];
    //convert to float
    var f = new Float32Array(1);
    f[0] = ints[3];
    return f[0];
}
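For concreteness, a quick test with values of my own (0x12345678 is 305419896) suggests the packing into ints[3] is fine and that it's the Float32Array assignment that rounds the value instead of reinterpreting its bits:

//bytes 0x12, 0x34, 0x56, 0x78 pack to 0x12345678 = 305419896,
//yet the function returns the nearest representable float32 value
var packed = packIntsToFloat(0x12, 0x34, 0x56, 0x78);
console.log(packed); //305419904, not 305419896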
//unpack four 8-bit unsigned integers from a float
function unPackIntsFromFloat(f) {
    var i = new Uint32Array(4);
    i[3] = f;
    //byte masks for bits 7..0, 15..8, 23..16 and 31..24
    var mask_7_to_0 = 255;
    var mask_15_to_8 = mask_7_to_0 << 8;
    var mask_23_to_16 = mask_15_to_8 << 8;
    var mask_31_to_24 = mask_23_to_16 << 8;
    //mask out each byte and shift it back down
    i[0] = (i[3] & mask_31_to_24) >>> 24;
    i[1] = (i[3] & mask_23_to_16) >>> 16;
    i[2] = (i[3] & mask_15_to_8) >>> 8;
    i[3] = (i[3] & mask_7_to_0);
    return new Uint8Array(i);
}
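Round-tripping both functions with the same test values shows the damage in the low byte:

var f = packIntsToFloat(0x12, 0x34, 0x56, 0x78);
var bytes = unPackIntsFromFloat(f);
console.log(bytes); //Uint8Array [ 18, 52, 86, 128 ], expected [ 18, 52, 86, 120 ]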
but the round trip only works if I skip the very step I need:

//convert to float
var f = new Float32Array(1);
f[0] = ints[3];
I'm aware of the IEEE 754 standard, but I expected no change to the bits themselves, only to their interpretation as a value. Thank you in advance.
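For reference, this is the bit-for-bit reinterpretation I assumed the assignment would perform; a minimal sketch using two typed-array views over one shared ArrayBuffer (the helper names are my own, and even this approach can mangle bit patterns that decode as NaN, since engines are allowed to canonicalize NaNs):

//two views over the same 4 bytes: a write through one view is
//readable through the other with no value conversion
var buf = new ArrayBuffer(4);
var u32View = new Uint32Array(buf);
var f32View = new Float32Array(buf);

function uint32BitsToFloat(u) {
    u32View[0] = u;    //store the raw 32-bit pattern
    return f32View[0]; //read the same bits back as a float32
}

function floatToUint32Bits(f) {
    f32View[0] = f;
    return u32View[0];
}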