
In our existing code we have two shaders that are applied one after another to convert from the eMFCC_ARGB32 pixel format to eMFCC_NV12.

First shader:

Texture2D tx_top : register( t0 );
Texture2D tx_bottom : register( t1 );
SamplerState sampler_in : register( s0 );
cbuffer PS_CONVERT : register( b0 )
{
    float4 size;   // source texture width (x) and height (y) in pixels
    float4 vecY;   // .rgb: RGB->Y coefficients, .a: offset
    float4 vecU;   // .rgb: RGB->U coefficients, .a: offset
    float4 vecV;   // .rgb: RGB->V coefficients
};
struct PS_INPUT
{
    float4 Pos : SV_POSITION;
    float2 Tex : TEXCOORD;
};
float4 PS(PS_INPUT input) : SV_Target
{
    // Sample the two horizontally adjacent source pixels covered by this output texel.
    float4 p1 = tx_top.Sample( sampler_in, float2(input.Tex.x - 0.5/size.x, input.Tex.y) );
    float4 p2 = tx_top.Sample( sampler_in, float2(input.Tex.x + 0.5/size.x, input.Tex.y) );
    // Average them for the horizontally subsampled chroma.
    float4 pm = (p1 + p2) * 0.5;
    // Pack one U/Y0/V/Y1 group per output texel.
    float4 yuv;
    yuv.b = dot(pm.rgb, vecU.rgb) + vecY.a;
    yuv.g = dot(p1.rgb, vecY.rgb) + vecY.a;
    yuv.r = dot(pm.rgb, vecV.rgb) + vecU.a;
    yuv.a = dot(p2.rgb, vecY.rgb) + vecY.a;
    // Clamp to limited (TV) range: 16..235 for luma, 16..240 for chroma, divided by 256.
    yuv = clamp(yuv, float4(0.0625, 0.0625, 0.0625, 0.0625), float4(0.9375, 0.9180, 0.9375, 0.9180));
    return yuv;
}
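
The question does not show what the PS_CONVERT vectors are filled with on the CPU side, and those values are where a uniform brightness shift would normally originate. For reference only, a BT.709 source converted to limited (TV) range would typically use something like the values below; the numbers and the kVec* names are an assumption for illustration, not taken from the actual code:

// Assumed BT.709 limited-range RGB->YUV constants (hypothetical values, not from the question).
// .rgb are the matrix rows, .a the offsets added in the shader above.
static const float4 kVecY = float4( 0.1826,  0.6142,  0.0620, 0.0625);   // Y row, offset 16/256
static const float4 kVecU = float4(-0.1006, -0.3386,  0.4392, 0.5000);   // U row, offset 128/256
static const float4 kVecV = float4( 0.4392, -0.3989, -0.0403, 0.5000);   // V row, offset 128/256

// Example: luma of one RGB pixel, computed the same way as yuv.g in the shader above.
float luma_709(float3 rgb)
{
    return dot(rgb, kVecY.rgb) + kVecY.a;
}

A mismatch at this point (full-range coefficients combined with the limited-range clamp above, or the 16/256 offset applied on both the shader and the consumer side) is a common cause of a picture that looks uniformly lighter.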

Second shader:

Texture2D tx : register( t0 );
SamplerState sampler_in : register( s0 );
cbuffer PS_CONVERT : register( b0 )
{
    float4 size;   // intermediate texture width (x) and height (y) in pixels
};
struct PS_INPUT
{
    float4 Pos : SV_POSITION;
    float2 Tex : TEXCOORD;
};
float4 PS(PS_INPUT input) : SV_Target
{
    // The render target is laid out like an NV12 buffer: the top 2/3 holds the
    // luma plane, the bottom 1/3 the interleaved UV plane.
    float luma_flag = step(input.Tex.y, 0.666666);
    // Map the output row back to a row of the packed U/Y0/V/Y1 texture from the first pass.
    float pos_y = luma_flag * input.Tex.y / 0.666666 + (1 - luma_flag) * (input.Tex.y - 0.666666) / 0.333333;
    float4 pix1 = tx.Sample(sampler_in, float2(input.Tex.x - 0.5 / size.x, pos_y - 0.5 * (1.0 - luma_flag) / size.y) );
    float4 pix2 = tx.Sample(sampler_in, float2(input.Tex.x + 0.5 / size.x, pos_y - 0.5 * (1.0 - luma_flag) / size.y) );
    float4 pix3 = tx.Sample(sampler_in, float2(input.Tex.x - 0.5 / size.x, pos_y + 0.5 * (1.0 - luma_flag) / size.y) );
    float4 pix4 = tx.Sample(sampler_in, float2(input.Tex.x + 0.5 / size.x, pos_y + 0.5 * (1.0 - luma_flag) / size.y) );
    // For chroma rows (luma_flag == 0) average two source rows for vertical subsampling;
    // for luma rows the vertical offsets are zero and these averages are no-ops.
    pix1 = (pix1 + pix3) * 0.5;
    pix2 = (pix2 + pix4) * 0.5;
    // Luma rows pack four Y samples per output texel; chroma rows pack two interleaved UV pairs.
    return float4(pix2.g, pix1.a, pix1.g, pix2.a) * luma_flag + float4(pix2.b, pix1.r, pix1.b, pix2.r) * (1.0 - luma_flag);
}

But the image comes out lighter than the original.

Original:

[screenshot of the original image]

After processing:

[screenshot of the processed image]
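
To narrow down where the shift is introduced, one option is a throwaway debug pass that converts the packed U/Y0/V/Y1 output of the first shader straight back to RGB and renders it next to the source. The sketch below assumes BT.709 limited range; the names tx_packed and PS_debug and the inverse coefficients are hypothetical and not part of the pipeline above:

// Hypothetical debug pass: unpack U/Y0/V/Y1 (b/g/r/a) from the first shader's
// output and convert back to RGB with the inverse BT.709 limited-range matrix.
Texture2D tx_packed : register( t0 );
SamplerState smp : register( s0 );

float3 yuv_to_rgb_709(float y, float u, float v)
{
    y = (y - 0.0625) * 1.1644;   // undo the 16/256 offset and 219/255 scaling
    u -= 0.5;                    // undo the chroma offset
    v -= 0.5;
    return saturate(float3(y + 1.7927 * v,
                           y - 0.2132 * u - 0.5329 * v,
                           y + 2.1124 * u));
}

float4 PS_debug(float4 pos : SV_POSITION, float2 tex : TEXCOORD) : SV_Target
{
    float4 p = tx_packed.Sample(smp, tex);   // b = U, g = Y0, r = V, a = Y1
    return float4(yuv_to_rgb_709(p.g, p.b, p.r), 1.0);
}

If this round trip already looks lighter than the source, the matrix and offsets of the first pass are suspect; if it matches, the shift comes from the second pass or from how the NV12 output is interpreted downstream.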

  • First of all, the question has a scent of incompleteness. Second, if you are using a commercial ML product, why wouldn't you get in touch with their support instead (and after all they speak Russian, as you would possibly prefer)? – Roman R. Feb 17 '20 at 13:59
  • We are the ML ))) – Olga Pshenichnikova Feb 17 '20 at 14:10
  • what colorspace are you operating in? rgb or yuv doesn't mean anything without specifying if you're in srgb, bt709, bt2020 or any of the other color spaces in existence. See [color grading](https://www.khanacademy.org/partner-content/pixar/color/color-space/v/color-10) – Fredrik Pihl Mar 04 '20 at 12:11
  • So it is converting from RGB 4:4:4:4; I suppose we are speaking about BT.2020 – Olga Pshenichnikova Mar 05 '20 at 14:15
