
I am trying to convert a Camera2 Face rect into the actual view's coordinates so that I can draw a circle over the face detected by the Camera2 API.

I am able to get the number of faces and their data in the callback with the code below:

private CameraCaptureSession.CaptureCallback mCaptureCallback
= new CameraCaptureSession.CaptureCallback() {
    private void process(CaptureResult result) {
        Integer mode = result.get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
        Face [] faces = result.get(CaptureResult.STATISTICS_FACES);
        if(faces != null && mode != null)
            Log.e("tag", "faces : " + faces.length + " , mode : " + mode ); 
    }

    @Override
    public void onCaptureProgressed(CameraCaptureSession session, CaptureRequest request, CaptureResult partialResult) {
        process(partialResult);
    }

    @Override
    public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request, TotalCaptureResult result) {
        process(result);
    }
};
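For completeness, the faces array is only populated if face detection is enabled on the repeating capture request. Below is a minimal sketch of that step, assuming the usual Camera2Basic-style fields; the names mPreviewRequestBuilder and mCameraCharacteristics are mine, not from the question:

// Check which face detect modes the device supports and enable SIMPLE mode
// on the preview request so that STATISTICS_FACES is populated in the results.
int[] faceModes = mCameraCharacteristics.get(
        CameraCharacteristics.STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);
boolean supportsFaces = false;
if (faceModes != null) {
    for (int m : faceModes) {
        if (m != CameraMetadata.STATISTICS_FACE_DETECT_MODE_OFF) {
            supportsFaces = true;
            break;
        }
    }
}
if (supportsFaces) {
    mPreviewRequestBuilder.set(CaptureRequest.STATISTICS_FACE_DETECT_MODE,
            CameraMetadata.STATISTICS_FACE_DETECT_MODE_SIMPLE);
}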

So far I have tried the code below to convert the Face rect to actual view coordinates, but it does not seem to work:

/**
* Callback from the CameraCaptureSession.CaptureCallback
*/
@Override
public void onFaceDetection(Face[] faces) {
    if (mCameraView != null) {
        setFaceDetectionMatrix();
        setFaceDetectionLayout(faces);
    }
}

/**
 * This method builds the matrix used to scale, rotate and translate the face rect into view coordinates
 */
private void setFaceDetectionMatrix() {
    // Face Detection Matrix
    mFaceDetectionMatrix = new Matrix();
    // Need mirror for front camera.
    boolean mirror = mCameraView.getFacing() == CameraView.FACING_FRONT;
    mFaceDetectionMatrix.setScale(mirror ? -1 : 1, 1);
    mFaceDetectionMatrix.postRotate(mCameraDisplayOrientation);

    Rect activeArraySizeRect = mCameraView.getCameraCharacteristics().get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
    Log.i("Test", "activeArraySizeRect1: (" + activeArraySizeRect + ") -> " + activeArraySizeRect.width() + ", " + activeArraySizeRect.height());
    Log.i("Test", "activeArraySizeRect2: " + cameraOverlayDrawingView.getWidth() + ", " + cameraOverlayDrawingView.getHeight());
    float s1 = cameraOverlayDrawingView.getWidth() / activeArraySizeRect.width();
    float s2 = cameraOverlayDrawingView.getHeight() / activeArraySizeRect.height();
    mFaceDetectionMatrix.postScale(s1, s2);
    mFaceDetectionMatrix.postTranslate(cameraOverlayDrawingView.getWidth() / 2, cameraOverlayDrawingView.getHeight() / 2);
}

/**
 * This method applies the matrix to map each face rect into view coordinates
 */
private void setFaceDetectionLayout(Face[] faces) {
    if (faces.length == 0) {
        cameraOverlayDrawingView.setHaveFaces(false, null);
    } else if (faces.length > 0) {
        List<Rect> faceRects;
        faceRects = new ArrayList<>();
        for (int i = 0; i < faces.length; i++) {
            Log.i("Test", "Activity face" + i + " bounds: " + faces[i].getBounds());
            if (faces[i].getScore() > 50) {
                int left = faces[i].getBounds().left;
                int top = faces[i].getBounds().top;
                int right = faces[i].getBounds().right;
                int bottom = faces[i].getBounds().bottom;

                Rect uRect = new Rect(left, top, right, bottom);
                RectF rectF = new RectF(uRect);
                mFaceDetectionMatrix.mapRect(rectF);
                uRect.set((int) rectF.left, (int) rectF.top, (int) rectF.right, (int) rectF.bottom);
                Log.i("Test", "Activity rect" + i + " bounds: " + uRect);
                faceRects.add(uRect);
            }
        }
        cameraOverlayDrawingView.setHaveFaces(true, faceRects);
    }
}
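For reference, cameraOverlayDrawingView is a custom overlay; the code above only calls setHaveFaces(...), so the following is just a sketch of what such a view might look like, not the asker's actual class:

// Hypothetical overlay view: stores the mapped face rects and draws a circle
// around each one. Only the setHaveFaces(...) contract from the question is assumed.
public class CameraOverlayDrawingView extends View {
    private final Paint mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
    private List<Rect> mFaceRects = Collections.emptyList();

    public CameraOverlayDrawingView(Context context, AttributeSet attrs) {
        super(context, attrs);
        mPaint.setStyle(Paint.Style.STROKE);
        mPaint.setStrokeWidth(5f);
        mPaint.setColor(Color.YELLOW);
    }

    public void setHaveFaces(boolean haveFaces, List<Rect> faceRects) {
        mFaceRects = haveFaces ? faceRects : Collections.<Rect>emptyList();
        invalidate(); // trigger onDraw with the new rects
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        for (Rect r : mFaceRects) {
            // Draw a circle centered on the mapped face rect.
            canvas.drawCircle(r.centerX(), r.centerY(),
                    Math.max(r.width(), r.height()) / 2f, mPaint);
        }
    }
}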
  • It would be worth posting a sample app on GitHub so we could play around with it and maybe find a solution – romtsn Feb 04 '18 at 19:09
  • I have added code to draw a circle over the face based on this link: https://github.com/rajktariya/Android-Camera2-Front-with-Face-Detection/blob/master/Application/src/main/java/com/example/android/camera2basic/Camera2BasicFragment.java – Rushabh Patel Feb 06 '18 at 03:42
  • Are those calculations wrong, or what do you mean by "it's not working"? Maybe post a screenshot? – Ovidiu Dolha Feb 07 '18 at 12:31
  • You mean it's not working, so is there any error? – core114 Feb 07 '18 at 13:01
  • @OvidiuDolha: Yes, the current calculation is not showing the face where it should. – Rushabh Patel Feb 07 '18 at 14:23
  • The view on top of the camera shows a yellow circle where the face rect is, but because of the calculation it ends up outside of the screen. The Camera1 API provided that calculation, but the Camera2 API does not. I am following this link: https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#SENSOR_INFO_ACTIVE_ARRAY_SIZE – Rushabh Patel Feb 07 '18 at 14:29
  • It seems so, even though your matrix is not like the one described at https://developer.android.com/reference/android/hardware/Camera.Face.html. With both matrices I cannot get the right coords – shadowsheep Feb 08 '18 at 13:07
  • @shadowsheep: Yes, the Camera and Camera2 face metrics are different. – Rushabh Patel Feb 08 '18 at 15:17
  • Anyway, with your matrix I get a point near the faces. I get a Rect with (top, left) = (right, bottom); I haven't figured out why – shadowsheep Feb 08 '18 at 15:33
  • @shadowsheep: I got those same values. By the way, did you make any changes to the calculation? – Rushabh Patel Feb 08 '18 at 16:06
  • Not yet. I'll be sure to let you know if I find something. – shadowsheep Feb 08 '18 at 17:39
  • Found one problem in your code: you do an integer division. This should solve your problem: `float s1 = cameraOverlayDrawingView.getWidth() / (float) activeArraySizeRect.width(); float s2 = cameraOverlayDrawingView.getHeight() / (float) activeArraySizeRect.height();` And this should be more correct: `mFaceDetectionMatrix.postTranslate(cameraOverlayDrawingView.getWidth() / 2f, cameraOverlayDrawingView.getHeight() / 2f);` (2f instead of 2). – shadowsheep Feb 09 '18 at 11:40
  • It is still not working; the preview looks like this link: https://drive.google.com/file/d/1vITjxML0CUrzIDWWtmDXvHDIPAMXJUt6/view?usp=sharing – Rushabh Patel Feb 09 '18 at 12:28
  • @RushabhPatel I found the right math in portrait. If you want the code I used to test it, check the GitHub link in my answer! Hope it helps! – shadowsheep Feb 10 '18 at 07:47

1 Answer


NEW: I've managed to handle all of my phone rotations. The offsetDxDy, I guess, depends on my layout, but to tell you the truth I don't know why I put a value of 100. It works well on my Huawei P9 and I found it empirically. I still haven't tried to find out whether it depends on my phone, on my XML layout, or both.

Anyway, the matrices are now worked out, so you can adapt them to fit your needs.

Note: my setRotation is not fully general, because I didn't parametrize it on

int orientationOffset = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);

You can try to do that yourself so that the code is fully general and works with a SENSOR_ORIENTATION different from the one in this example, which is 270.

So this code works on a phone whose hardware camera sensor has an orientation of 270.

The Huawei P9 has it.

Just to give you an idea, this version binds the rotation to the HW sensor orientation; it also works well on my P9 (but I don't have any other hardware to test on):

if (mSwappedDimensions) {
    // Display Rotation 0
    mFaceDetectionMatrix.setRotate(orientationOffset);
    mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
    mFaceDetectionMatrix.postTranslate(mPreviewSize.getHeight() + offsetDxDy, mPreviewSize.getWidth() + offsetDxDy);
} else {
    // Display Rotation 90 e 270
    if (displayRotation == Surface.ROTATION_90) {
        mFaceDetectionMatrix.setRotate(orientationOffset + 90);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(mPreviewSize.getWidth() + offsetDxDy, -offsetDxDy);
    } else if (displayRotation == Surface.ROTATION_270) {
        mFaceDetectionMatrix.setRotate(orientationOffset + 270);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(-offsetDxDy, mPreviewSize.getHeight() + offsetDxDy);
    }
}
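For reference, the mSwappedDimensions and displayRotation values used above follow the standard Camera2Basic sample; my code assumes they already exist. A sketch of how they are usually derived (activity here stands for your host Activity):

// Determine whether the preview width/height are swapped relative to the display.
// Based on the Camera2Basic sample; variable names match the code above.
int displayRotation = activity.getWindowManager().getDefaultDisplay().getRotation();
int sensorOrientation = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
boolean mSwappedDimensions = false;
switch (displayRotation) {
    case Surface.ROTATION_0:
    case Surface.ROTATION_180:
        // Sensor is mounted 90/270 degrees from the natural display orientation.
        if (sensorOrientation == 90 || sensorOrientation == 270) {
            mSwappedDimensions = true;
        }
        break;
    case Surface.ROTATION_90:
    case Surface.ROTATION_270:
        if (sensorOrientation == 0 || sensorOrientation == 180) {
            mSwappedDimensions = true;
        }
        break;
}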

Here is my final code (also available on GitHub):

int orientationOffset = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
Rect activeArraySizeRect = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);

// Face Detection Matrix
mFaceDetectionMatrix = new Matrix();

Log.i("Test", "activeArraySizeRect1: (" + activeArraySizeRect + ") -> " + activeArraySizeRect.width() + ", " + activeArraySizeRect.height());
Log.i("Test", "activeArraySizeRect2: " + mPreviewSize.getWidth() + ", " + mPreviewSize.getHeight());
float s1 = mPreviewSize.getWidth() / (float)activeArraySizeRect.width();
float s2 = mPreviewSize.getHeight() / (float)activeArraySizeRect.height();
//float s1 = mOverlayView.getWidth();
//float s2 = mOverlayView.getHeight();
boolean mirror = (facing == CameraCharacteristics.LENS_FACING_FRONT); // we always use front face camera
boolean weAreinPortrait = true;
int offsetDxDy = 100;
if (mSwappedDimensions) {
    // Display Rotation 0
    mFaceDetectionMatrix.setRotate(270);
    mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
    mFaceDetectionMatrix.postTranslate(mPreviewSize.getHeight() + offsetDxDy, mPreviewSize.getWidth() + offsetDxDy);
} else {
    // Display Rotation 90 e 270
    if (displayRotation == Surface.ROTATION_90) {
        mFaceDetectionMatrix.setRotate(0);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(mPreviewSize.getWidth() + offsetDxDy, -offsetDxDy);
    } else if (displayRotation == Surface.ROTATION_270) {
        mFaceDetectionMatrix.setRotate(180);
        mFaceDetectionMatrix.postScale(mirror ? -s1 : s1, s2);
        mFaceDetectionMatrix.postTranslate(-offsetDxDy, mPreviewSize.getHeight() + offsetDxDy);
    }
}

This is the public GitHub repo where you can find the code: https://github.com/shadowsheep1/android-camera2-api-face-recon. Hope it helps.


Anyway, just to give you some theory as well: what you are doing is a 2D plane transformation. You have a plane (the HW sensor) and you have to remap the objects on that plane onto your preview plane.

So you have to take care of:

  • Rotation: this depends on your HW sensor rotation and the phone rotation.
  • Mirroring: horizontal mirroring depends on whether you are using the front-facing camera, and vertical mirroring depends on the phone rotation. Mirroring is done with a '-' sign in the scaling matrix.
  • Translation: this depends on where your objects end up after the rotation (which in turn depends on the rotation center you are using) and the scaling, so you have to move them back into place inside your preview View (a small mapping sketch follows this list).
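To make the mapping concrete, here is a small sketch of how each face rect coming from the sensor is pushed through the composed matrix before being handed to the overlay view (the same idea as setFaceDetectionLayout in the question; mFaceDetectionMatrix is the matrix built above, and mapFacesToPreview is just a name I made up):

// Map sensor-space (active array) face bounds into preview/view coordinates
// using the matrix composed above; faces come from CaptureResult.STATISTICS_FACES.
private List<Rect> mapFacesToPreview(Face[] faces) {
    List<Rect> mapped = new ArrayList<>();
    for (Face face : faces) {
        RectF r = new RectF(face.getBounds()); // active-array coordinates
        mFaceDetectionMatrix.mapRect(r);       // rotate + mirror + scale + translate
        mapped.add(new Rect((int) r.left, (int) r.top, (int) r.right, (int) r.bottom));
    }
    return mapped;
}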

Math Theory

I also wrote some technical posts on my blog a while ago, but they are in Italian.
