1

I am trying to align two images using OpenCV. I based this code off of a C++/Python tutorial I found: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/

The findTransformECC() function in android requires an extra parameter for inputMask. The C++ and Python functions don't have this.

My Code:

import android.graphics.Bitmap;

import org.opencv.android.Utils;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.TermCriteria;
import org.opencv.imgproc.Imgproc;

import static org.opencv.core.CvType.CV_32F;
import static org.opencv.video.Video.MOTION_EUCLIDEAN;
import static org.opencv.video.Video.findTransformECC;

public class ImageProcessor {

    public static Bitmap alignImages(Bitmap A, Bitmap B){
        final int warp_mode = MOTION_EUCLIDEAN;
        Mat matA = new Mat(A.getHeight(),A.getWidth(), CvType.CV_8UC3);
        Mat matAgray = new Mat(A.getHeight(),A.getWidth(), CvType.CV_8U);
        Mat matB = new Mat(B.getHeight(),B.getWidth(), CvType.CV_8UC3);
        Mat matBgray = new Mat(B.getHeight(),B.getWidth(), CvType.CV_8U);
        Mat matBaligned = new Mat(A.getHeight(),A.getWidth(), CvType.CV_8UC3);
        Mat warpMatrix = Mat.eye(3,3,CV_32F);

        Utils.bitmapToMat(A,matA);
        Utils.bitmapToMat(B,matB);


        Imgproc.cvtColor(matA,matAgray, Imgproc.COLOR_BGR2GRAY);
        Imgproc.cvtColor(matB,matBgray,Imgproc.COLOR_BGR2GRAY);

        int numIter = 5000;
        double terminationEps = 1e-10;
        TermCriteria criteria = new TermCriteria(TermCriteria.COUNT+TermCriteria.EPS,numIter,terminationEps);

        findTransformECC(matAgray,matBgray,warpMatrix,warp_mode,criteria,matBgray);
        Imgproc.warpPerspective(matA,matBaligned,warpMatrix,matA.size(),Imgproc.INTER_LINEAR+ Imgproc.WARP_INVERSE_MAP);
        Bitmap alignedBMP = Bitmap.createBitmap(A.getWidth(),A.getHeight(),null);
        Utils.matToBitmap(matBaligned,alignedBMP);
        return alignedBMP;
    }

}

I get the following error:

W/System.err: CvException [org.opencv.core.CvException: cv::Exception: /build/master_pack-android/opencv/modules/imgproc/src/imgwarp.cpp:5987: error: (-215) (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 in function void cv::warpAffine(cv::InputArray, cv::OutputArray, cv::InputArray, cv::Size, int, int, const Scalar&)
W/System.err: ]
W/System.err:     at org.opencv.video.Video.findTransformECC_0(Native Method)
W/System.err:     at org.opencv.video.Video.findTransformECC(Video.java:132)
W/System.err:     at com.test.imgptest.ImageProcessor.alignImages(ImageProcessor.java:42)
W/System.err:     at com.test.imgptest.MainActivity.onActivityResult(MainActivity.java:141)
W/System.err:     at android.app.Activity.dispatchActivityResult(Activity.java:6937)
W/System.err:     at android.app.ActivityThread.deliverResults(ActivityThread.java:4122)
W/System.err:     at android.app.ActivityThread.handleSendResult(ActivityThread.java:4169)
W/System.err:     at android.app.ActivityThread.-wrap20(ActivityThread.java)
W/System.err:     at android.app.ActivityThread$H.handleMessage(ActivityThread.java:1552)
W/System.err:     at android.os.Handler.dispatchMessage(Handler.java:102)
W/System.err:     at android.os.Looper.loop(Looper.java:154)
W/System.err:     at android.app.ActivityThread.main(ActivityThread.java:6186)
W/System.err:     at java.lang.reflect.Method.invoke(Native Method)
W/System.err:     at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:889)
W/System.err:     at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:779)
xSooDx
  • 493
  • 1
  • 5
  • 19

2 Answers

2

You're inputting a full 3x3 homography matrix warpMatrix to findTransformECC but your selected warp_mode is MOTION_EUCLIDEAN.

If you want to use a 3x3 homography, then you should set warp_mode to MOTION_HOMOGRAPHY.

If you want a Euclidean transformation, you simply need to clip the last row off your input warpMatrix as Euclidean transformations are given by 2x3 matrices. According to the findTransformECC() documentation,

MOTION_EUCLIDEAN sets a Euclidean (rigid) transformation as motion model; three parameters are estimated; warpMatrix is 2x3.

Then since you'll be working with 2x3 warp matrices, use warpAffine() instead of warpPerspective() to align the images.

alkasm
  • 22,094
  • 5
  • 78
  • 94
  • Thank You! I meant to keep it `MOTION_HOMOGRAPHY`. feel stupid now. :P – xSooDx Jun 06 '17 at 09:24
  • 1
    @xSooDx it happens to the best of us! I edited my answer a little bit to include that possibility as well, for anyone else. – alkasm Jun 06 '17 at 09:54
1

The complete functions from the accepted answer:

/**
 * Aligns image B to image A using ECC with a full 3x3 homography model.
 *
 * @param A the reference bitmap
 * @param B the bitmap to be warped into A's coordinate frame
 * @return a new bitmap containing B aligned to A
 */
public static Bitmap alignImagesHomography(Bitmap A, Bitmap B)
{
    final int warp_mode = MOTION_HOMOGRAPHY;
    Mat matA = new Mat(A.getHeight(), A.getWidth(), CvType.CV_8UC3);
    Mat matAgray = new Mat(A.getHeight(), A.getWidth(), CvType.CV_8U);
    Mat matB = new Mat(B.getHeight(), B.getWidth(), CvType.CV_8UC3);
    Mat matBgray = new Mat(B.getHeight(), B.getWidth(), CvType.CV_8U);
    Mat matBaligned = new Mat(A.getHeight(), A.getWidth(), CvType.CV_8UC3);
    // MOTION_HOMOGRAPHY estimates a full 3x3 matrix.
    Mat warpMatrix = Mat.eye(3, 3, CV_32F);
    Utils.bitmapToMat(A, matA);
    Utils.bitmapToMat(B, matB);
    // bitmapToMat produces RGBA; RGBA2GRAY applies the correct channel
    // weights (BGR2GRAY would swap the R and B contributions).
    Imgproc.cvtColor(matA, matAgray, Imgproc.COLOR_RGBA2GRAY);
    Imgproc.cvtColor(matB, matBgray, Imgproc.COLOR_RGBA2GRAY);
    int numIter = 5;
    double terminationEps = 1e-10;
    TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, numIter, terminationEps);
    // An empty Mat means "no input mask"; passing matBgray as the mask
    // (as before) would wrongly exclude every zero-valued pixel from the
    // ECC estimation.
    findTransformECC(matAgray, matBgray, warpMatrix, warp_mode, criteria, new Mat());
    // Warp B with the estimated 3x3 transform to align it to A.
    Imgproc.warpPerspective(matB, matBaligned, warpMatrix, matA.size(), Imgproc.INTER_LINEAR + Imgproc.WARP_INVERSE_MAP);
    Bitmap alignedBMP = Bitmap.createBitmap(A.getWidth(), A.getHeight(), Bitmap.Config.RGB_565);
    Utils.matToBitmap(matBaligned, alignedBMP);
    return alignedBMP;
}

/**
 * Aligns image B to image A using ECC with a Euclidean (rigid) motion model.
 *
 * @param A the reference bitmap
 * @param B the bitmap to be warped into A's coordinate frame
 * @return a new bitmap containing B aligned to A
 */
public static Bitmap alignImagesEuclidean(Bitmap A, Bitmap B)
{
    final int warp_mode = MOTION_EUCLIDEAN;
    Mat matA = new Mat(A.getHeight(), A.getWidth(), CvType.CV_8UC3);
    Mat matAgray = new Mat(A.getHeight(), A.getWidth(), CvType.CV_8U);
    Mat matB = new Mat(B.getHeight(), B.getWidth(), CvType.CV_8UC3);
    Mat matBgray = new Mat(B.getHeight(), B.getWidth(), CvType.CV_8U);
    Mat matBaligned = new Mat(A.getHeight(), A.getWidth(), CvType.CV_8UC3);
    // MOTION_EUCLIDEAN estimates a 2x3 matrix (rotation + translation).
    Mat warpMatrix = Mat.eye(2, 3, CV_32F);
    Utils.bitmapToMat(A, matA);
    Utils.bitmapToMat(B, matB);
    // bitmapToMat produces RGBA; RGBA2GRAY applies the correct channel
    // weights (BGR2GRAY would swap the R and B contributions).
    Imgproc.cvtColor(matA, matAgray, Imgproc.COLOR_RGBA2GRAY);
    Imgproc.cvtColor(matB, matBgray, Imgproc.COLOR_RGBA2GRAY);
    int numIter = 5;
    double terminationEps = 1e-10;
    TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, numIter, terminationEps);
    // An empty Mat means "no input mask"; passing matBgray as the mask
    // (as before) would wrongly exclude every zero-valued pixel from the
    // ECC estimation.
    findTransformECC(matAgray, matBgray, warpMatrix, warp_mode, criteria, new Mat());
    // A 2x3 matrix pairs with warpAffine (warpPerspective needs 3x3).
    Imgproc.warpAffine(matB, matBaligned, warpMatrix, matA.size(), Imgproc.INTER_LINEAR + Imgproc.WARP_INVERSE_MAP);
    Bitmap alignedBMP = Bitmap.createBitmap(A.getWidth(), A.getHeight(), Bitmap.Config.RGB_565);
    Utils.matToBitmap(matBaligned, alignedBMP);
    return alignedBMP;
}

And here is a faster version using AlignMTB:

/**
 * Aligns a burst of exposures with AlignMTB and averages them into one bitmap.
 *
 * NOTE(review): bitmapWidth/bitmapHeight are fields declared elsewhere in
 * this class — presumably the dimensions of the input Mats; verify they match.
 *
 * @param image_list the exposures to align; the first entry is the reference
 *                   and is overwritten with the running average
 * @return a bitmap of the aligned, averaged result
 */
public static Bitmap AlignExposures(Mat[] image_list) {
    List<Mat> src = Arrays.asList(image_list);
    Bitmap output = Bitmap.createBitmap(bitmapWidth, bitmapHeight, Bitmap.Config.ARGB_8888);
    AlignMTB align = createAlignMTB(8, 4, false);
    align.process(src, src);
    for (int i = 1; i < src.size(); i++) {
        // Incremental mean: after iteration i, src.get(0) holds the average
        // of the first i+1 frames. The weight must be computed in floating
        // point — the original 1/(i+1) was integer division, which is 0 for
        // every i >= 1, so no frame after the first was ever blended in.
        double w = 1.0 / (i + 1);
        addWeighted(src.get(0), 1 - w, src.get(i), w, 0, src.get(0));
    }
    Utils.matToBitmap(src.get(0), output);
    return output;
}
  • There is a bug in the first two functions. findTransformECC() finds a mapping from matBgray onto matAgray (see [the docs](https://docs.opencv.org/3.2.0/dc/d6b/group__video__track.html#ga7ded46f9a55c0364c92ccd2019d43e3a)), but warpPerspective() and warpAffine(), respectively, use the mapping to transform matA, storing the result in matBaligned; they should use the mapping to transform matB. The code on learnopencv.com is correct, so this is most likely a transcription error by the OP. – Chungzuwalla Jul 25 '18 at 05:46
  • There is also a risk with the method using AlignMTB. (AlignMTB is intended for creating HDR images from multiple exposures.) The aligned images are all added back onto the first source image, and if this addition exceeds the dynamic range of the source image's datatype (which is likely if the input images are 8-bit greyscale or 24-bit RGB), the image will be saturated. To avoid that, it would be necessary to create a new OpenCV Mat with a larger datatype (16-bit or 32-bit integer, or float) to store the summed results, then compress back to a smaller dynamic range (eg. by averaging). – Chungzuwalla Jul 25 '18 at 06:04
  • Thank you @Chungzuwalla for pointing this out, I edited my answer accordingly. – BenjaminWegener Aug 01 '18 at 08:37