0

I am trying to get an OpenCV pipeline working with TensorFlow on a REV Control Hub for FTC. I have a trained TFLite model, and I process frames through it and retrieve the confidences. It works well as an Android app on a phone, but it doesn't seem to work on the REV Control Hub. Note the debug variable named here2: it always remains false unless I move it above model.process, which shows that model.process never completes. Any idea why this is?

package org.firstinspires.ftc.teamcode.vision;

import android.app.Activity;
import android.content.Context;
import android.graphics.Bitmap;

import org.firstinspires.ftc.robotcontroller.internal.FtcRobotControllerActivity;
import org.firstinspires.ftc.robotcore.external.Telemetry;
import org.firstinspires.ftc.teamcode.ml.ModelUnquant;
import org.opencv.core.Mat;
import org.openftc.easyopencv.OpenCvPipeline;
import org.opencv.android.Utils;
import org.tensorflow.lite.DataType;
import org.tensorflow.lite.support.tensorbuffer.TensorBuffer;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

/**
 * EasyOpenCV pipeline that classifies each camera frame with a TFLite model
 * ({@code ModelUnquant}, 224x224 RGB float input) and stores the winning
 * class index (1-3) in {@link #pos}.
 *
 * <p>NOTE(review): the original code only caught {@link IOException}, but
 * {@code Model.newInstance} and {@code model.process} throw <em>unchecked</em>
 * exceptions on the Control Hub when a delegate (NNAPI/GPU) or the model file
 * is unavailable. Those escaped {@code classifyImage} unseen, which is why
 * {@code here2} never became true. All failures are now caught and reported
 * through telemetry so the actual error is visible on the Driver Station.
 */
public class CircularTagDetectionPipeline extends OpenCvPipeline {
    Telemetry telemetry;
    public boolean done = false;   // set after the first frame has been classified
    public int pos = -1;           // last classification result (1-3); 0 on failure, -1 before first frame
    public boolean here1 = false;  // debug flag: input buffer was filled successfully
    public boolean here2 = false;  // debug flag: model inference completed successfully

    public CircularTagDetectionPipeline(Telemetry telemetry){
        this.telemetry = telemetry;
    }

    @Override
    public Mat processFrame(Mat input) {
        // Convert the OpenCV frame to an ARGB bitmap for the TFLite support library.
        Bitmap bmp = Bitmap.createBitmap(input.width(), input.height(), Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(input, bmp);
        pos = classifyImage(bmp);
        done = true;
        return input;
    }

    /**
     * Runs the TFLite classifier on one frame.
     *
     * @param image camera frame; rescaled to the model's 224x224 input size
     * @return the winning class (1, 2 or 3), or 0 if loading or inference failed
     */
    public int classifyImage(Bitmap image){
        ModelUnquant model = null;
        try {
            image = Bitmap.createScaledBitmap(image, 224, 224, false);
            model = ModelUnquant.newInstance(FtcRobotControllerActivity.contextFTC);
            int imageWidth = image.getWidth();
            int imageHeight = image.getHeight();

            // Model input: [1, 224, 224, 3] float32, RGB normalized to [0, 1].
            TensorBuffer inputFeature0 = TensorBuffer.createFixedSize(new int[]{1, 224, 224, 3}, DataType.FLOAT32);
            ByteBuffer byteBuffer = ByteBuffer.allocateDirect(4 * imageWidth * imageHeight * 3);
            byteBuffer.order(ByteOrder.nativeOrder());

            // getPixels fills intValues row-major; the single linear pass below
            // consumes the pixels in that same order.
            int[] intValues = new int[imageWidth * imageHeight];
            image.getPixels(intValues, 0, imageWidth, 0, 0, imageWidth, imageHeight);
            for (int val : intValues) {
                byteBuffer.putFloat(((val >> 16) & 0xFF) * (1.f / 255.f)); // R
                byteBuffer.putFloat(((val >> 8) & 0xFF) * (1.f / 255.f));  // G
                byteBuffer.putFloat((val & 0xFF) * (1.f / 255.f));         // B
            }
            here1 = true;

            inputFeature0.loadBuffer(byteBuffer);
            ModelUnquant.Outputs outputs = model.process(inputFeature0);
            TensorBuffer outputFeature0 = outputs.getOutputFeature0AsTensorBuffer();
            here2 = true;

            // Argmax over the class confidences.
            float[] confidences = outputFeature0.getFloatArray();
            int maxPos = 0;
            float maxConfidence = 0;
            for (int i = 0; i < confidences.length; i++) {
                if (confidences[i] > maxConfidence) {
                    maxConfidence = confidences[i];
                    maxPos = i;
                }
            }

            // Class labels are "1", "2", "3", i.e. simply maxPos + 1.
            return maxPos + 1;
        } catch (IOException e) {
            // Model file could not be loaded from the app package.
            telemetry.addLine("Model load failed: " + e);
            telemetry.update();
            return 0;
        } catch (Exception e) {
            // BUG FIX: unchecked failures from newInstance()/process() (e.g. missing
            // NNAPI/GPU delegate on the Control Hub) previously escaped uncaught,
            // so inference silently never completed. Surface them instead.
            telemetry.addLine("Inference failed: " + e);
            telemetry.update();
            return 0;
        } finally {
            // BUG FIX: the original only closed the model on the success path,
            // leaking a native interpreter on every failed frame.
            if (model != null) {
                model.close();
            }
        }
    }
}

I expect the robot to execute the desired trajectory based on the confidences, but the confidences never get computed. The same code worked in a standalone Android app, just not on the Control Hub.

0 Answers0