0

Can someone help with understanding how micromutableops resolver is used in Arduino instead of Allopsresolver? I am trying it to reduce my sketch size and i obtained the ops in my model using Netron but i cant quite get past The model invocation. This is the code?

  static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing, it's a very lightweight operation.

  model = tflite::GetModel(slu_model_lug150_tflite);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.",
                         model->version(), TFLITE_SCHEMA_VERSION);
    return;
  }

  // This pulls in all the operation implementations we need.
  // NOLINTNEXTLINE(runtime-global-variables)
  //static tflite::MicroMutableOpResolver<1> resolver;
  // static tflite::AllOpsResolver resolver;


// Set up a mutable op resolver for the TFLite model
tflite::MicroMutableOpResolver<13> resolver;

// Add quantization and dequantization ops
resolver.AddQuantize();
resolver.AddDequantize();

// Add ops for each layer in the model
resolver.AddDepthwiseConv2D();
resolver.AddConv2D();
resolver.AddBatchToSpaceNd();
resolver.AddRelu();
resolver.AddMul();
resolver.AddAdd();
resolver.AddAssignVariable();
resolver.AddReadVariable();
resolver.AddExp();
resolver.AddShape();
// resolver.Add

resolver.AddConv2D();
resolver.AddRelu();
resolver.AddMul();
resolver.AddAdd();
resolver.AddMaxPool2D();

resolver.AddConv2D();
resolver.AddRelu();
resolver.AddMul();
resolver.AddAdd();
resolver.AddMaxPool2D();

resolver.AddConv2D();
resolver.AddRelu();
resolver.AddMul();
resolver.AddAdd();
resolver.AddMaxPool2D();

resolver.AddConv2D();
resolver.AddRelu();
resolver.AddMul();
resolver.AddAdd();
resolver.AddReduceMax();

resolver.AddFullyConnected();
resolver.AddRelu();

resolver.AddFullyConnected();
resolver.AddRelu();

resolver.AddFullyConnected();
resolver.AddSoftmax();

resolver.AddReshape();
 
  // Build an interpreter to run the model with.
  static tflite::MicroInterpreter static_interpreter(model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Allocate memory from the tensor_arena for the model's tensors.
  TfLiteStatus allocate_status = interpreter->AllocateTensors();
  if (allocate_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
    return;
  }

  // Obtain pointers to the model's input and output tensors. One input and one output.
  input = interpreter->input(0);

  output_intent = interpreter->output(0);
//The header files included are the following
#include <TensorFlowLite.h>
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
// #include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
TfLiteTensor* output_intent = nullptr;

constexpr int kTensorArenaSize = 1024*55;
uint8_t tensor_arena[kTensorArenaSize];
}

These are the ops extracted from the model using interpreter.get_tensor_details()

serving_default_main_input:0
model_8/global_max_pooling2d/Max/reduction_indices
model_8/conv2d/Conv2D
model_8/conv2d_2/Conv2D
model_8/conv2d_2/Conv2D1
model_8/conv2d_2/Conv2D2
model_8/batch_normalization/FusedBatchNormV3
model_8/batch_normalization/FusedBatchNormV31
model_8/conv2d_1/Conv2D
model_8/batch_normalization_1/FusedBatchNormV3
model_8/batch_normalization_1/FusedBatchNormV31
model_8/conv2d_2/Conv2D11
model_8/batch_normalization_2/FusedBatchNormV3
model_8/batch_normalization_2/FusedBatchNormV31
model_8/conv2d_3/Conv2D1
model_8/conv2d_3/Conv2D
model_8/batch_normalization_3/FusedBatchNormV3
model_8/batch_normalization_3/FusedBatchNormV31
model_8/conv2d_4/Conv2D1
model_8/conv2d_4/Conv2D
model_8/batch_normalization_4/FusedBatchNormV3
model_8/batch_normalization_4/FusedBatchNormV31
model_8/dense_1/MatMul
model_8/dense_1/BiasAdd/ReadVariableOp
model_8/dense_2/MatMul
model_8/dense_2/BiasAdd/ReadVariableOp
model_8/intent_output/MatMul
model_8/conv2d/Relu;model_8/conv2d/Conv2D
model_8/batch_normalization/FusedBatchNormV32
model_8/batch_normalization/FusedBatchNormV33
model_8/conv2d_1/Relu;model_8/conv2d_1/Conv2D
model_8/batch_normalization_1/FusedBatchNormV32
model_8/batch_normalization_1/FusedBatchNormV33
model_8/max_pooling2d/MaxPool
model_8/conv2d_2/Relu;model_8/conv2d_2/Conv2D
model_8/batch_normalization_2/FusedBatchNormV32
model_8/batch_normalization_2/FusedBatchNormV33
model_8/max_pooling2d_1/MaxPool
model_8/conv2d_3/Relu;model_8/conv2d_3/Conv2D
model_8/batch_normalization_3/FusedBatchNormV32
model_8/batch_normalization_3/FusedBatchNormV33
model_8/max_pooling2d_2/MaxPool
model_8/conv2d_4/Relu;model_8/conv2d_4/Conv2D
model_8/batch_normalization_4/FusedBatchNormV32
model_8/batch_normalization_4/FusedBatchNormV33
model_8/global_max_pooling2d/Max
model_8/dense_1/MatMul;model_8/dense_1/Relu;model_8/dense_1/BiasAdd
model_8/dense_2/MatMul;model_8/dense_2/Relu;model_8/dense_2/BiasAdd
model_8/intent_output/MatMul2
StatefulPartitionedCall:0

The sketch compiles but the program crashes on the Wio Terminal at the time when the model is invoked. I expected the model to work. The one with AllOpsResolver works very well but a big program size is obtained which is not appropriate.static tflite::AllOpsResolver resolver;

0 Answers0