I’m new in C++ and libtorch, I try load model by torchscript and execute inference, the codes like below:
torch::jit::script::Module module;
try {
module = torch::jit::load("../../weights/card_extraction/pytorch/2104131340/best_model_27_mAP=0.9981_torchscript.pt");
}
catch (const c10::Error& e) {
std::cerr << "Error to load model\n";
return -1;
}
std::cout << "Load model successful!\n";
torch::DeviceType device_type;
device_type = torch::kCPU;
torch::Device device(device_type, 0);
module.to(device);
torch::Tensor sample = torch::zeros({3, 800, 800});
std::vector<torch::jit::IValue> inputs;
std::vector<torch::Tensor> images;
images.push_back(sample);
/* images.push_back(torch::ones({3, 224, 224})); */
inputs.push_back(images);
auto t1 = std::chrono::high_resolution_clock::now();
auto output = module.forward(inputs);
auto t2 = std::chrono::high_resolution_clock::now();
int duration = std::chrono::duration_cast<std::chrono::milliseconds> (t2 - t1).count();
std::cout << "Inference time: " << duration << " ms" << std::endl;
std::cout << output << std::endl;
And the result like this:
Load model successful!
[W mask_rcnn.py:86] Warning: RCNN always returns a (Losses, Detections) tuple in scripting (function )
Inference time: 2321 ms
({}, [{boxes: [ CPUFloatType{0,4} ], labels: [ CPULongType{0} ], scores: [ CPUFloatType{0} ], masks: [ CPUFloatType{0,1,800,800} ]}])
How do I get value boxes, labels, scores and masks from return output object using c++ ? I tried many ways but compile always error with “c10::IValue” error thrown.
And more question, why is the time inference when I convert the model to torchscript, executed by C++ is slower than python? Many thanks