I am using react-native, react-native-vision-camera (with frameProcessor) and mlkit to perform in device text recognition.
My code works on Android, but I always get empty results on iOS.
Here is my Objective C implementation:
/// Frame-processor plugin: runs ML Kit on-device text recognition on a camera frame.
/// @param frame The vision-camera frame (wraps a CMSampleBuffer).
/// @param args Unused plugin arguments from the JS side.
/// @return A dictionary with the recognized text under @"text" (empty string on
///         failure — never nil, so the dictionary literal cannot throw).
static inline id scanOCR(Frame* frame, NSArray* args) {
    MLKTextRecognizer *textRecognizer = [MLKTextRecognizer textRecognizer];
    MLKVisionImage *image = [[MLKVisionImage alloc] initWithBuffer:frame.buffer];
    // NOTE(review): empty results with no error is the classic symptom of a wrong
    // orientation. frame.orientation is the frame's own UIImageOrientation, but ML Kit
    // needs the orientation computed from the device orientation AND the camera
    // position (see ML Kit's `imageOrientation(deviceOrientation:cameraPosition:)`
    // helper in the docs). Verify this mapping — for a back camera in portrait the
    // expected value is typically UIImageOrientationRight, not Up.
    image.orientation = frame.orientation;

    NSError *error = nil;
    MLKText *result = [textRecognizer resultsInImage:image error:&error];

    // Check the method's return value, not the error pointer: Cocoa APIs only
    // guarantee *error is meaningful when the call reports failure (nil result).
    if (result == nil) {
        NSLog(@"scanOCR: text recognition failed: %@", error);
        // Returning a nil value inside @{...} would throw NSInvalidArgumentException;
        // return an explicit empty string instead.
        return @{@"text": @""};
    }

    NSLog(@"text: %@", result.text);
    NSLog(@"blocks length: %lu", (unsigned long)result.blocks.count);

    // result.text is nil-coalesced defensively for the same dictionary-literal reason.
    return @{@"text": result.text ?: @""};
}
The project compiles correctly, with no errors, but result.text
is always empty, even when the camera is framing text.
I think that MLKit is correctly installed. This is my Podfile
# CocoaPods manifest for the React Native iOS app.
# Pulls in the React Native pod helpers and the community CLI's autolinking.
require_relative '../node_modules/react-native/scripts/react_native_pods'
require_relative '../node_modules/@react-native-community/cli-platform-ios/native_modules'
# Minimum iOS deployment target for all pods in this file.
# NOTE(review): confirm GoogleMLKit 2.2.0's minimum iOS version is <= 11.0.
platform :ios, '11.0'
target 'com.digitalbore.papertag' do
# Autolink React Native modules found in node_modules.
config = use_native_modules!
use_react_native!(
:path => config[:reactNativePath],
# to enable hermes on iOS, change `false` to `true` and then install pods
:hermes_enabled => false
)
# On-device text recognition (ML Kit), pinned to 2.2.0.
pod 'GoogleMLKit/TextRecognition','2.2.0'
target 'com.digitalbore.papertagTests' do
# Inherit all build settings and pods from the parent app target.
inherit! :complete
# Pods for testing
# NOTE(review): these two are app pods, not test pods — they are declared inside
# the Tests target and only reach the app via `inherit!`. Verify this is
# intentional; they are usually declared directly in the main target.
pod 'react-native-splash-screen', :path => '../node_modules/react-native-splash-screen'
pod 'react-native-receive-sharing-intent', :path => '../node_modules/react-native-receive-sharing-intent'
end
# Enables Flipper.
#
# Note that if you have use_frameworks! enabled, Flipper will not work and
# you should disable the next line.
use_flipper!()
post_install do |installer|
# Standard React Native post-install fixups plus the Xcode 12.5 / M1 workaround.
react_native_post_install(installer)
__apply_Xcode_12_5_M1_post_install_workaround(installer)
end
end