I have a simple app that contains a button, a UIImageView, and a label. Tapping the button lets you take a photo with the camera; the model should then predict what object is in the photo, and finally the label should display the prediction.
Everything compiles and runs without errors, but after I take a picture the label never changes and the model never returns anything. Why is that?
NOTE: The model itself works fine; I have tested it in another app. I think I am missing something in this code.
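For reference, the working test in the other app was essentially just the generated Core ML API called on a preconverted buffer, roughly like this (simplified from memory; pixelBuffer(from:) stands in for that app's conversion helper and is not real API):

let model = VGG16()
// pixelBuffer(from:) is a placeholder for the test app's UIImage-to-CVPixelBuffer helper
if let buffer = pixelBuffer(from: UIImage(named: "testPhoto")!),
   let prediction = try? model.prediction(image: buffer) {
    print(prediction.classLabel) // printed the expected label there
}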
Here is my code:
import UIKit
import CoreML

class secondViewController: UIViewController, UINavigationControllerDelegate {

    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var classifier: UILabel!

    var model: VGG16!
    let cameraPicker = UIImagePickerController()

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        model = VGG16()
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    @IBAction func camera(_ sender: Any) {
        if !UIImagePickerController.isSourceTypeAvailable(.camera) {
            return
        }
        cameraPicker.delegate = self
        cameraPicker.sourceType = .camera
        cameraPicker.allowsEditing = false
        present(cameraPicker, animated: true)
    }
}
extension secondViewController: UIImagePickerControllerDelegate {

    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        dismiss(animated: true, completion: nil)
    }
    private func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
        picker.dismiss(animated: true, completion: nil)
        let image = info[.originalImage] as! UIImage
        classifier.text = "Analyzing Image..."

        // Resize the photo to the 299x299 input the model expects.
        UIGraphicsBeginImageContextWithOptions(CGSize(width: 299, height: 299), true, 2.0)
        image.draw(in: CGRect(x: 0, y: 0, width: 299, height: 299))
        let newImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()

        // Convert the resized UIImage into a CVPixelBuffer for Core ML.
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
                     kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(newImage.size.width), Int(newImage.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        guard status == kCVReturnSuccess else {
            return
        }

        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixelData, width: Int(newImage.size.width), height: Int(newImage.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
        // Flip the context vertically so the image is not drawn upside down.
        context?.translateBy(x: 0, y: newImage.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
        UIGraphicsPushContext(context!)
        newImage.draw(in: CGRect(x: 0, y: 0, width: newImage.size.width, height: newImage.size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))

        imageView.image = newImage

        // Core ML prediction
        guard let prediction = try? model.prediction(image: pixelBuffer!) else {
            return
        }
        classifier.text = "I think this is a \(prediction.classLabel)."
    }
}
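I have not yet been able to tell whether the delegate method is even being invoked. My plan (just my assumption about how to debug this, not anything from the docs) is to log at the top of the button action and at the top of the callback, something like:

@IBAction func camera(_ sender: Any) {
    print("camera tapped; camera available:", UIImagePickerController.isSourceTypeAvailable(.camera))
    // ... same presentation code as above ...
}

private func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    // If this line never prints, the method is never being called at all.
    print("picker callback fired; keys:", info.keys)
    // ... same resize / prediction code as above ...
}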