When I add an audio input to the capture session, the photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) callback returns the semantic segmentation mattes as expected. Without the audio input, the returned mattes are nil. Is it possible to get the mattes without adding an audio input and asking the user for microphone permission?
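For reference, the mattes are read in the delegate callback roughly like this (a minimal sketch; the .skin and .hair matte types are just examples, not necessarily the ones the app requests):

// MARK: - AVCapturePhotoCaptureDelegate (sketch)
func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingPhoto photo: AVCapturePhoto,
                 error: Error?) {
    guard error == nil else { return }
    // With an audio input in the session these come back non-nil;
    // without it they are nil.
    let skinMatte = photo.semanticSegmentationMatte(for: .skin)
    let hairMatte = photo.semanticSegmentationMatte(for: .hair)
    print("skin matte:", skinMatte != nil, "hair matte:", hairMatte != nil)
}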
// MARK: - Session
private func setupSession() {
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = .photo
    setupInputOutput()
    setupPreviewLayer(view)
    captureSession?.startRunning()
}
// MARK: - Settings
private func setupCamera() {
    // Prefer HEVC when the device supports it, otherwise fall back to JPEG.
    let supportsHEVC = AVAssetExportSession.allExportPresets().contains(AVAssetExportPresetHEVCHighestQuality)
    let photoSettings = supportsHEVC
        ? AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        : AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    photoSettings.flashMode = .auto
    photoSettings.isHighResolutionPhotoEnabled = true
    if let previewFormat = photoSettings.availablePreviewPhotoPixelFormatTypes.first {
        photoSettings.previewPhotoFormat = [kCVPixelBufferPixelFormatTypeKey as String: previewFormat]
    }
    photoSettings.isDepthDataDeliveryEnabled = true
    photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
    if let matteTypes = photoOutput?.enabledSemanticSegmentationMatteTypes, !matteTypes.isEmpty {
        photoSettings.enabledSemanticSegmentationMatteTypes = matteTypes
    }
    photoSettings.photoQualityPrioritization = self.photoQualityPrioritizationMode
    settings = photoSettings
}
private func setupInputOutput() {
    photoOutput = AVCapturePhotoOutput()
    guard let captureSession = captureSession, let photoOutput = photoOutput else { return }
    captureSession.beginConfiguration()
    captureSession.sessionPreset = .photo
    do {
        // Video input: the front TrueDepth camera is required for depth and matte delivery.
        let devices = self.videoDeviceDiscoverySession.devices
        currentDevice = devices.first(where: { $0.position == .front && $0.deviceType == .builtInTrueDepthCamera })
        guard let videoDevice = currentDevice else {
            captureSession.commitConfiguration()
            return
        }
        videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
        if captureSession.canAddInput(videoDeviceInput) {
            captureSession.addInput(videoDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
        // Audio input: this is the part I would like to drop, because it triggers
        // the microphone permission prompt. Without it the mattes come back nil.
        guard let audioDevice = AVCaptureDevice.default(for: .audio) else {
            captureSession.commitConfiguration()
            return
        }
        captureDeviceInput = try AVCaptureDeviceInput(device: audioDevice)
        if captureSession.canAddInput(captureDeviceInput) {
            captureSession.addInput(captureDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
    } catch {
        errorMessage = error.localizedDescription
        print(error.localizedDescription)
        captureSession.commitConfiguration()
        return
    }
    if captureSession.canAddOutput(photoOutput) {
        captureSession.addOutput(photoOutput)
        photoOutput.isHighResolutionCaptureEnabled = true
        photoOutput.isLivePhotoCaptureEnabled = photoOutput.isLivePhotoCaptureSupported
        photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
        photoOutput.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliverySupported
        photoOutput.enabledSemanticSegmentationMatteTypes = photoOutput.availableSemanticSegmentationMatteTypes
        photoOutput.maxPhotoQualityPrioritization = .balanced
    }
    captureSession.commitConfiguration()
}
private func setupPreviewLayer(_ view: UIView) {
    guard let captureSession = captureSession else { return }
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer.videoGravity = .resizeAspectFill
    previewLayer.connection?.videoOrientation = .portrait
    previewLayer.frame = view.frame
    view.layer.insertSublayer(previewLayer, at: 0)
    cameraPreviewLayer = previewLayer
}