I am trying to do real-time image processing with an iPhone 6 at 240fps. The problem is that when I capture video at that speed, I can't process the frames fast enough, since I need to sample each pixel to get an average. Reducing the image resolution would easily solve this problem, but I can't figure out how to do it. The available AVCaptureDeviceFormats include a 192x144 px option, but only at 30fps; all of the 240fps options have larger dimensions. Here is how I am setting up the capture:
- (void)startDetection
{
    const int FRAMES_PER_SECOND = 240;
    self.session = [[AVCaptureSession alloc] init];
    self.session.sessionPreset = AVCaptureSessionPresetLow;

    // Retrieve the back camera
    NSArray *devices = [AVCaptureDevice devices];
    AVCaptureDevice *captureDevice;
    for (AVCaptureDevice *device in devices)
    {
        if ([device hasMediaType:AVMediaTypeVideo] && device.position == AVCaptureDevicePositionBack)
        {
            captureDevice = device;
            break;
        }
    }

    NSError *error;
    AVCaptureDeviceInput *input = [[AVCaptureDeviceInput alloc] initWithDevice:captureDevice error:&error];
    if (error)
    {
        NSLog(@"%@", error);
        return;
    }
    [self.session addInput:input];

    // Find the lowest-resolution format that supports the frame rate we want.
    AVCaptureDeviceFormat *currentFormat;
    for (AVCaptureDeviceFormat *format in captureDevice.formats)
    {
        NSArray *ranges = format.videoSupportedFrameRateRanges;
        AVFrameRateRange *frameRates = ranges[0];
        if (frameRates.maxFrameRate != FRAMES_PER_SECOND)
        {
            continue;
        }
        if (!currentFormat)
        {
            currentFormat = format;
            continue;
        }
        CMVideoDimensions candidate = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
        CMVideoDimensions current = CMVideoFormatDescriptionGetDimensions(currentFormat.formatDescription);
        if (candidate.width < current.width && candidate.height < current.height)
        {
            currentFormat = format;
        }
    }

    // Tell the device to use that format at the max frame rate.
    if ([captureDevice lockForConfiguration:nil])
    {
        captureDevice.torchMode = AVCaptureTorchModeOn;
        captureDevice.activeFormat = currentFormat;
        // Setting activeFormat resets the frame durations, so set them afterwards.
        captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, FRAMES_PER_SECOND);
        captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, FRAMES_PER_SECOND);
        [captureDevice setVideoZoomFactor:4];
        [captureDevice unlockForConfiguration];
    }

    // Set the output
    AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];

    // Create a serial queue to run the capture callbacks on
    dispatch_queue_t captureQueue = dispatch_queue_create("captureQueue", NULL);

    // Set up our delegate
    [videoOutput setSampleBufferDelegate:self queue:captureQueue];

    // Configure the pixel format
    videoOutput.videoSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };
    videoOutput.alwaysDiscardsLateVideoFrames = NO;
    [self.session addOutput:videoOutput];

    // Start the video session
    [self.session startRunning];
}
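
For context, the per-pixel sampling happens in the AVCaptureVideoDataOutputSampleBufferDelegate callback. This is a simplified sketch of the kind of averaging loop I mean, not my exact code (my real processing does more work per pixel):

- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

    uint8_t *base = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBuffer);
    size_t width = CVPixelBufferGetWidth(pixelBuffer);
    size_t height = CVPixelBufferGetHeight(pixelBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);

    // Sum the red channel of every pixel (32BGRA layout: B, G, R, A).
    uint64_t sum = 0;
    for (size_t y = 0; y < height; y++)
    {
        uint8_t *row = base + y * bytesPerRow;
        for (size_t x = 0; x < width; x++)
        {
            sum += row[x * 4 + 2];
        }
    }
    double average = (double)sum / (double)(width * height);

    CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

    // Touching every pixel of a full-size frame is what fails to keep up at 240fps.
    NSLog(@"average red: %f", average);
}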
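
And for illustration, this is the kind of downscale I was hoping to get from the hardware, sketched in software with Accelerate's vImageScale_ARGB8888 (which works on BGRA too, since it treats the image as generic 4-channel 8-bit data). I haven't verified this keeps up at 240fps; the helper name and the preallocated destination buffer are just for the example:

#import <Accelerate/Accelerate.h>

// Shrink a locked BGRA pixel buffer into a preallocated destination buffer
// so the averaging loop only has to touch destWidth x destHeight pixels.
static void downscalePixelBuffer(CVImageBufferRef pixelBuffer,
                                 uint8_t *destData,
                                 size_t destWidth,
                                 size_t destHeight)
{
    vImage_Buffer src = {
        .data = CVPixelBufferGetBaseAddress(pixelBuffer),
        .height = CVPixelBufferGetHeight(pixelBuffer),
        .width = CVPixelBufferGetWidth(pixelBuffer),
        .rowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer)
    };
    vImage_Buffer dest = {
        .data = destData,
        .height = destHeight,
        .width = destWidth,
        .rowBytes = destWidth * 4
    };
    // Passing NULL for tempBuffer lets vImage allocate scratch space itself.
    vImageScale_ARGB8888(&src, &dest, NULL, kvImageNoFlags);
}

Is software downscaling like this the right approach, or is there a way to get a small-dimension format directly at 240fps?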