
I am trying to resize a CMSampleBufferRef as quickly as possible on an iOS 8 device for use in image processing. From what I have found online, the way to do this seems to be by using the vImage API in the Accelerate framework. However, I haven't done much with the Accelerate framework and I can't quite figure out how to do this. Here is what I have so far to scale an image to 200x200:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef cvimgRef = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(cvimgRef,0);
    void *imageData = CVPixelBufferGetBaseAddress(cvimgRef);
    NSInteger width = CVPixelBufferGetWidth(cvimgRef);
    NSInteger height = CVPixelBufferGetHeight(cvimgRef);

    unsigned char *newData = // NOT SURE WHAT THIS SHOULD BE...
    vImage_Buffer inBuff = { imageData, height, width, 4*width };
    vImage_Buffer outBuff = { newData, 200, 200, 4*200 };

    // NOT SURE IF THIS IS THE CORRECT METHOD... video output settings for kCVPixelBufferPixelFormatTypeKey is set to kCVPixelFormatType_32BGRA
    // This seems wrong since the image scale is ARGB, not BGRA.
    vImageScale_ARGB8888(&inBuff, &outBuff, NULL, kvImageNoFlags);
    CVPixelBufferUnlockBaseAddress(cvimgRef,0);
}

Where outBuff is the result. After that, I am also not sure how to convert outBuff back into a CVImageBufferRef for further image processing. Any suggestions would be appreciated!

lehn0058

3 Answers


vImageScale just fills a destination buffer with raw pixel data, and pay attention that those buffers need to be freed by you.
I don't know if there is a faster way to keep working with that output buffer directly, but I would convert the buffer into a CGImage. Something like the following, taken from here, so take it as a reference:

vImage_CGImageFormat format = {
    .bitsPerComponent = 8,
    .bitsPerPixel = 32,
    .colorSpace = NULL,   // NULL is treated as sRGB
    .bitmapInfo = (CGBitmapInfo)kCGImageAlphaFirst,
    .version = 0,
    .decode = NULL,
    .renderingIntent = kCGRenderingIntentDefault,
};

// dstBuffer is the scaled vImage_Buffer (outBuff in the question)
vImage_Error ret = kvImageNoError;
CGImageRef destRef = vImageCreateCGImageFromBuffer(&dstBuffer, &format, NULL, NULL, kvImageNoFlags, &ret);


Then I convert it into a CVPixelBuffer:

- (CVPixelBufferRef)pixelBufferFromCGImage:(CGImageRef)image
{
    NSDictionary *options = @{
        (NSString *)kCVPixelBufferCGImageCompatibilityKey : @YES,
        (NSString *)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES,
    };

    CVPixelBufferRef pxbuffer = NULL;
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, CGImageGetWidth(image),
                                          CGImageGetHeight(image), kCVPixelFormatType_32ARGB,
                                          (__bridge CFDictionaryRef)options, &pxbuffer);
    if (status != kCVReturnSuccess) {
        NSLog(@"Operation failed");
    }
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);

    // Draw the CGImage into the pixel buffer's memory
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, CGImageGetWidth(image),
                                                 CGImageGetHeight(image), 8,
                                                 CVPixelBufferGetBytesPerRow(pxbuffer), rgbColorSpace,
                                                 kCGImageAlphaNoneSkipFirst);
    NSParameterAssert(context);

    CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image),
                                           CGImageGetHeight(image)), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
    return pxbuffer;
}


I'm pretty sure it is possible to avoid the conversion to a CGImage and work directly with the buffer, but I've never tried it.
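For reference, using the vImageBuffer_InitWithCVPixelBuffer() / vImageBuffer_CopyToCVPixelBuffer() functions (see also the comments below), a direct CVPixelBuffer-to-CVPixelBuffer scale with no CGImage in the middle might look roughly like the untested sketch below. The 200x200 destination size, the BGRA format choice, and the helper name are assumptions, so treat it as a starting point rather than a drop-in implementation.

#import <Accelerate/Accelerate.h>
#import <CoreVideo/CoreVideo.h>

// Rough, untested sketch: scale a camera CVPixelBuffer to 200x200 without a CGImage round trip.
static CVPixelBufferRef CreateScaledPixelBuffer(CVPixelBufferRef srcPixelBuffer)
{
    // Destination pixel buffer, 200x200 BGRA.
    CVPixelBufferRef dstPixelBuffer = NULL;
    if (CVPixelBufferCreate(kCFAllocatorDefault, 200, 200, kCVPixelFormatType_32BGRA,
                            NULL, &dstPixelBuffer) != kCVReturnSuccess) {
        return NULL;
    }

    // 8-bit, 4-channel little-endian BGRA as the intermediate format on both sides.
    vImage_CGImageFormat format = {
        .bitsPerComponent = 8,
        .bitsPerPixel = 32,
        .colorSpace = NULL, // NULL is treated as sRGB
        .bitmapInfo = (CGBitmapInfo)(kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little),
    };
    const CGFloat background[3] = {0, 0, 0};

    // If the pixel buffers carry no color space, vImageCVImageFormat_SetColorSpace() may be needed.
    vImageCVImageFormatRef srcFormat = vImageCVImageFormat_CreateWithCVPixelBuffer(srcPixelBuffer);
    vImageCVImageFormatRef dstFormat = vImageCVImageFormat_CreateWithCVPixelBuffer(dstPixelBuffer);

    // Let vImage allocate srcBuffer.data and pull the pixels out of the source CVPixelBuffer.
    vImage_Buffer srcBuffer = {0};
    vImage_Buffer dstBuffer = {0};
    vImage_Error err = vImageBuffer_InitWithCVPixelBuffer(&srcBuffer, &format, srcPixelBuffer,
                                                          srcFormat, background, kvImageNoFlags);

    // Allocate the 200x200 destination and scale. The _ARGB8888 scale works on any
    // 8-bit, 4-channel layout; it does not care about the actual channel order.
    if (err == kvImageNoError)
        err = vImageBuffer_Init(&dstBuffer, 200, 200, 32, kvImageNoFlags);
    if (err == kvImageNoError)
        err = vImageScale_ARGB8888(&srcBuffer, &dstBuffer, NULL, kvImageNoFlags);

    // Copy the scaled pixels into the destination CVPixelBuffer.
    if (err == kvImageNoError)
        err = vImageBuffer_CopyToCVPixelBuffer(&dstBuffer, &format, dstPixelBuffer,
                                               dstFormat, background, kvImageNoFlags);

    free(srcBuffer.data);
    free(dstBuffer.data);
    if (srcFormat) vImageCVImageFormat_Release(srcFormat);
    if (dstFormat) vImageCVImageFormat_Release(dstFormat);

    if (err != kvImageNoError) {
        CVPixelBufferRelease(dstPixelBuffer);
        return NULL;
    }
    return dstPixelBuffer; // caller is responsible for releasing it
}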

Andrea
    See also vImageBuffer_InitWithCVPixelBuffer() / vImageBuffer_InitForCopyToCVPixelBuffer() – Ian Ollmann Mar 23 '15 at 18:01
  • @IanOllmann those functions are awesome!! vImage is a great deal for performance. Can you provide an example of how to do that? In the header they say this should be used to prepare the buffer for another conversion. – Andrea Mar 24 '15 at 07:28

You have to use a resampling filter in conjunction with any vImage operations that alter image geometry; see page 32 of the vImage Programming Guide.
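For the geometry operations that take an explicit filter (the shear family), the pattern looks roughly like the untested sketch below; the buffer names are placeholders. vImageScale_ARGB8888 itself takes no filter argument and handles its resampling internally.

// Untested sketch: an explicit Lanczos resampling filter driving a horizontal shear.
// srcBuffer and dstBuffer are assumed to be valid ARGB8888 vImage_Buffers.
Pixel_8888 backgroundColor = {0, 0, 0, 0};

// The filter's scale is the horizontal resampling factor, e.g. 200.0f / srcBuffer.width.
ResamplingFilter filter = vImageNewResamplingFilter(200.0f / (float)srcBuffer.width,
                                                    kvImageHighQualityResampling);

vImage_Error err = vImageHorizontalShear_ARGB8888(&srcBuffer, &dstBuffer,
                                                  0, 0,   // ROI offset in the source
                                                  0.0f,   // xTranslate
                                                  0.0f,   // shearSlope (0 = no shear, just resampling)
                                                  filter,
                                                  backgroundColor,
                                                  kvImageNoFlags);

vImageDestroyResamplingFilter(filter);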

James Bush
- (CVPixelBufferRef)copyRenderedPixelBuffer:(CVPixelBufferRef)pixelBuffer {

    CVPixelBufferLockBaseAddress(pixelBuffer, 0);

    // Wrap the pixel buffer in a vImage_Buffer. kvImageNoAllocate means vImage uses the
    // memory we point it at (here, the pixel buffer's own base address) instead of allocating.
    vImage_Error err;
    vImage_Buffer buffer;
    buffer.data = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);
    buffer.rowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer);
    buffer.width = CVPixelBufferGetWidth(pixelBuffer);
    buffer.height = CVPixelBufferGetHeight(pixelBuffer);
    vImageCVImageFormatRef vformat = vImageCVImageFormat_CreateWithCVPixelBuffer(pixelBuffer);
    vImage_CGImageFormat cgformat = {
        .bitsPerComponent = 8,
        .bitsPerPixel = 32,
        .bitmapInfo = kCGBitmapByteOrderDefault,
        .colorSpace = NULL,    // sRGB
    };
    const CGFloat bgColor[3] = {0.0, 0.0, 0.0};
    vImageBuffer_InitWithCVPixelBuffer(&buffer, &cgformat, pixelBuffer, vformat, bgColor, kvImageNoAllocate);

    // Destination buffer with the same dimensions, backed by a temporary allocation
    vImage_Buffer outbuffer;
    void *tempBuffer = malloc(CVPixelBufferGetBytesPerRow(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer));
    outbuffer.data = tempBuffer;
    outbuffer.rowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer);
    outbuffer.width = CVPixelBufferGetWidth(pixelBuffer);
    outbuffer.height = CVPixelBufferGetHeight(pixelBuffer);

    // PROCESS vIMAGE HERE

    // Write the processed pixels back into the original pixel buffer
    err = vImageBuffer_CopyToCVPixelBuffer(&outbuffer, &cgformat, pixelBuffer, vformat, bgColor, kvImageNoFlags);
    if (err != kvImageNoError) {
        NSLog(@"vImageBuffer_CopyToCVPixelBuffer error: %ld", (long)err);
    }

    free(tempBuffer);
    vImageCVImageFormat_Release(vformat);

    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    return (CVPixelBufferRef)CFRetain(pixelBuffer);
}
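
For context, a hypothetical call site (names assumed, untested) from the capture delegate in the question could look like this; the method follows the copy/create rule, so the caller releases the returned buffer.

// Inside -captureOutput:didOutputSampleBuffer:fromConnection:
CVPixelBufferRef source = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferRef processed = [self copyRenderedPixelBuffer:source];

// ... run further image processing on `processed` ...

CVPixelBufferRelease(processed); // balances the CFRetain at the end of copyRenderedPixelBuffer: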

James Bush