We are exporting video with AVAssetWriter from UIImages that are first converted to CVPixelBufferRefs, but in the resulting video the images are "squished" vertically, with a 160-pixel-high black bar of empty pixels across the bottom.
The source images are 640x480, so at first glance a 160-pixel black bar looks as if the image had been rotated and the bottom cut off. However, when we inspect the video frames, the full height of the image is still there; it has simply been squished to make room for the black bar.
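As a diagnostic (this is not in our code, just an illustration of what could be checked), the buffer's real geometry could be logged right after it is created in the method below, since CVPixelBufferGetBytesPerRow may differ from a hand-computed 4 * width:

    // Illustrative check only: log what the buffer actually is.
    // pxbuffer refers to the buffer created in the method shown below.
    NSLog(@"pixel buffer: %zu x %zu, bytes per row: %zu",
          CVPixelBufferGetWidth(pxbuffer),
          CVPixelBufferGetHeight(pxbuffer),
          CVPixelBufferGetBytesPerRow(pxbuffer));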
Below is our UIImage-to-CVPixelBufferRef code.
Any ideas what might be causing this behavior?
- (CVPixelBufferRef)pixelBufferFromCGImage:(CGImageRef)image
{
    ATHSingleton *singleton = [ATHSingleton singletons];
    int height = singleton.screenHeight;
    int width = singleton.screenWidth;

    NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey,
                             nil];

    // The pixel buffer is created with hard-coded dimensions: 480 wide x 640 high.
    CVPixelBufferRef pxbuffer = NULL;
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, 480, 640,
                                          kCVPixelFormatType_32ARGB,
                                          (__bridge CFDictionaryRef)options,
                                          &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);

    // The bitmap context, by contrast, uses the screen dimensions from the
    // singleton and a row stride of 4 * width.
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, width, height, 8,
                                                 4 * width, rgbColorSpace,
                                                 kCGImageAlphaNoneSkipFirst);
    NSParameterAssert(context);

    // The image is drawn into a 480 x 640 rect, offset by -80 on the y axis.
    CGContextConcatCTM(context, CGAffineTransformMakeRotation(0));
    CGContextDrawImage(context, CGRectMake(0, -80, 480, 640), image);

    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
    return pxbuffer;
}
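For comparison, here is a sketch of the same conversion with the pixel buffer, the bitmap context, and the draw rect all derived from the image itself, and with the context using the buffer's own bytes-per-row rather than a hand-computed stride. We have not confirmed that this addresses the problem; the method name is made up for this sketch, and it assumes the AVAssetWriterInput's video settings are configured for the same 640x480 dimensions.

    // Sketch only: size everything from the source image and use the buffer's real stride.
    - (CVPixelBufferRef)pixelBufferFromCGImageSized:(CGImageRef)image
    {
        size_t width  = CGImageGetWidth(image);   // 640 for our source images
        size_t height = CGImageGetHeight(image);  // 480 for our source images

        NSDictionary *options = @{ (id)kCVPixelBufferCGImageCompatibilityKey : @YES,
                                   (id)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES };

        CVPixelBufferRef pxbuffer = NULL;
        CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                                              kCVPixelFormatType_32ARGB,
                                              (__bridge CFDictionaryRef)options,
                                              &pxbuffer);
        NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

        CVPixelBufferLockBaseAddress(pxbuffer, 0);
        void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
        NSParameterAssert(pxdata != NULL);

        // Use the buffer's own stride instead of assuming 4 * width.
        CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef context = CGBitmapContextCreate(pxdata, width, height, 8,
                                                     CVPixelBufferGetBytesPerRow(pxbuffer),
                                                     rgbColorSpace,
                                                     kCGImageAlphaNoneSkipFirst);
        NSParameterAssert(context);

        // Draw the image 1:1 with no offset, so nothing is scaled or cropped.
        CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);

        CGColorSpaceRelease(rgbColorSpace);
        CGContextRelease(context);

        CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
        return pxbuffer;
    }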