I'm using a renderer class within a capture pipeline to add a CI filter to video. Inside the copyRenderedPixelBuffer of the renderer I'd also like to copy the pixel buffer and send it to Vision to detect facial landmarks.
I've made a singleton for Vision with a serial dispatch queue. The problem is that once I add the dispatch queue the pixelBuffer doesn't get released from memory - hence a massive leak (even though the pixel buffer is release in the objc code). With the dispatch queue muted the memory leak goes away (however there's massive video lag with the video preview, due to the Vision functions).
Any help greatly appreciated!
- (CVPixelBufferRef)copyRenderedPixelBuffer:(CVPixelBufferRef)pixelBuffer
{
OSStatus err = noErr;
CVPixelBufferRef renderedOutputPixelBuffer = NULL;
CVPixelBufferRef visionOutputPixelBuffer;
CIImage *sourceImage = nil;
err = CVPixelBufferPoolCreatePixelBuffer( kCFAllocatorDefault, _bufferPool, &renderedOutputPixelBuffer );
if ( err ) {
NSLog(@"Cannot obtain a pixel buffer from the buffer pool (%d)", (int)err );
goto bail;
}
err = CVPixelBufferPoolCreatePixelBuffer( kCFAllocatorDefault, _bufferPool, &visionOutputPixelBuffer );
if ( err ) {
NSLog(@"Cannot obtain a pixel buffer from the buffer pool (%d)", (int)err );
}
// Vision
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
int bufferHeight = (int)CVPixelBufferGetHeight(pixelBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBuffer);
CVPixelBufferLockBaseAddress(visionOutputPixelBuffer, 0);
uint8_t *copyBaseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(visionOutputPixelBuffer);
memcpy(copyBaseAddress, baseAddress, bufferHeight * bytesPerRow);
CVPixelBufferUnlockBaseAddress(pixelBuffer,0);
CVPixelBufferUnlockBaseAddress(visionOutputPixelBuffer,0);
[[VisionFaceDetection sharedInstance] detectFaceOnPixelBuffer:visionOutputPixelBuffer];
CVPixelBufferRelease(visionOutputPixelBuffer);
// ... Other Filter code here ...
bail:
if(sourceImage != nil)
{
[sourceImage release];
}
return renderedOutputPixelBuffer;
}
The Swift class for Vision
@objc final class VisionFaceDetection: NSObject {
@objc static let sharedInstance = VisionFaceDetection() // singleton so it can handle the async dispatch
private override init() {}
let serialQueue = DispatchQueue(label: "vision", qos: DispatchQoS.userInitiated)
let faceLandmarks = VNDetectFaceLandmarksRequest()
let faceLandmarksDetectionRequest = VNSequenceRequestHandler()
// CVPixelBuffer
@objc func detectFace(onPixelBuffer pixelBuffer: CVPixelBuffer) {
// Currently this block causes a memory leak with the pixel buffer
serialQueue.async {
let faceDetectionRequest = VNDetectFaceRectanglesRequest { [weak self] (request, error) in
guard let observations = request.results as? [VNFaceObservation] else {
print("Unexpected result type from face detection request")
return
}
// happens even when these are muted
// self?.faceLandmarks.inputFaceObservations = observations
// self?.detectLandmarks(onPixelBuffer: pixelBuffer)
}
let faceDetectionRequestHandler = VNSequenceRequestHandler()
try? faceDetectionRequestHandler.perform([faceDetectionRequest], on: pixelBuffer)
}
}
// ... other methods in class
}