I'm trying to read an AVAsset and, for every frame of the asset, apply a translation or a rotation using the GPU.
From my research, I found a way to convert a CMSampleBuffer into a GPU texture. I'm using the following method to do it:
func texture(fromBuffer sampleBuffer: CMSampleBuffer) -> GLuint {
    var texture: GLuint = 0
    glGenTextures(1, &texture)
    glBindTexture(GLenum(GL_TEXTURE_2D), texture)
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_LINEAR)
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAG_FILTER), GL_LINEAR)
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_CLAMP_TO_EDGE)
    glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_CLAMP_TO_EDGE)

    if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
        CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)
        // Assumes the pixel buffer is kCVPixelFormatType_32BGRA and tightly packed
        // (bytes per row == width * 4). The GL_BGRA upload relies on the
        // APPLE_texture_format_BGRA8888 extension available on iOS.
        glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGBA, GLsizei(width), GLsizei(height), 0,
                     GLenum(GL_BGRA), GLenum(GL_UNSIGNED_BYTE),
                     CVPixelBufferGetBaseAddress(pixelBuffer))
        CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
    }
    return texture
}
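For context, this is roughly how I'm reading the asset to obtain those sample buffers. It's only a sketch of my setup; the function name readFrames is my own, and forcing kCVPixelFormatType_32BGRA in the output settings is an assumption so that the BGRA upload above matches the decoded frames:

import AVFoundation

func readFrames(from asset: AVAsset, handler: (CMSampleBuffer) -> Void) throws {
    // Sketch only: read the first video track and deliver decoded frames as 32BGRA.
    guard let track = asset.tracks(withMediaType: .video).first else { return }
    let reader = try AVAssetReader(asset: asset)
    let settings: [String: Any] = [
        kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
    ]
    let output = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
    reader.add(output)
    reader.startReading()
    while let sampleBuffer = output.copyNextSampleBuffer() {
        handler(sampleBuffer)   // e.g. texture(fromBuffer: sampleBuffer)
    }
}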
Now I need to perform my desired operations on this texture, but this is where I'm stuck. From an Objective-C example, I translated a function that takes a texture as a parameter and returns a CGImage.
Here's the method:
func processTexture(_ texture: GLuint, width: Int, height: Int) -> CGImage? {
    var newImage: CGImage?

    // Offscreen framebuffer backed by a color renderbuffer.
    var frameBuffer: GLuint = 0
    glGenFramebuffers(1, &frameBuffer)
    glBindFramebuffer(GLenum(GL_FRAMEBUFFER), frameBuffer)

    var colorRenderbuffer: GLuint = 0
    glGenRenderbuffers(1, &colorRenderbuffer)
    glBindRenderbuffer(GLenum(GL_RENDERBUFFER), colorRenderbuffer)
    glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA8_OES), GLsizei(width), GLsizei(height))
    glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0), GLenum(GL_RENDERBUFFER), colorRenderbuffer)

    let status = glCheckFramebufferStatus(GLenum(GL_FRAMEBUFFER))
    if status != GLenum(GL_FRAMEBUFFER_COMPLETE) {
        print("ERROR: framebuffer is not complete")
    } else {
        glViewport(0, 0, GLsizei(width), GLsizei(height))
        glClearColor(0.0, 0.0, 0.0, 1.0)
        glClear(GLbitfield(GL_COLOR_BUFFER_BIT))

        // I suppose the textured quad (with the translation/rotation applied)
        // would have to be drawn here, before reading the pixels back?

        // Read the rendered pixels into a raw byte buffer (RGBA, 4 bytes per pixel).
        let data = UnsafeMutablePointer<GLubyte>.allocate(capacity: width * height * 4)
        glReadPixels(0, 0, GLsizei(width), GLsizei(height), GLenum(GL_RGBA), GLenum(GL_UNSIGNED_BYTE), data)

        // Free the buffer once the CGDataProvider no longer needs it.
        let cgDataProviderReleaseDataCallback: CGDataProviderReleaseDataCallback = { _, data, _ in
            UnsafeMutableRawPointer(mutating: data).deallocate()
        }

        if let dataProvider = CGDataProvider(dataInfo: nil, data: data, size: width * height * 4, releaseData: cgDataProviderReleaseDataCallback) {
            let colorSpace = CGColorSpaceCreateDeviceRGB()
            let bitmapInfo = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Big.rawValue | CGImageAlphaInfo.premultipliedLast.rawValue)
            newImage = CGImage(width: width, height: height, bitsPerComponent: 8, bitsPerPixel: 32, bytesPerRow: width * 4, space: colorSpace, bitmapInfo: bitmapInfo, provider: dataProvider, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
        }
    }

    // Clean up the GL objects created for this frame.
    glDeleteRenderbuffers(1, &colorRenderbuffer)
    glDeleteFramebuffers(1, &frameBuffer)
    return newImage
}
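For reference, this is how I intend to wire the two functions together per frame. It's just a sketch of the flow I have in mind, using the readFrames sketch above; asset and processedImages are placeholders of mine:

var processedImages: [CGImage] = []
do {
    try readFrames(from: asset) { sampleBuffer in
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)

        var frameTexture = texture(fromBuffer: sampleBuffer)
        if let image = processTexture(frameTexture, width: width, height: height) {
            processedImages.append(image)
        }
        glDeleteTextures(1, &frameTexture)   // avoid leaking one texture per frame
    }
} catch {
    print("Failed to read asset: \(error)")
}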
But I can't see where in this code I could perform the translation and rotation operations, or how I would do it.
I'd be glad if anyone who has already worked with OpenGL on iOS could help me with this, or point me to examples of such operations.
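From what I've read so far, my guess is that the translation/rotation would live in a vertex shader as a uniform matrix, with the frame texture drawn as a full-screen quad into the framebuffer before glReadPixels. Something like this is what I have in mind (untested; uploadTransform and the attribute/uniform names are just my placeholders):

import GLKit
import OpenGLES

// Vertex shader: the translation/rotation would be applied here via a uniform matrix.
let vertexShaderSource = """
attribute vec4 position;
attribute vec2 texCoord;
uniform mat4 transform;   // rotation / translation
varying vec2 vTexCoord;
void main() {
    gl_Position = transform * position;
    vTexCoord = texCoord;
}
"""

// Fragment shader: samples the frame texture produced by texture(fromBuffer:).
let fragmentShaderSource = """
precision mediump float;
varying vec2 vTexCoord;
uniform sampler2D frameTexture;
void main() {
    gl_FragColor = texture2D(frameTexture, vTexCoord);
}
"""

// Build the transform with GLKit and upload it before drawing a full-screen quad
// (the uniform location would come from glGetUniformLocation on the compiled program).
func uploadTransform(to location: GLint) {
    var transform = GLKMatrix4MakeZRotation(.pi / 4)            // e.g. a 45-degree rotation
    transform = GLKMatrix4Translate(transform, 0.1, 0.0, 0.0)   // plus a small translation
    withUnsafePointer(to: &transform.m) {
        $0.withMemoryRebound(to: GLfloat.self, capacity: 16) { ptr in
            glUniformMatrix4fv(location, 1, GLboolean(GL_FALSE), ptr)
        }
    }
}

Is that the right direction, or is there a simpler place in processTexture where the transform should go?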