I'm trying to get a live Processing video stream to detect and pixelate faces in real time. I have the face tracking working (it draws a rectangle around each face), and a separate sketch that pixelates the whole frame, but I can't figure out how to constrain the pixelation to the face rectangle. Any advice on how to merge the two would be greatly appreciated; my rough guess at how they might fit together is at the bottom of this post.
My face tracking code is:
import processing.video.*;
import java.awt.Rectangle;
import gab.opencv.*;

OpenCV opencv;
Capture video;
Rectangle[] faces;

void setup() {
  size(640, 480);
  String[] cameras = Capture.list();
  video = new Capture(this, cameras[0]);
  opencv = new OpenCV(this, width, height);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
  faces = new Rectangle[0]; // no detections until the first frame arrives
  frameRate(24);
  video.start();
}

void draw() {
  // grab a fresh frame before handing it to OpenCV
  if (video.available()) {
    video.read();
  }
  opencv.loadImage(video);
  image(video, 0, 0);
  faces = opencv.detect(1.2, 3, 0, 40, 400); // adjust last two numbers to give min and max face size
  noFill();
  stroke(255, 20, 255);
  strokeWeight(2);
  for (int i = 0; i < faces.length; i++) {
    rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
  }
}
and my pixelate code is:
import processing.video.*;

int blockSize = 25;
int numPixelsWide, numPixelsHigh; // grid dimensions measured in blocks
Capture video;
color[] movColors;

void setup() {
  size(640, 480);
  noStroke();
  String[] cameras = Capture.list();
  video = new Capture(this, cameras[0]);
  video.start();
  numPixelsWide = width / blockSize;  // divide by blockSize so the grid covers the frame exactly
  numPixelsHigh = height / blockSize;
  println(numPixelsWide);
  movColors = new color[numPixelsWide * numPixelsHigh];
}

void draw() {
  if (video.available()) {
    video.read();
    video.loadPixels();
    // sample one colour per block from the new frame
    int count = 0;
    for (int j = 0; j < numPixelsHigh; j++) {
      for (int i = 0; i < numPixelsWide; i++) {
        movColors[count] = video.get(i * blockSize, j * blockSize);
        count++;
      }
    }
  }
  background(255);
  // draw each block as a solid square of its sampled colour
  for (int j = 0; j < numPixelsHigh; j++) {
    for (int i = 0; i < numPixelsWide; i++) {
      fill(movColors[j * numPixelsWide + i]);
      rect(i * blockSize, j * blockSize, blockSize, blockSize);
    }
  }
}
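Is something like the following on the right track? The idea is to draw the unmodified frame first, then for each detected face loop over its rectangle in blockSize steps and draw a solid square sampled from the video. This is untested, and the pixelate() helper is just my guess at how to restrict the sampling loops to a face Rectangle:

import processing.video.*;
import java.awt.Rectangle;
import gab.opencv.*;

OpenCV opencv;
Capture video;
Rectangle[] faces;
int blockSize = 25;

void setup() {
  size(640, 480);
  noStroke();
  String[] cameras = Capture.list();
  video = new Capture(this, cameras[0]);
  opencv = new OpenCV(this, width, height);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
  video.start();
}

void draw() {
  if (video.available()) {
    video.read();
  }
  opencv.loadImage(video);
  image(video, 0, 0); // plain frame everywhere outside the faces
  faces = opencv.detect(1.2, 3, 0, 40, 400);
  for (int i = 0; i < faces.length; i++) {
    pixelate(faces[i]);
  }
}

// hypothetical helper: cover the rectangle with blockSize squares,
// each filled with the colour sampled at the block's top-left corner
void pixelate(Rectangle r) {
  for (int y = r.y; y < r.y + r.height; y += blockSize) {
    for (int x = r.x; x < r.x + r.width; x += blockSize) {
      fill(video.get(x, y));
      rect(x, y, blockSize, blockSize);
    }
  }
}

The last row and column of blocks can overshoot the rectangle edge by up to blockSize - 1 pixels, which I'm assuming is acceptable for this kind of privacy blur. I'm also not sure whether sampling each block's corner is good enough, or whether I should be averaging all the pixels inside each block instead.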