I recently installed the Affectiva SDK (http://www.affectiva.com/) and followed the tutorial on analyzing input from the camera (http://developer.affectiva.com/v3/android/analyze-camera/). Unfortunately, the project does not seem to be working. My understanding is that the callback functions of the FaceListener, ImageListener, and ProcessStatusListener interfaces should be invoked when a face is detected and so on (is this correct?).

I am not getting any errors, but these functions are never called either (I've put Console.WriteLine statements in them and set breakpoints in Visual Studio). From time to time a series of "Image Captured" statements is printed to the console, but I haven't been able to reproduce how or why that happens. Does anyone know what I did wrong? Thank you in advance for any help.
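One thing I'm unsure about: a WPF application has no console window by default, so I'm considering switching the logging to Debug.WriteLine, which shows up in the Visual Studio Output window regardless. A minimal sketch of what I mean, using onFaceFound as the example (only the logging changes):

using System.Diagnostics;

public void onFaceFound(float timestamp, int faceId)
{
    // Debug.WriteLine goes to the VS Output window, so the message is
    // visible even without a console attached to the WPF app.
    Debug.WriteLine("Face Found!");
}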
Below is my code so far:
App.xaml.cs
using Affdex;
using System;
using System.Collections.Generic;
using System.Configuration;
using System.Data;
using System.Linq;
using System.Threading.Tasks;
using System.Windows;
namespace Affectiva
{
    /// <summary>
    /// Interaction logic for App.xaml
    /// </summary>
    public partial class App : Application, FaceListener, ImageListener, ProcessStatusListener
    {
        public static CameraDetector detector;
        int camId = 10;
        int camFPS = 60;

        public App()
        {
            detector = new CameraDetector();
            String classifierPath = "C:\\Program Files (x86)\\Affectiva\\Affdex SDK\\data";
            detector.setClassifierPath(classifierPath);
            detector.setCameraId(camId);
            detector.setCameraFPS(camFPS);

            // Register this class as the handler for all three listener interfaces.
            detector.setFaceListener(this);
            detector.setImageListener(this);
            detector.setProcessStatusListener(this);

            // Enable the classifiers whose results I want in onImageResults.
            detector.setDetectSmile(true);
            detector.setDetectJoy(true);
            detector.setDetectAllExpressions(true);
            detector.setDetectAllEmotions(true);
            detector.setDetectAllEmojis(true);
            detector.setDetectAllAppearances(true);
        }

        public void onFaceFound(float f, int i)
        {
            Console.WriteLine("Face Found!");
        }

        public void onFaceLost(float f, int i)
        {
            Console.WriteLine("Face Lost!");
        }

        public void onImageResults(Dictionary<int, Face> faces, Frame f)
        {
            Console.WriteLine("OnImageResults - " + faces.Count);
            if (faces.Count > 0)
                Console.WriteLine(faces.First().Value.Appearance.Age);
        }

        public void onImageCapture(Frame f)
        {
            Console.WriteLine("Image Captured " + f.getHeight());
        }

        public void onProcessingFinished()
        {
            Console.WriteLine("Processing Finished");
        }

        public void onProcessingException(AffdexException e)
        {
            Console.WriteLine(e.Message);
        }
    }
}
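As an aside, camId = 10 is just the value I tried; to check which camera indices actually exist on my machine I plan to enumerate the devices through the same WebEye control used in MainWindow below (a sketch; the WebCameraId.Name property is my assumption from the WebEye documentation):

// Sketch (inside MainWindow): list the available capture devices and
// their order, to sanity-check the index passed to setCameraId(...).
private void ListCameras()
{
    var devices = webCameraControl.GetVideoCaptureDevices().ToList();
    for (int i = 0; i < devices.Count; i++)
    {
        // WebCameraId.Name is an assumption on my part.
        Console.WriteLine(i + ": " + devices[i].Name);
    }
}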
MainWindow.xaml.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using System.Windows.Threading;
using WebEye.Controls.Wpf;
namespace Affectiva
{
    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window
    {
        public MainWindow()
        {
            InitializeComponent();
        }

        private void OnStartButtonClick(object sender, RoutedEventArgs e)
        {
            // Start the webcam preview on the first available device,
            // then start the Affdex camera detector.
            var cameraId = webCameraControl.GetVideoCaptureDevices().First();
            webCameraControl.StartCapture(cameraId);
            App.detector.start();
        }
    }
}
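For completeness, this is how I intend to shut things down when the window closes. It's only a sketch: I'm assuming CameraDetector exposes stop() and isRunning() to mirror start(), as in the Affdex sample apps, and that WebCameraControl has a matching StopCapture().

// Sketch: release the webcam and stop the Affdex detector on close.
protected override void OnClosed(EventArgs e)
{
    webCameraControl.StopCapture();

    // stop()/isRunning() are assumptions based on the Affdex samples.
    if (App.detector != null && App.detector.isRunning())
    {
        App.detector.stop();
    }
    base.OnClosed(e);
}

(I also wonder whether the WebEye preview and the CameraDetector can both hold the same camera at once, but I haven't been able to verify that either way.)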