Hello, I have a Rails 5 application that uses the face-api.js library for face recognition. In the browser the webcam video doesn't show up, and the console doesn't show any errors anywhere. I don't understand what is happening.
This is the code of my view:
<head>
  <%= javascript_include_tag 'face_recognition.js' %>
  <%= javascript_include_tag "face-api.js/dist/face-api.min.js" %>
  <%= javascript_include_tag "face-api.js/dist/face-api.js" %>
  <%= javascript_include_tag 'face_api' %>
  <style>
    canvas {
      position: relative;
      top: -420px;
      left: 350px;
    }
  </style>
</head>
<body>
  <video id="camfr" autoplay="autoplay" width="400" height="400" muted></video>
</body>
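To rule out an asset problem, the first thing I plan to check in the browser console is whether the library global and the video element actually exist (just a quick sanity check, nothing app-specific):

// Run in the browser console after the page loads.
// If either value is undefined/null, the includes above are not being served.
console.log(typeof faceapi)                    // expected: "object"
console.log(document.getElementById('camfr'))  // expected: the <video id="camfr"> element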
This is my app/assets/javascripts/face_recognition.js
//require face_recognition
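I'm not sure whether the directive syntax matters here; as far as I know, Sprockets only picks up directives written with the = sign, so for comparison the usual form would be:

// app/assets/javascripts/face_recognition.js
// A plain "//require" line is treated as an ordinary comment by Sprockets;
// directives use the "//=" prefix.
//= require face_recognition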
And in lib/assets/javascripts I have the face_recognition.js file, which loads the camera, the canvas, and the face-api.js models:
const camfr = document.getElementById('camfr')

const startVideo = () => {
  var constraints = { audio: false, video: { width: 1280, height: 720 } };
  navigator.mediaDevices.getUserMedia(constraints)
    .then(function(mediaStream) {
      var video = document.querySelector('video');
      video.srcObject = mediaStream;
      video.onloadedmetadata = function(e) {
        video.play();
      };
    })
}
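One thing I still want to try is attaching a catch to the getUserMedia promise, so that a permission or insecure-context failure would at least be logged instead of staying silent. A rough sketch of what I mean (startVideoWithLogging is just an illustrative name; the rest mirrors the code above):

const startVideoWithLogging = () => {
  const constraints = { audio: false, video: { width: 1280, height: 720 } };
  return navigator.mediaDevices.getUserMedia(constraints)
    .then(function(mediaStream) {
      const video = document.querySelector('video');
      video.srcObject = mediaStream;
      video.onloadedmetadata = function() {
        video.play();
      };
    })
    .catch(function(err) {
      // e.g. NotAllowedError (permission denied), NotFoundError (no camera),
      // or a rejection because the page is not served over HTTPS/localhost
      console.error('getUserMedia failed:', err.name, err.message);
    });
}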
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri("<%= asset_path('vendorface-api.js/models/tiny_face_detector/tiny_face_detector_model-weights_manifest.json') %>"),
  faceapi.nets.faceLandmark68Net.loadFromUri("<%= asset_path('face-api.js/models/face_landmark_68/face_landmark_68_model-weights_manifest.json') %>"), // draws the face landmarks
  faceapi.nets.faceRecognitionNet.loadFromUri("<%= asset_path('face-api.js/models/face_recognition/face_recognition_model-weights_manifest.json') %>"), // performs the face recognition
  faceapi.nets.faceExpressionNet.loadFromUri("<%= asset_path('face-api.js/models/face_expression/face_expression_model-weights_manifest.json') %>"), // detects expressions
  faceapi.nets.ageGenderNet.loadFromUri("<%= asset_path('face-api.js/models/age_gender_model/age_gender_model-weights_manifest.json') %>"), // age and gender
  faceapi.nets.ssdMobilenetv1.loadFromUri("<%= asset_path('face-api.js/models/ssd_mobilenetv1/ssd_mobilenetv1_model-weights_manifest.json') %>") // used to detect the face
]).then(startVideo)
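If any of these weights manifests fails to load (a 404, or, I suspect, the ERB tags not being evaluated because the file ends in .js rather than .js.erb), the whole Promise.all rejects and startVideo never runs, so I was also going to make that case visible. A sketch, reusing the exact calls above:

// Same Promise.all as above, with a catch so a failed model download is logged.
Promise.all([
  // ...the six loadFromUri(...) calls shown above...
]).then(startVideo)
  .catch(function(err) {
    // A rejected load ends up here, and startVideo is never called.
    console.error('face-api.js model loading failed:', err)
  })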
camfr.addEventListener('play', async () => {
  const canvas = faceapi.createCanvasFromMedia(camfr)
  const canvasSize = {
    width: camfr.width,
    height: camfr.height
  }
  faceapi.matchDimensions(canvas, canvasSize)
  document.body.appendChild(canvas)

  setInterval(async () => {
    const detections = await faceapi
      .detectAllFaces(camfr, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceExpressions()
      .withAgeAndGender()
    const resizedDetections = faceapi.resizeResults(detections, canvasSize)

    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    faceapi.draw.drawFaceExpressions(canvas, resizedDetections)

    resizedDetections.forEach(detection => {
      const { age, gender, genderProbability } = detection
      new faceapi.draw.DrawTextField([
        `${parseInt(age, 10)} years`,
        `${gender} (${parseInt(genderProbability * 100, 10)})`
      ], detection.detection.box.topRight).draw(canvas)
    })
  }, 100)
})
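Since all of the scripts are included in the <head>, I also wondered whether this file runs before the <video> tag exists in the DOM, in which case getElementById('camfr') would return null. A sketch of how I would defer the setup (the loading and detection code stays exactly as above, only wrapped):

document.addEventListener('DOMContentLoaded', () => {
  // Looked up only after the DOM is parsed, so the <video> tag exists.
  const camfr = document.getElementById('camfr')
  // ...the Promise.all(...).then(startVideo) call and the 'play' listener
  // from above go here, unchanged...
})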