
I had to write a facial recognition program in JavaScript, for which I used the opencv4nodejs API, since there aren't many working examples. I now want to record and save the stream (either on the client side or by uploading it to the server) along with the audio, and this is where I am stuck. In simple words, I need to use the webcam input for two purposes: one, facial recognition, and two, somehow recording and saving it; the latter is what I'm unable to do. In the worst case, if recording and saving the webcam video is not possible, I could also save a complete screen recording instead. Please answer if there's a workaround to this. Any help is appreciated.
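To make the goal concrete, the pattern I'm after is sketched below. It is untested, the element ID and mime type are only my assumptions, and the point is simply that a single getUserMedia stream should feed both the recognition and the recording.

// One webcam stream, used for two things at once (sketch only)
navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then((stream) => {
    // 1) show the live stream in the <video> element that the recognition code reads from
    document.getElementById('inputVideo').srcObject = stream;

    // 2) record the very same stream so it can be saved or uploaded later
    const rec = new MediaRecorder(stream, { mimeType: 'video/webm' });
    const parts = [];
    rec.ondataavailable = (e) => parts.push(e.data);
    rec.onstop = () => {
        const blob = new Blob(parts, { type: 'video/webm' });
        // the blob could now be downloaded via an <a download> link or POSTed to the server
    };
    rec.start(1000); // emit a data chunk every second
});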

Below is what I tried to do, but it doesn't work, for obvious reasons.


$(document).ready(function () {
    run1()
})


// Globals shared by the recording, recognition and download-link code
let chunks = []     // data chunks produced by the MediaRecorder
let recorder        // MediaRecorder wrapping the webcam stream
let i = 0           // recognition pass counter, incremented in run2()
let counter = 0     // numbers the downloadable clips in makeLink()
const media = { type: 'video/webm', tag: 'video', ext: '.webm' }   // output container used by makeLink()
const ul = document.getElementById('recordings')   // placeholder <ul> that holds the download links

// run1(): load the face-api.js models and start the webcam
async function run1() {
    const MODELS = "/models"; 
    await faceapi.loadSsdMobilenetv1Model(MODELS)
    await faceapi.loadFaceLandmarkModel(MODELS)
    await faceapi.loadFaceRecognitionModel(MODELS)

    var _stream

    // Access the user's webcam
    const videoEl = document.getElementById('inputVideo')
    navigator.mediaDevices.getUserMedia({
        video: true,
        audio: true
    }).then(
        (stream) => {
            _stream = stream
            recorder = new MediaRecorder(_stream);
            recorder.ondataavailable = (e) => {
                chunks.push(e.data);
                console.log(chunks, i);
                if (i == 20) makeLink();  // build a download link from the chunks once i reaches 20
            };
            recorder.start(1000);  // without start() no dataavailable events fire; collect a chunk every 1000 ms
            videoEl.srcObject = stream
        },
        (err) => {
            console.error(err)
        }
    )
}


// run2() main recognition code and training
async function run2() {

    // run the SSD MobileNet v1 detector on the current video frame
    const input = document.getElementById('inputVideo')

    const mtcnnResults = await faceapi.ssdMobilenetv1(input)
 
    // Detect All the faces in the webcam
    const fullFaceDescriptions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()



    // Train the face matcher with the stored reference images of the current students

    const labeledFaceDescriptors = await Promise.all(
        CurrentStudent.map(
            // CurrentStudent is an array of student labels defined elsewhere
            async function (label) {
                // Collect a descriptor from each of the 10 stored images of this student
                const faceDescriptors = []
                for (let i = 1; i <= 10; i++) {
                    const imgUrl = `http://localhost:5500/StudentData/${label}/${i}.jpg`
                    const img = await faceapi.fetchImage(imgUrl)

                    // detect the face with the highest score in the image and compute its landmarks and face descriptor
                    const fullFaceDescription = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()

                    if (!fullFaceDescription) {
                        throw new Error(`no faces detected for ${label}`)
                    }

                    faceDescriptors.push(fullFaceDescription.descriptor)
                }
                return new faceapi.LabeledFaceDescriptors(label, faceDescriptors)
            }
        )
    )
    const maxDescriptorDistance = 0.65
    const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, maxDescriptorDistance)
    const results = fullFaceDescriptions.map(fd => faceMatcher.findBestMatch(fd.descriptor))

    i++;
}

// I somehow want this to work
function makeLink() {
    alert("ML")
    console.log("IN MAKE LINK");
    let blob = new Blob(chunks, {
        type: media.type
        }),
        url = URL.createObjectURL(blob),
        li = document.createElement('li'),
        mt = document.createElement(media.tag),
        hf = document.createElement('a');
    mt.controls = true;
    mt.src = url;
    hf.href = url;
    hf.download = `${counter++}${media.ext}`;
    hf.innerHTML = `download ${hf.download}`;
    li.appendChild(mt);
    li.appendChild(hf);
    ul.appendChild(li);
}


// onPlay(): called when the video plays; re-runs recognition roughly every 50 ms
async function onPlay(videoEl) {
    run2()
    setTimeout(() => onPlay(videoEl), 50)
}
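For the upload case, I assume that once makeLink() has a Blob, sending it to the server would look roughly like the sketch below; the /upload endpoint and the 'video' field name are hypothetical, and the server would still need to accept multipart form data.

// Hypothetical upload of a recorded Blob (sketch, untested)
async function uploadRecording(blob) {
    const form = new FormData();
    form.append('video', blob, 'recording.webm');            // placeholder field name and filename
    await fetch('/upload', { method: 'POST', body: form });  // placeholder route on my server
}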

1 Answer


I'm not familiar with JavaScript, but in general only one program can communicate with the camera at a time. You will probably need to write a server that reads the data from the camera and then forwards it to your facial recognition, your recording code, and so on.
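In rough terms the idea would look something like the sketch below; I can't vouch for the exact JavaScript, so treat it as an assumption about how opencv4nodejs and the ws WebSocket package might be wired together, not as a tested solution.

// server.js: the only process that owns the camera; it broadcasts frames to all consumers
const cv = require('opencv4nodejs');
const WebSocket = require('ws');

const cap = new cv.VideoCapture(0);                // camera device 0
const wss = new WebSocket.Server({ port: 8080 });  // recognition page, recorder, etc. connect here

setInterval(() => {
    const frame = cap.read();                      // grab one frame from the camera
    if (frame.empty) return;
    const jpg = cv.imencode('.jpg', frame);        // JPEG-encoded Buffer
    wss.clients.forEach((client) => {
        if (client.readyState === WebSocket.OPEN) client.send(jpg);
    });
}, 50);                                            // roughly 20 frames per second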

Jiří Skála