4

I am trying to run this HTML example https://codepen.io/mediapipe/details/KKgVaPJ from https://google.github.io/mediapipe/solutions/face_mesh#javascript-solution-api in a Create React App application. I have already done:

  • npm install of all the facemesh mediapipe packages.
  • Already replaced the jsdelivr tags with node imports and I got the definitions and functions.
  • Replaced the video element with react-cam

I don't know how to replace this jsdelivr call; maybe it is affecting the result:

const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });

So the question is:

  • Why is the face mesh not showing? Is there an example of what I am trying to do?

This is my App.js code (sorry for the debugging scaffolding):

import './App.css';
import React, { useState, useEffect } from "react";
import Webcam from "react-webcam";
import { Camera, CameraOptions } from '@mediapipe/camera_utils'
import {
  FaceMesh,
  FACEMESH_TESSELATION,
  FACEMESH_RIGHT_EYE,
  FACEMESH_LEFT_EYE,
  FACEMESH_RIGHT_EYEBROW,
  FACEMESH_LEFT_EYEBROW,
  FACEMESH_FACE_OVAL,
  FACEMESH_LIPS
} from '@mediapipe/face_mesh'
import { drawConnectors } from '@mediapipe/drawing_utils'

// Capture settings requested from getUserMedia: 720p video from the
// user-facing (front) camera.
const videoConstraints = {
  facingMode: "user",
  height: 720,
  width: 1280,
};

function App() {
  // Refs to the <Webcam> wrapper component and the overlay <canvas> below.
  const webcamRef = React.useRef(null);
  const canvasReference = React.useRef(null);
  // Flipped to true once the webcam stream is live (see onUserMedia below);
  // its change re-runs the camera-setup effect.
  const [cameraReady, setCameraReady] = useState(false);
  // NOTE(review): plain locals, re-created on every render — values assigned
  // inside useEffect do not survive a re-render.
  let canvasCtx
  let camera

  // NOTE(review): this component renders no element with class 'input_video',
  // so this lookup is presumably undefined — verify it is still needed.
  const videoElement = document.getElementsByClassName('input_video')[0];
  // const canvasElement = document.getElementsByClassName('output_canvas')[0];

  // Detached canvas (never appended to the DOM); only its width/height are
  // read in onResults below.
  const canvasElement = document.createElement('canvas');

  console.log('canvasElement', canvasElement)
  console.log('canvasCtx', canvasCtx)

  // Starts the MediaPipe camera loop whenever cameraReady changes.
  useEffect(() => {
    // NOTE(review): Camera is given the react-webcam component instance;
    // the underlying <video> element is webcamRef.current.video — confirm
    // which one camera_utils expects.
    camera = new Camera(webcamRef.current, {
      onFrame: async () => {
        console.log('{send}',await faceMesh.send({ image: webcamRef.current.video }));
      },
      width: 1280,
      height: 720
    });

    canvasCtx = canvasReference.current.getContext('2d');
    camera.start();
    console.log('canvasReference', canvasReference)

  }, [cameraReady]);

  // MediaPipe callback: paints the camera frame and the face-landmark
  // overlays onto the 2D context captured in the effect above.
  function onResults(results) {
    console.log('results')
    canvasCtx.save();
    // NOTE(review): clears/draws using the detached canvasElement's
    // dimensions, not the on-screen canvas's — verify this is intended.
    canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
    canvasCtx.drawImage(
      results.image, 0, 0, canvasElement.width, canvasElement.height);
    if (results.multiFaceLandmarks) {
      for (const landmarks of results.multiFaceLandmarks) {
        drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYEBROW, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYEBROW, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
      }
    }
    canvasCtx.restore();
  }

  // NOTE(review): constructed in the render body, so every re-render builds a
  // brand-new FaceMesh instance while the Camera created in the effect above
  // may still reference an old one.
  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });
  faceMesh.setOptions({
    selfieMode: true,
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5
  });
  faceMesh.onResults(onResults);

  // const camera = new Camera(webcamRef.current, {
  //   onFrame: async () => {
  //     await faceMesh.send({ image: videoElement });
  //   },
  //   width: 1280,
  //   height: 720
  // });
  // camera.start();

  return (
    <div className="App">
      <Webcam
        audio={false}
        height={720}
        ref={webcamRef}
        screenshotFormat="image/jpeg"
        width={1280}
        videoConstraints={videoConstraints}
        onUserMedia={() => {
          console.log('webcamRef.current', webcamRef.current);
          // navigator.mediaDevices
          //   .getUserMedia({ video: true })
          //   .then(stream => webcamRef.current.srcObject = stream)
          //   .catch(console.log);

          setCameraReady(true)
        }}
      />
      <canvas
        ref={canvasReference}
        style={{
          position: "absolute",
          marginLeft: "auto",
          marginRight: "auto",
          left: 0,
          right: 0,
          textAlign: "center",
          zindex: 9,
          width: 1280,
          height: 720,
        }}
      />

    </div >
  );
}

export default App;
Lorenzo SU
  • 45
  • 2
  • 5

1 Answer

3

You don't have to replace the jsdelivr, that piece of code is fine; also I think you need to reorder your code a little bit:

  • You should put the faceMesh initialization inside the useEffect, with [] as the dependency array, so the initialization runs only once, when the component first mounts
  • Also, you don't need to get videoElement and canvasElement with doc.*, because you already have some refs defined

An example of code:

useEffect(() => {
  // Build the FaceMesh instance once, on mount. Note: use FaceMesh from
  // @mediapipe/face_mesh here — FaceDetection (@mediapipe/face_detection)
  // is a different solution and does not accept maxNumFaces or
  // minTrackingConfidence options.
  const faceMesh = new FaceMesh({
    // Resolves the WASM/model assets shipped with @mediapipe/face_mesh.
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    },
  });

  faceMesh.setOptions({
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5,
  });

  faceMesh.onResults(onResults);

  // Only start the camera once the webcam ref has been attached
  // (matches both undefined and null).
  if (webcamRef.current != null) {
    // Pass the underlying <video> element, not the react-webcam wrapper.
    camera = new Camera(webcamRef.current.video, {
      onFrame: async () => {
        await faceMesh.send({ image: webcamRef.current.video });
      },
      width: 1280,
      height: 720,
    });
    camera.start();
  }
}, []);

Finally, in the onResults callback I would suggest printing first the results, just to check if the Mediapipe implementation is working fine. And don't forget to set the canvas size before drawing something.

// MediaPipe results callback: log the results first to confirm the
// pipeline works, then size the canvas's drawing buffer to the actual
// video resolution before drawing (an unsized canvas renders blank or
// scaled output).
function onResults(results) {
  console.log(results);
  canvasCtx = canvasReference.current.getContext('2d');
  // 'canvas' was undefined in the original snippet — the on-screen canvas
  // lives on the ref, so set the dimensions there.
  canvasReference.current.width = webcamRef.current.video.videoWidth;
  canvasReference.current.height = webcamRef.current.video.videoHeight;

  // ...rest of the drawing code (clearRect / drawImage / drawConnectors)...
}

Good luck! :)

mateomotriz
  • 66
  • 1
  • 7
  • Has anyone been able to load the assets in the CDN in an S3 bucket and still have this work in Safari? Weirdest thing. Safari does not work but Chrome does with S3. They both work when it's the jsdelivr CDN. – zero_cool Mar 31 '23 at 15:20