<div style="text-align: center">
  <video id="video" autoplay style="display: none"></video>
  <canvas id="canvas" width="300" height="200" style="margin: 0 auto"></canvas>
</div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/blazeface"></script>
<script>
  let video = document.getElementById("video");
  let model;

  // declare a canvas variable and get its 2d drawing context
  let canvas = document.getElementById("canvas");
  let ctx = canvas.getContext("2d");

  const setupCamera = () => {
    navigator.mediaDevices
      .getUserMedia({
        video: { width: 300, height: 200 },
        audio: false,
      })
      .then((stream) => {
        video.srcObject = stream;
      });
  };

  let shown = false;
  const detectFaces = async () => {
    const predictions = await model.estimateFaces(video, false);

    if (shown == false) {
      // log the predictions once to the browser console
      console.log(predictions);
      shown = true;
    }

    // draw the current video frame first
    ctx.drawImage(video, 0, 0, 300, 200);

    predictions.forEach((pred) => {
      // draw the rectangle enclosing the face
      ctx.beginPath();
      ctx.lineWidth = 5;
      ctx.strokeStyle = "white";
      // the last two arguments of rect() are width and height;
      // since blazeface returns only the corner coordinates,
      // we can compute the width and height by subtracting them
      ctx.rect(
        pred.topLeft[0],
        pred.topLeft[1],
        pred.bottomRight[0] - pred.topLeft[0],
        pred.bottomRight[1] - pred.topLeft[1]
      );
      ctx.stroke();

      // draw small rectangles for the face landmarks
      ctx.fillStyle = "white";
      pred.landmarks.forEach((landmark) => {
        ctx.fillRect(landmark[0], landmark[1], 5, 5);
      });
    });
  };

  setupCamera();
  video.addEventListener("loadeddata", async () => {
    model = await blazeface.load();
    // call detectFaces every 100 milliseconds, i.e. 10 times per second
    setInterval(detectFaces, 100);
  });
</script>
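For reference, each element of the array returned by estimateFaces(video, false) is a plain object of pixel coordinates. The sketch below shows its approximate shape; the numeric values are made up for illustration, and the landmark order follows the blazeface README:

// Approximate shape of one prediction from model.estimateFaces(video, false).
// All numbers below are illustrative, not real output.
{
  topLeft: [110.5, 42.3],      // [x, y] of the box's top-left corner
  bottomRight: [218.9, 165.7], // [x, y] of the box's bottom-right corner
  landmarks: [                 // six [x, y] facial keypoints:
    [145.1, 80.2],             //   right eye
    [190.3, 81.0],             //   left eye
    [167.8, 105.4],            //   nose
    [168.2, 135.9],            //   mouth
    [120.6, 90.1],             //   right ear
    [212.4, 92.7],             //   left ear
  ],
  probability: [0.998],        // detection confidence
}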