
Canvas appears blank?

I'm trying to render the keypoints I get from BlazePose onto a canvas, but I can't get them to draw. I know the x and y of each keypoint are being retrieved, but nothing shows up on the canvas. I've tried changing the styling, but so far no success. Thanks for your help.

// script.js
const video = document.getElementById('webcam');
const canvas = document.getElementById('output')
const liveView = document.getElementById('liveView');
const demosSection = document.getElementById('demos');
const enableWebcamButton = document.getElementById('webcamButton');
const ctx = canvas.getContext("2d"); 
let poses; 

function getUserMediaSupported() {
    return !!(navigator.mediaDevices &&
      navigator.mediaDevices.getUserMedia);
} 
if (getUserMediaSupported()) {
    enableWebcamButton.addEventListener('click', enableCam);
} else {
  console.warn('getUserMedia() is not supported by your browser');
}
  
// Enable the live webcam view and start classification.
function enableCam(event) {
  if (!model) {
    return;
  }
  // Hide the button once clicked.
  event.target.classList.add('removed');  
  // getUsermedia parameters to force video but not audio.
  const constraints = {
    video: true
  };
  document.getElementById('output').style.zIndex = "6";
  // Activate the webcam stream.
  navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
    video.srcObject = stream;
    video.addEventListener('loadeddata', predictWebcam);
  });
}

async function predictWebcam() {
  
  const videoHeight = video.videoHeight;
  const videoWidth = video.videoWidth;

  video.width = videoWidth;
  video.height = videoHeight;
  canvas.width = videoWidth;
  canvas.height = videoHeight;

  poses = await detector.estimatePoses(video)  
  //ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);

  if(poses && poses.length > 0){
    for(const pose of poses){
      if(pose.keypoints != null){
        drawKeypoints(pose.keypoints);
      }
    }
  }
  window.requestAnimationFrame(predictWebcam);
}

function drawKeypoints(keypoints){
 
  for(let i = 0; i < keypoints.length; i++){
    drawKeypoint(keypoints[i]);
  }

}

function drawKeypoint(keypoint){
  ctx.fillStyle = 'Orange';
  ctx.strokeStyle = 'Green';
  ctx.lineWidth = 2;
  const radius = 4; 
  const circle = new Path2D();
  circle.arc(keypoint.x, keypoint.y, radius, 0, 2 * Math.PI)
  ctx.fill(circle)
  ctx.stroke(circle)
}

// Store the resulting model in the global scope of our app.
let model = undefined;
let detector = undefined; 
// Before we can use the BlazePose model we must wait for it to finish loading.
async function loadModel(){
  model = poseDetection.SupportedModels.BlazePose;
  const detectorConfig = {
    runtime: 'tfjs',
    enableSmoothing: true,
    modelType: 'full'
  };
  detector = await poseDetection.createDetector(model, detectorConfig);
  demosSection.classList.remove('invisible');
}

loadModel();
<!DOCTYPE html>
<html lang="en">
  <head>
    <title>Measuring App</title>
    <meta charset="utf-8">
    <!-- Import the webpage's stylesheet -->
    <link rel="stylesheet" href="style.css">
  </head>  
  <body>
    <h1>Measuring App</h1>
    <p>Wait for the model to load before clicking the button to enable the webcam - at which point it will become visible to use.</p>
    
    <section id="demos" class="invisible">      
      <div id="liveView" class="camView">
        <button id="webcamButton">Enable Webcam</button>
        <canvas id="output"></canvas>
        <video id="webcam" autoplay muted width="640" height="480"></video>
      </div>
    </section>

    <!-- Import TensorFlow.js library -->
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js" type="text/javascript"></script>
    <!-- Load the pose-detection model used to find keypoints in the video -->
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/pose-detection"></script>
    
    <!-- Import the page's JavaScript to do some stuff -->
    <script src="script.js" defer></script>
  </body>
</html>
/* style.css */
body {
    font-family: helvetica, arial, sans-serif;
    margin: 2em;
    color: #3D3D3D;
  }
  
  h1 {
    font-style: italic;
    color: #FF6F00;
  }
  
  video {
    display: block;
  }
  
  section {
    opacity: 1;
    transition: opacity 500ms ease-in-out;
  }

  .removed {
    display: none;
    z-index: -10;
  }
  
  .invisible {
    opacity: 0.2;
  }

  .camView {
    position: relative;
    float: left;
    width: calc(100% - 20px);
    margin: 10px;
    cursor: pointer;
  }
  
  .camView p {
    position: absolute;
    padding: 5px;
    background-color: rgba(255, 111, 0, 0.85);
    color: #FFF;
    border: 1px dashed rgba(255, 255, 255, 0.7);
    z-index: 1;
    font-size: 12px;
  }

  #output {
    position: absolute;
    z-index: -100;
    top: 0; 
    bottom: 0; 
    left: 0; 
  }

P粉147045274 · 235 days ago

All replies (1)

  • P粉141035089 · 2024-04-01 11:38:59

    You should make the following fixes:

    1. If your video has a fixed size, give the canvas that same size in the HTML as well (your video is 640x480):

      <canvas id="output" width="640" height="480"></canvas>

    2. Remove the lines that set the canvas size in the predictWebcam function:
      ...
      video.width = videoWidth;
      video.height = videoHeight;
      // canvas.width = videoWidth;  <----
      // canvas.height = videoHeight;  <----
      ...
    
    3. Add the following line at the beginning of drawKeypoints:
      ctx.clearRect(0, 0, canvas.width, canvas.height);
    

    Actually, I also had to add this line at the beginning of the loadModel function to make it work completely:

    await tf.ready();
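
    Putting the JavaScript side of this together, here is a minimal sketch of the affected functions with the changes above applied. It reuses the element ids, globals, and detector config from your script, so treat it as an illustration of the steps rather than a drop-in file:

      async function predictWebcam() {
        // Keep the video element sized to its stream, but leave the canvas
        // alone: its size now comes from the width/height attributes in the HTML.
        video.width = video.videoWidth;
        video.height = video.videoHeight;

        poses = await detector.estimatePoses(video);

        if (poses && poses.length > 0) {
          for (const pose of poses) {
            if (pose.keypoints != null) {
              drawKeypoints(pose.keypoints);
            }
          }
        }
        window.requestAnimationFrame(predictWebcam);
      }

      function drawKeypoints(keypoints) {
        // Clear the previous frame's dots before drawing the new ones.
        ctx.clearRect(0, 0, canvas.width, canvas.height);
        for (let i = 0; i < keypoints.length; i++) {
          drawKeypoint(keypoints[i]);
        }
      }

      async function loadModel() {
        // Wait for the TensorFlow.js backend to initialise before creating the detector.
        await tf.ready();
        model = poseDetection.SupportedModels.BlazePose;
        detector = await poseDetection.createDetector(model, {
          runtime: 'tfjs',
          enableSmoothing: true,
          modelType: 'full'
        });
        demosSection.classList.remove('invisible');
      }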
