detect_multi_scale TypeError
tetreault opened this issue · 2 comments
tetreault commented
Hello,
I'm working on repurposing the emotion detect demo into a Nuxt (VueJS framework) project. I get an initial error I'm unsure about — the `detect_multi_scale` TypeError mentioned in the title:
Furthermore, ctrack.getCurrentPosition()
always returns false for me, and the results from meanPredict
are always non-zero. What am I doing wrong here?
<template>
<!-- Video and canvas are absolutely positioned on top of each other (see styles);
     the video is hidden (opacity: 0) and the canvas shows the mirrored frames
     plus the clmtrackr face-mesh overlay. -->
<section class="section">
<video playsinline muted width="700" height="500" ref="userMediaVideo" id="video"></video>
<canvas ref="canvasEl" id="canvas" width="700" height="500"></canvas>
<!-- Per-emotion score readouts; the spans are updated imperatively via $refs
     in the mounted() animation loop rather than through reactive data. -->
<div id="emotions">
<p>
Angry:
<span ref="angry">0</span>
</p>
<p>
Disgusted:
<span ref="disgusted">0</span>
</p>
<p>
Fear:
<span ref="fear">0</span>
</p>
<p>
Sad:
<span ref="sad">0</span>
</p>
<p>
Surprised:
<span ref="surprised">0</span>
</p>
<p>
Happy:
<span ref="happy">0</span>
</p>
</div>
</section>
</template>
<script>
export default {
  /**
   * On mount: grab the webcam, start the clmtrackr face tracker, and run a
   * requestAnimationFrame loop that draws video frames + the tracked face
   * mesh onto the canvas and updates the per-emotion score readouts.
   *
   * External globals expected (loaded as static libs): clm, emotionClassifier,
   * emotionModel, pModel, compatibility.
   */
  mounted() {
    const video = this.$refs.userMediaVideo;
    const canvasEl = this.$refs.canvasEl;
    const context = canvasEl.getContext("2d");
    const constraints = (window.constraints = {
      audio: false,
      video: { facingMode: "user" }
    });
    const ctrack = new clm.tracker({ useWebGL: true });
    const classifier = new emotionClassifier();
    const emotionData = classifier.getBlank();
    let trackingStarted = false;

    // Set eigenvectors 9 and 11 to not be regularized, to better detect
    // motion of the eyebrows.
    pModel.shapeModel.nonRegularizedVectors.push(9);
    pModel.shapeModel.nonRegularizedVectors.push(11);

    // Init clmtrackr with the face model (static/libs/clmtrackr/model_pca_20_svm.js).
    ctrack.init(pModel);
    // Initialize the classifier with the emotion model (static/libs/clmtrackr/emotionmodel.js).
    classifier.init(emotionModel);

    // Try connecting to the webcam.
    try {
      compatibility.getUserMedia(
        constraints,
        stream => {
          // BUG FIX: `srcObject` must receive the MediaStream itself, never a
          // blob-URL string. The original code did
          //   video.srcObject = compatibility.URL.createObjectURL(stream)
          // which silently breaks playback/tracking. Prefer srcObject; fall
          // back to the legacy blob-URL-on-`src` path for old browsers.
          try {
            video.srcObject = stream;
          } catch (error) {
            video.src = compatibility.URL.createObjectURL(stream);
          }
          // Start tracking.
          ctrack.start(video);
          trackingStarted = true;
          // requestAnimationFrame loop to draw the face and predict emotion.
          compatibility.requestAnimationFrame(play);
        },
        error => {
          alert("WebRTC not available");
        }
      );
    } catch (error) {
      alert(error);
    }

    const play = () => {
      compatibility.requestAnimationFrame(play);
      // Clear the full current bitmap, not the stale attribute dimensions.
      context.clearRect(0, 0, canvasEl.width, canvasEl.height);
      if (video.paused) {
        video.play();
      }
      // Draw video frames on canvas once real frame data is available.
      if (video.readyState === video.HAVE_ENOUGH_DATA && video.videoWidth > 0) {
        // Match the canvas bitmap to the intrinsic video size, but only when
        // it changes — resizing every frame resets the 2D context state.
        if (canvasEl.width !== video.videoWidth || canvasEl.height !== video.videoHeight) {
          canvasEl.width = video.videoWidth;
          canvasEl.height = video.videoHeight;
        }
        // BUG FIX: draw into the canvas *bitmap* dimensions. The original used
        // clientWidth/clientHeight (CSS layout size), which mismatches the
        // bitmap just sized to the video and skews the tracker overlay.
        context.drawImage(video, 0, 0, canvasEl.width, canvasEl.height);
        // If we have a current position, draw the face mesh overlay.
        if (ctrack.getCurrentPosition()) {
          ctrack.draw(canvasEl);
        }
        const cp = ctrack.getCurrentParameters();
        const er = classifier.meanPredict(cp);
        if (er) {
          this.$refs.angry.innerText = `${er[0].value}`;
          this.$refs.disgusted.innerText = `${er[1].value}`;
          this.$refs.fear.innerText = `${er[2].value}`;
          this.$refs.sad.innerText = `${er[3].value}`;
          this.$refs.surprised.innerText = `${er[4].value}`;
          this.$refs.happy.innerText = `${er[5].value}`;
        }
      }
    };
  }
};
</script>
<style scoped>
/* Full-viewport container so the absolutely-positioned children anchor to it. */
.section {
position: relative;
height: 100vh;
width: 100vw;
}
/* Stack the video and canvas exactly on top of each other, vertically
   centered on the left edge. */
#video,
#canvas {
position: absolute;
left: 0;
top: 0;
height: 500px;
width: 700px;
bottom: 0;
margin: auto;
margin-left: 20px;
border: 1px solid black;
}
/* Hide the raw video element; frames are mirrored onto the canvas instead. */
#video {
opacity: 0;
}
/* Emotion score readout panel, vertically centered on the right edge. */
#emotions {
position: absolute;
top: 0;
bottom: 0;
right: 0;
margin: auto;
height: 170px;
width: 320px;
}
</style>
ProxyProdigy commented
There's apparently a bug in the clmtrackr.min.js file. Replacing it with the non-minified version of the library got rid of this error for me.
tetreault commented
Amazing @ProxyProdigy that fixed it for me!