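// p5.js sketch: webcam handpose tracking with ml5.js.
// Letters are drawn at the detected fingertip positions, and the fill and
// background colors follow the wrist position.
// Note: this assumes the ml5.js 0.x handpose API, where each prediction
// exposes a `landmarks` array of [x, y, z] keypoints.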
let sora;     // pixel font loaded in preload()
let video;    // webcam capture
let handpose; // ml5 handpose model
let hands;    // latest handpose predictions
// Load the font
function preload() {
  sora = loadFont('PressStart2P-Regular.ttf');
}
function setup() {
  createCanvas(640, 480);
  video = createCapture(VIDEO);
  video.size(640, 480);
  video.hide(); // hide the raw video element; the frames are drawn manually
  handpose = ml5.handpose(video, modelLoaded); // load the handpose model
  textFont(sora);
}
function draw() {
  background(0);
  tint(100);
  image(video, 0, 0);
  noStroke();
  fill(240);
  textSize(20);
  textAlign(CENTER, CENTER);
  filter(GRAY); // convert everything drawn so far to grayscale
  text("left hand", 100, 450);
  if (hands && hands.length > 0) {
    fill(0);
    stroke(0);
    textSize(80);
    textAlign(CENTER, BOTTOM);
    // Fill color depends on the wrist's y position (landmark 0)
    let s = map(hands[0].landmarks[0][1], 0, height, 10, 250);
    fill(100 / s, 0, 300);
    // Background color depends on the wrist's x position
    let b = map(hands[0].landmarks[0][0], 0, width, 10, 250);
    background(100 / b, 0, 300, 50);
    // Finger tips: landmarks 4 (thumb), 8 (index), 12 (middle), 16 (ring), 20 (pinky)
    text("a", hands[0].landmarks[20][0], hands[0].landmarks[20][1]);
    text("e", hands[0].landmarks[16][0], hands[0].landmarks[16][1]);
    text("d", hands[0].landmarks[12][0], hands[0].landmarks[12][1]);
    text("i", hands[0].landmarks[8][0], hands[0].landmarks[8][1]);
    text("my", hands[0].landmarks[4][0], hands[0].landmarks[4][1]);
  }
}
// When the model is loaded
function modelLoaded() {
  console.log('Handpose loaded!');
  // Listen for new 'predict' events once the model is ready
  handpose.on('predict', gotHands);
}
// Store the latest 'predict' results
function gotHands(results) {
  hands = results;
}