'l' to draw labels, 'v' to draw video, 't' to transmit the pose stream to the server ('h' shows the full list of key bindings).
// posenet stuff taken from https://github.com/ml5js/ml5-examples/blob/master/p5js/PoseNet/PoseNet_webcam/sketch.js
// (and simplified)
// (Note: this only works with ml5 v0.3.1 - later versions don't work)
// "use strict";
// Socket for communication with other running instances of the sketch
const socket = io.connect($OP.getEchoServerURL(951961));
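// ($OP.getEchoServerURL is an OpenProcessing helper; the echo server it points
//  at is assumed here to rebroadcast each emitted event to all other connected
//  clients, which is what lets the 'remotePose' messages below fan out)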
let video; // the webcam video capture we will analyse (and maybe draw to screen)
let localPoses = []; // an array of {pose, skeleton, hue} objects, refreshed on every detection
let remotePoses = {}; // a map from client id to {pose, skeleton, hue}
let myId;
let myHue;
let settings = {
shouldShowVideo: false,
shouldTransmitPoses: false,
shouldShowPartLabels: false,
shouldShowSillyEyes: true,
shouldShowKeypoints: false,
shouldFillFaceOval: false,
shouldShowHelp: false,
shouldDetectMultiplePoses: true
}
const confidences = {
forMouth: 0.3,
forTorso: 0.2,
forGeneralKeypoint: 0.4,
forPose: 0.3,
forEarsForFace: 0.1
};
function setup() {
//TODO: make a UUID or better still, have socket.io provide this
myId = floor(random(9999999999));
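  // (crypto.randomUUID() in modern browsers, or socket.id once the socket has
  //  connected, would be collision-safe alternatives)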
myHue = random(360);
createCanvas(640, 480);
video = createCapture(VIDEO);
video.size(width, height);
video.hide();
setupPoseNet(video, handleDetectedPoses);
socket.on('remotePose', storeRemotePose);
}
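
// setupPoseNet is defined elsewhere in the full sketch (not shown in this
// excerpt); a minimal sketch of it, assuming the ml5 v0.3.1 poseNet API,
// might look like this:
//
// function setupPoseNet(videoElem, onPoses) {
//   const options = {
//     detectionType: settings.shouldDetectMultiplePoses ? "multiple" : "single"
//   };
//   // the third argument is the model-ready callback
//   const poseNet = ml5.poseNet(videoElem, options, () => console.log("model ready"));
//   // fires repeatedly with an array of {pose, skeleton} results
//   poseNet.on("pose", onPoses);
// }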
function draw() {
if (settings.shouldShowVideo) {
//draw the video image in the background
image(video, 0, 0, width, height);
} else {
colorMode(RGB);
background(100);
}
drawPosesAndSkeletons(localPoses);
drawPosesAndSkeletons(Object.values(remotePoses));
drawDebug();
  if (settings.shouldShowHelp || frameCount < 30) { // also show help briefly at startup
drawHelp();
}
}
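
// drawPosesAndSkeletons is likewise defined elsewhere; a minimal sketch,
// assuming ml5's keypoint layout ({position: {x, y}, score, part}) and
// skeleton layout (an array of [keypointA, keypointB] pairs), might be:
//
// function drawPosesAndSkeletons(poses) {
//   colorMode(HSB, 360, 100, 100);
//   for (const { pose, skeleton, hue } of poses) {
//     stroke(hue, 80, 90);
//     for (const [a, b] of skeleton) {
//       line(a.position.x, a.position.y, b.position.x, b.position.y);
//     }
//     if (settings.shouldShowKeypoints) {
//       noStroke();
//       fill(hue, 80, 90);
//       for (const kp of pose.keypoints) {
//         if (kp.score > confidences.forGeneralKeypoint) {
//           circle(kp.position.x, kp.position.y, 8);
//         }
//       }
//     }
//   }
// }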
function handleDetectedPoses(results) {
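  // (each ml5 result has the shape
  //  {pose: {score, keypoints: [{position, score, part}, ...]}, skeleton: [...]})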
  // decorate the results with a hue to indicate local pose(s)
  results.forEach((obj, ix) => obj.hue = (ix * 60) % 360);
  // and store them for later drawing, etc.
  // (if we have at least some confidence in them)
  localPoses = results.filter(({ pose }) => pose.score > confidences.forPose);
if (localPoses.length > 0 && settings.shouldTransmitPoses) {
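    // (only the first local pose is transmitted, tagged with this client's id and hue)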
socket.emit("remotePose", myId, myHue, localPoses[0]);
}
}
function storeRemotePose(id, theHue, { pose, skeleton }) {
  remotePoses[id] = { pose, skeleton, hue: theHue };
}
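// Note: entries in remotePoses are only ever overwritten, never deleted, so a
// client that stops transmitting leaves its last pose frozen on screen. A
// hypothetical fix: record a lastSeen timestamp here and prune stale entries in draw().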
function mousePos() {
return createVector(mouseX, mouseY);
}
function toggleSetting(optionName) {
settings[optionName] = !settings[optionName];
}
const keyConfigs = [
["f", () => toggleSetting("shouldFillFaceOval"), "fill face"],
["v", () => toggleSetting("shouldShowVideo"), "show original video"],
["l", () => toggleSetting("shouldShowPartLabels"), "show labels"],
["k", () => toggleSetting("shouldShowKeypoints"), "show keypoints"],
["e", () => toggleSetting("shouldShowSillyEyes"), "show silly eyes"],
["t", () => toggleSetting("shouldTransmitPoses"), "transmit via socket.io"],
["h", () => toggleSetting("shouldShowHelp"), "show help"],
//Can't do this easily - it'd need a restart.
// ["m", () => toggleSetting("shouldDetectMultiplePoses"), "detect multiple poses"],
];
function keyPressed() {
for (let [triggerKey, fn] of keyConfigs) {
if (key === triggerKey) {
fn();
break;
}
}
}
function drawHelp() {
push();
  const helpText = keyConfigs.map(([k, , desc]) => `${k}: ${desc}`).join("\n");
textAlign(CENTER);
textSize(20);
noStroke();
fill('white');
text(helpText, width / 2, height / 2);
pop();
}
function drawDebug() {
fill('white');
noStroke();
textSize(16);
const lines = [
`Num poses detected: ${localPoses.length}`,
`Num remote poses stored: ${Object.keys(remotePoses).length}`
].join("\n");
text(lines, 100, height - 50);
  if (settings.shouldTransmitPoses) {
    fill('red');
    noStroke();
    textSize(12);
    text("transmitting", width - 100, height - 30);
  }
}