Oh, that naughty sketch! Please let us know what the issue is below.
Apply Template
Applying this template will reset your sketch and remove all your changes. Are you sure you would like to continue?
Report Sketch
Report Comment
Please confirm that you would like to report the comment below.
We will review your submission and take any actions necessary per our Community Guidelines. In addition to reporting this comment, you can also block the user to prevent any future interactions.
Please report comments only when necessary. Unnecessary or abusive use of this tool may result in your own account being suspended.
Are you sure you want to delete your sketch?
Any files uploaded will be deleted as well.
Forks of this sketch will become forks of "ml5-RockPaperScissors-2024".
Delete Comment?
This will also delete all the replies to this comment.
Delete this tab? Any code in it will be deleted as well.
Select a collection to submit your sketch
We Need Your Support
Since 2008, OpenProcessing has provided tools for creative coders to learn, create, and share over a million open source projects in a friendly environment.
Niche websites like ours need your continued support for future development and maintenance, while keeping it an ad-free platform that respects your data and privacy!
Please consider subscribing below to show your support with a "Plus" badge on your profile and get access to many other features!
A fork of ml5-RockPaperScissors-2024 by Golan Levin
CC Attribution NonCommercial ShareAlike
ml5-BodyPoseRecog-2024
Levin
xxxxxxxxxx
// Body Pose Recognition using bodyPose & neural network
// Requires: https://unpkg.com/ml5@1/dist/ml5.js
// Documentation: https://docs.ml5js.org/#/reference/bodypose
// Documentation: https://docs.ml5js.org/#/reference/neural-network
// See: https://openprocessing.org/sketch/2417039
// --- Global sketch state ------------------------------------------
let video;        // p5 webcam capture element (hidden; drawn manually in drawVideo)
let bodyPose;     // ml5 BlazePose model instance, created in preload()
let poses = [];   // latest detections from bodyPose (updated by gotPoses)
let connections;  // NOTE(review): never assigned in this file — possibly unused
let bodyPoseOptions = { maxPoses: 1, flipHorizontal: true, runtime: "mediapipe"};
let brain; // the neural network (ml5.neuralNetwork classifier)
// Interface
let dataLabelDropdown; // <select> for choosing the training label
let dataButton;        // "Add example" button
let trainButton;       // "Train model" button
const N_LANDMARKS = 33; // number of body landmarks used per pose
const CATEGORIES = ["T-Pose", "SexyPose", "NoPose"]; // classifier output labels
let sampleCounts = [0, 0, 0]; // training examples collected per category
let bTrainingCompleted = false; // gates per-frame classification in draw()
let theResults; // most recent classification output (set in gotResultsCallback)
//-----------------------------------------------
function preload() {
  // Load the bodyPose model (BlazePose) before setup() runs.
  // (A previous comment said "handPose" — this sketch tracks full-body pose.)
  // https://docs.ml5js.org/#/reference/bodypose?id=ml5bodypose
  console.log('ml5 version:', ml5.version);
  ml5.setBackend("webgl");
  bodyPose = ml5.bodyPose("BlazePose", bodyPoseOptions);
}
//==================================================
// Callback function for when bodyPose outputs data
function gotPoses(results) {
  // bodyPose.detectStart() callback: cache the newest detections so that
  // draw(), drawPoses(), and getLandmarks() can read them each frame.
  poses = results;
}
//-----------------------------------------------
function setup() {
  // Canvas pinned to the top-left corner of the page.
  const myCanvas = createCanvas(640, 480);
  myCanvas.position(0, 0);

  // Webcam capture; hidden because we render it manually in drawVideo().
  video = createCapture(VIDEO);
  video.size(640, 480);
  video.hide();

  // Begin continuous pose detection on the video stream.
  bodyPose.detectStart(video, gotPoses);

  // --- UI widgets ---
  dataLabelDropdown = createSelect();
  for (const category of CATEGORIES) {
    dataLabelDropdown.option(category);
  }
  dataLabelDropdown.position(300, 35);
  dataLabelDropdown.size(100, 40);

  dataButton = createButton('Add example');
  dataButton.mousePressed(addExampleFunction);
  dataButton.position(420, 35);
  dataButton.size(80, 40);

  trainButton = createButton('Train model');
  trainButton.mousePressed(trainModelFunction);
  trainButton.position(520, 35);
  trainButton.size(80, 40);

  // Classification network: one interleaved (x, y) pair per landmark as input.
  brain = ml5.neuralNetwork({
    inputs: (N_LANDMARKS * 2),
    outputs: CATEGORIES.length,
    task: 'classification',
    debug: true
  });
}
//-----------------------------------------------
function draw() {
  // Render order: video underlay, landmark overlay, then the HUD panel on top.
  background(255);
  drawVideo();
  drawPoses();
  if (bTrainingCompleted) {
    classify(); // request a fresh prediction every frame once trained
  }
  drawResults();
}
//-----------------------------------------------
function drawVideo() {
  // Draw the (optionally mirrored) webcam frame, dimmed so the landmark
  // overlay stands out on top of it.
  push();
  const bMirror = bodyPoseOptions.flipHorizontal;
  if (bMirror) {
    // Mirror horizontally so on-screen motion matches the user's motion.
    translate(width, 0);
    scale(-1, 1);
  }
  const transparency = 50; // reduce this to make video transparent
  tint(255, 255, 255, transparency);
  image(video, 0, 0, width, height);
  pop();
}
//-----------------------------------------------
function drawPoses() {
  // Draw a red dot for every sufficiently-confident landmark of the
  // first (and only, since maxPoses is 1) tracked pose.
  if (poses.length === 0) return;
  const firstPose = poses[0];
  for (const keypoint of firstPose.keypoints) {
    // Skip low-confidence detections to avoid drawing phantom points.
    if (keypoint.confidence > 0.1) {
      fill('red');
      stroke('black');
      circle(keypoint.x, keypoint.y, 10);
    }
  }
}
//------------------------------------------
function drawResults() {
  // HUD panel along the top of the canvas. Before training it lists the
  // per-category sample counts; after training it shows a labeled
  // confidence bar for each category.
  noStroke();
  fill('lightgray');
  rect(0, 0, width, 110);
  textSize(14);
  if (bTrainingCompleted) {
    if (theResults && (theResults.length > 0)) {
      for (let j = 0; j < CATEGORIES.length; j++) {
        // Locate this category's entry in the (unordered) results array.
        // (Replaces an accidental O(n^2) nested scan with a single find.)
        const result = theResults.find((r) => r.label === CATEGORIES[j]);
        if (!result) continue;
        const str = result.label + ": " + nf(result.confidence, 1, 2);
        fill('black');
        noStroke();
        text(str, 120, 25 + j * 30);
        // Confidence bar: white track, dark-gray fill proportional to score.
        stroke('black');
        fill('white');
        rect(10, 10 + j * 30, 100, 20);
        fill('darkgray');
        rect(10, 10 + j * 30, 100 * result.confidence, 20);
      }
    }
  } else {
    for (let j = 0; j < CATEGORIES.length; j++) {
      const str = CATEGORIES[j] + " samples: " + sampleCounts[j];
      fill('black');
      noStroke();
      text(str, 10, 25 + j * 30);
    }
  }
}
//------------------------------------------
function getLandmarks() {
  // Flatten the first pose's keypoints into one interleaved array
  // [x0, y0, x1, y1, ...] of length N_LANDMARKS*2 — the shape the neural
  // network expects — centered on the pose's centroid so the classifier
  // is translation-invariant (works wherever the body appears on screen).
  // (Fixed stale comments: this is 33 body landmarks, not 21 hand landmarks.)
  // Returns null when no pose is currently tracked.
  if (poses.length === 0) return null;
  const pose0 = poses[0];

  // Compute the centroid of all tracked landmarks.
  let cx = 0;
  let cy = 0;
  for (let i = 0; i < N_LANDMARKS; i++) {
    cx += pose0.keypoints[i].x;
    cy += pose0.keypoints[i].y;
  }
  cx /= N_LANDMARKS;
  cy /= N_LANDMARKS;

  // Interleave centroid-relative coordinates.
  const landmarkData = [];
  for (let i = 0; i < N_LANDMARKS; i++) {
    landmarkData.push(pose0.keypoints[i].x - cx);
    landmarkData.push(pose0.keypoints[i].y - cy);
  }
  return landmarkData;
}
//------------------------------------------
// Add a training example
// Add a training example
function addExampleFunction() {
  // Capture the current pose as one labeled training example.
  // Does nothing when no pose is currently tracked.
  const inputs = getLandmarks();
  if (inputs && inputs.length > 0) {
    // Associate the current landmark data with the selected label.
    const currentLabel = dataLabelDropdown.value();
    brain.addData(inputs, [currentLabel]);
    // Track per-category sample counts for the HUD.
    // (Replaces a manual scan over CATEGORIES with indexOf.)
    const categoryIndex = CATEGORIES.indexOf(currentLabel);
    if (categoryIndex !== -1) {
      sampleCounts[categoryIndex]++;
    }
  }
}
//------------------------------------------
// Train the model
// Train the model
function trainModelFunction() {
  // Require at least one example for every category before training;
  // a missing class would produce a degenerate classifier.
  let bDataExistsForAllCategories = true;
  for (let i = 0; i < CATEGORIES.length; i++) {
    if (sampleCounts[i] === 0) {
      bDataExistsForAllCategories = false;
    }
  }
  const brainData = brain.neuralNetworkData.data.raw;
  // BUGFIX: was `||`, which allowed training to start even when some
  // categories had zero samples — contradicting the message below.
  if ((brainData.length > 0) && bDataExistsForAllCategories) {
    brain.normalizeData();
    const trainingOptions = {
      epochs: 32,
      batchSize: 12
    };
    brain.train(trainingOptions, finishedTrainingCallback);
  } else {
    print("Make sure data exists for all categories");
  }
}
//------------------------------------------
// Begin prediction
// Begin prediction
function finishedTrainingCallback() {
  // brain.train() completion callback: report success and unlock the
  // per-frame classification path in draw().
  print("Finished Training");
  bTrainingCompleted = true;
}
//------------------------------------------
function classify() {
  // Ask the trained network to classify the current pose, if any.
  // Results arrive asynchronously via gotResultsCallback.
  if (!bTrainingCompleted) return;
  const inputs = getLandmarks();
  if (inputs && inputs.length > 0) {
    brain.classify(inputs, gotResultsCallback);
  }
}
//------------------------------------------
function keyPressed() {
  // Keyboard shortcuts for persisting/restoring the training dataset.
  // (Replaced loose `==` comparisons with strict `===`.)
  if (key === 's') {
    // Save the collected examples to a downloadable JSON file.
    brain.saveData("myTrainingData.json");
  } else if (key === 'l') {
    // Load a previously exported dataset.
    // NOTE(review): the load filename differs from the save filename above —
    // confirm this is intentional.
    brain.loadData("body-pose-data.json");
  }
}
//------------------------------------------
function gotResultsCallback(results, error) {
  // brain.classify() callback: stash the latest classification output
  // so drawResults() can render it; log and bail on error.
  if (error) {
    console.error(error);
    return;
  }
  if (!results) return;
  theResults = results;
  // print(results[0].label);
}
See More Shortcuts
Please verify your email to comment
Verify Email