Update tests
Changed files:
- public/index.html +25 -20
- public/sketch4.js +59 -0
- public/tests/index.js +73 -0
- public/tests/index_old.html +33 -0
- public/tests/index_posenet.html +26 -0
- public/tests/index_segmegtation.html +26 -0
- public/{sketch.js → tests/sketch.js} +0 -0
- public/tests/sketch2.js +118 -0
- public/tests/sketch3.js +99 -0
public/index.html
CHANGED
@@ -1,33 +1,38 @@
-
-
+<!DOCTYPE html>
+<html>
+<head>
+    <title>Docker Tensorflow</title>
     <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/mvoloskov/potion/potion.min.css">
-
+
     <style>
         body {
             background: #0b0f19;
             color: white;
         }
     </style>
-
-
-
+    <!-- Load p5.js -->
+    <script src="https://cdn.jsdelivr.net/npm/p5@1.4.0/lib/p5.js"></script>
+</head>
+
+<body>
     <header>
-
-
-
-
+        <h1>Docker Opencv APP</h1>
+        <p>An app for applying AI to streaming video in JavaScript</p>
+        <p>This is a work-in-progress project built with p5.js, OpenCV, TensorFlow.js, Node.js and Docker</p>
+        <p>For now it only applies a halftone effect to the webcam video</p> <a href="tests/index_old.html">Halftone</a>
     </header>

-    <script src="sketch.js"></script>
-    <!--
-    <p>Blur Size: <input type="range" id="blurSize" value="30"></p>
-    <p>Low Threshold: <input type="range" id="lowThreshold" value="15"></p>
-    <p>High Threshold: <input type="range" id="highThreshold" value="25"></p>
-    -->
     <div>
-
-
-
-
+
+        <canvas id="canvas" width="640" height="480"></canvas>
+        <main></main>
+    </div>
+
+    <script src="https://unpkg.com/@tensorflow/tfjs-core@3.3.0/dist/tf-core.js"></script>
+    <script src="https://unpkg.com/@tensorflow/tfjs-converter@3.3.0/dist/tf-converter.js"></script>
+    <script src="https://unpkg.com/@tensorflow/tfjs-backend-webgl@3.3.0/dist/tf-backend-webgl.js"></script>
+    <script src="https://unpkg.com/@tensorflow-models/deeplab@0.2.1/dist/deeplab.js"></script>
+    <script src="sketch4.js" defer></script>
+</body>

 </html>
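The load order above matters: tfjs-core and the WebGL backend must be in place before the deeplab bundle runs, and sketch4.js is deferred so the libraries and the canvas exist first. A minimal sanity check, assuming only the script tags above (this listener is illustrative, not part of the commit):

window.addEventListener('load', async () => {
  await tf.ready();                              // resolves once a tfjs backend is initialized
  console.log('tfjs backend:', tf.getBackend()); // expected: "webgl"
});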
public/sketch4.js
ADDED
let capture;

let model;

// sketch4.js is loaded with `defer`, so the canvas element exists by now.
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');

function setup() {
  createCanvas(640, 480);

  // Start the webcam; initModel runs once the stream is available.
  capture = createCapture(VIDEO, initModel);
  capture.hide();
  background(255);
}

async function initModel() {
  model = await deeplab.load({ base: 'pascal', quantizationBytes: 2 });
}

async function predict() {
  const prediction = await model.segment(capture.elt);
  renderPrediction(prediction);
}

function renderPrediction(prediction) {
  const { height, width, segmentationMap } = prediction;

  // segmentationMap is a Uint8ClampedArray of RGBA values, one color per class.
  const segmentationMapData = new ImageData(segmentationMap, width, height);
  ctx.putImageData(segmentationMapData, 0, 0);
}

function draw() {
  background(0, 1);

  // Only segment once the model has finished loading.
  if (model) {
    predict();
  }
}

function keyPressed() {
  noLoop();
}
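Because draw() runs every frame but model.segment() is asynchronous, calls can pile up faster than they resolve. A minimal throttling sketch, assuming the same globals as sketch4.js above (the busy flag and the predictThrottled name are illustrative, not part of the commit):

let busy = false;

async function predictThrottled() {
  if (busy) return; // a segmentation is still in flight; skip this frame
  busy = true;
  try {
    const prediction = await model.segment(capture.elt);
    renderPrediction(prediction);
  } finally {
    busy = false;
  }
}

Calling predictThrottled() from draw() instead of predict() keeps at most one segmentation running at a time.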
public/tests/index.js
ADDED
let model;

const chooseFiles = document.getElementById('chooseFiles');
const modelNameSelect = document.getElementById('modelNameSelect');
const segmentImageButton = document.getElementById('segmentImage');
const legendsDiv = document.getElementById('legends');
const image = document.getElementById('image');
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');

// Preview the chosen file in the <img> element.
chooseFiles.onchange = () => {
  const [file] = chooseFiles.files;
  if (file) {
    image.src = URL.createObjectURL(file);
  }
};

segmentImageButton.onclick = predict;

document.getElementById('loadModel').onclick = async () => {
  segmentImageButton.disabled = true;
  updateModelLoadStatus('Model loading...');

  const modelName = modelNameSelect.options[modelNameSelect.selectedIndex].value;
  await loadModel(modelName);
  updateModelLoadStatus(modelName + ' model loaded!');

  segmentImageButton.disabled = false;
};

async function loadModel(modelName) {
  model = await deeplab.load({ base: modelName, quantizationBytes: 2 });
}

function updateModelLoadStatus(status) {
  document.getElementById('modelLoadedStatus').innerHTML = status;
}

async function predict() {
  const prediction = await model.segment(image);
  renderPrediction(prediction);
}

function renderPrediction(prediction) {
  const { legend, height, width, segmentationMap } = prediction;

  // Resize the canvas to the prediction before painting the class-colored map.
  const segmentationMapData = new ImageData(segmentationMap, width, height);
  canvas.width = width;
  canvas.height = height;
  ctx.putImageData(segmentationMapData, 0, 0);

  displayLegends(legend);
}

// Render one colored label chip per class found in the segmentation.
function displayLegends(legendObj) {
  legendsDiv.innerHTML = '';
  document.getElementById('legendLabel').style.visibility = 'visible';

  Object.keys(legendObj).forEach((legend) => {
    const [red, green, blue] = legendObj[legend];

    const span = document.createElement('span');
    span.innerHTML = legend;
    span.style.backgroundColor = `rgb(${red}, ${green}, ${blue})`;
    span.style.padding = '10px';
    span.style.marginRight = '10px';
    span.style.color = '#ffffff';

    legendsDiv.appendChild(span);
  });
}
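The same deeplab API can be driven without the buttons above. A hedged usage sketch, assuming the deeplab bundle is already on the page (segmentOnce is an illustrative name, not part of the commit):

async function segmentOnce(imgElement, base = 'pascal') {
  // Load the model, run one segmentation, and return it as paintable pixels.
  const model = await deeplab.load({ base, quantizationBytes: 2 });
  const { legend, width, height, segmentationMap } = await model.segment(imgElement);
  console.log('classes found:', Object.keys(legend));
  return new ImageData(segmentationMap, width, height);
}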
public/tests/index_old.html
ADDED
<html>
<head>
    <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/mvoloskov/potion/potion.min.css">

    <style>
        body {
            background: #0b0f19;
            color: white;
        }
    </style>
    <script src="https://cdn.jsdelivr.net/npm/p5@1.8.0/lib/p5.js"></script>
</head>
<body>
    <header>
        <h1>Docker Opencv APP</h1>
        <p>An app for applying AI to streaming video in JavaScript</p>
        <p>This is a work-in-progress project built with p5.js, OpenCV, TensorFlow.js, Node.js and Docker</p>
        <p>For now it only applies a halftone effect to the webcam video</p>
    </header>

    <script src="sketch.js"></script>
    <!--
    <p>Blur Size: <input type="range" id="blurSize" value="30"></p>
    <p>Low Threshold: <input type="range" id="lowThreshold" value="15"></p>
    <p>High Threshold: <input type="range" id="highThreshold" value="25"></p>
    -->
    <div>
        <main>
        </main>
    </div>
</body>

</html>
public/tests/index_posenet.html
ADDED
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0, initial-scale=1">

    <title>3D Pose Estimation Sample by BlazePose & p5.js</title>
    <!-- Load p5.js -->
    <script src="https://cdn.jsdelivr.net/npm/p5@1.4.0/lib/p5.js"></script>
    <!-- Load pose detection using the TensorFlow.js runtime -->
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/pose-detection"></script>
    <script src="https://cdn.jsdelivr.net/npm/@mediapipe/pose"></script>
    <!-- Load pose detection using the MediaPipe runtime -->
    <!-- <script src="https://cdn.jsdelivr.net/npm/@mediapipe/pose"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/pose-detection"></script> -->
    <!-- My Sketch -->
</head>
<body>

    <script src="sketch2.js"></script>
</body>
</html>
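The commented-out scripts above point at the alternative MediaPipe runtime. A minimal setup sketch for it, assuming the pose-detection and @mediapipe/pose bundles are loaded (the solutionPath value follows the pose-detection docs; the function name is illustrative):

async function createMediaPipeDetector() {
  // BlazePose on the MediaPipe runtime; solutionPath tells it where the
  // @mediapipe/pose assets live.
  return poseDetection.createDetector(poseDetection.SupportedModels.BlazePose, {
    runtime: 'mediapipe',
    solutionPath: 'https://cdn.jsdelivr.net/npm/@mediapipe/pose',
  });
}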
public/tests/index_segmegtation.html
ADDED
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0, initial-scale=1">

    <title>3D Pose Estimation Sample by BlazePose & p5.js</title>
    <!-- Load p5.js -->
    <script src="https://cdn.jsdelivr.net/npm/p5@1.4.0/lib/p5.js"></script>
    <!-- Load body segmentation using the TensorFlow.js runtime -->
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/body-segmentation"></script>

    <!-- Optional: include the script below to use the TensorFlow.js runtime.
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script> -->

    <!-- Optional: include the script below to use the MediaPipe runtime.
    <script src="https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation"></script> -->
</head>
<body>
    <canvas id="myCanvas" width="640" height="480" style="border:1px solid #000000;"></canvas>
    <script src="sketch3.js"></script>
</body>
</html>
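Likewise, the optional selfie_segmentation script would enable the MediaPipe runtime for the segmenter. A hedged sketch, with the same caveat as above on solutionPath:

async function createMediaPipeSegmenter() {
  // Selfie segmentation on the MediaPipe runtime instead of tfjs.
  return bodySegmentation.createSegmenter(
    bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation, {
      runtime: 'mediapipe',
      solutionPath: 'https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation',
    });
}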
public/{sketch.js → tests/sketch.js}
RENAMED
File without changes
public/tests/sketch2.js
ADDED
// ES-module alternative (unused here; the page loads these from CDN <script> tags):
// import '@tensorflow/tfjs-backend-webgl';
// import * as poseDetection from '@tensorflow-models/pose-detection';

/*
=============
Params
=============
*/

let capture;

let detector;
let poses;

/*
=============
Functions
=============
*/

function setup() {
  createCanvas(640, 480);

  // Start the webcam; initModel runs once the stream is available.
  capture = createCapture(VIDEO, captureLoaded);

  background(255);
}

function captureLoaded() {
  console.log("capture loaded...");
  initModel();
}

async function initModel() {
  const model = poseDetection.SupportedModels.MoveNet;
  const detectorConfig = {
    runtime: "tfjs", // 'mediapipe' or 'tfjs'
    // Note: 'lite'/'full'/'heavy' are BlazePose model types; MoveNet takes
    // e.g. poseDetection.movenet.modelType.SINGLEPOSE_LIGHTNING instead.
  };
  detector = await poseDetection.createDetector(model, detectorConfig);
}

async function getPose() {
  poses = await detector.estimatePoses(capture.elt);
}

function draw() {
  // Near-transparent background so previous frames fade out slowly.
  background(0, 1);
  if (detector) {
    getPose();
  }
  drawPoseInfo();
}

function drawPoseInfo() {
  noStroke();
  fill(255, 0, 0, 128);

  if (poses && poses.length > 0) {
    // Mirrored keypoint positions (drawing is currently commented out).
    for (let i = 0; i < poses.length; i++) {
      for (let j = 0; j < poses[i].keypoints.length; j++) {
        if (poses[i].keypoints[j].score > 0.1) {
          const posX = width - int(poses[i].keypoints[j].x);
          const posY = height - int(poses[i].keypoints[j].y);

          //circle(posX, posY, 10);
        }
      }
    }

    // Connect keypoint pairs that are more than 150 px apart, skipping the
    // first four (face) keypoints of the first detected pose.
    stroke(255);
    const l = poses[0].keypoints.length;
    for (let j = 4; j < l; j++) {
      for (let i = j + 1; i < l; i++) {
        const d = dist(int(poses[0].keypoints[i].x),
                       int(poses[0].keypoints[i].y),
                       int(poses[0].keypoints[j].x),
                       int(poses[0].keypoints[j].y));

        if (d > 150) {
          line(int(poses[0].keypoints[i].x),
               int(poses[0].keypoints[i].y),
               int(poses[0].keypoints[j].x),
               int(poses[0].keypoints[j].y));
        }
      }
    }
  }
}

function keyPressed() {
  noLoop();
}
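With the tfjs runtime, MoveNet keypoints come back with names attached. A small illustrative helper, assuming the poses global from sketch2.js above (the function name and threshold are assumptions):

function logConfidentKeypoints(minScore = 0.5) {
  if (!poses || poses.length === 0) return;
  for (const kp of poses[0].keypoints) {
    if (kp.score > minScore) {
      // kp.name is e.g. 'nose', 'left_wrist' in the pose-detection API.
      console.log(`${kp.name}: (${kp.x.toFixed(0)}, ${kp.y.toFixed(0)})`);
    }
  }
}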
public/tests/sketch3.js
ADDED
let capture;

let segmenter;
let people;

function setup() {
  createCanvas(640, 480);

  // Start the webcam; initModel runs once the stream is available.
  capture = createCapture(VIDEO, captureLoaded);

  background(255);
}

function captureLoaded() {
  console.log("capture loaded...");
  initModel();
}

async function initModel() {
  const model = bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation; // or BodyPix

  const segmenterConfig = {
    runtime: 'tfjs', // or 'mediapipe'
    modelType: 'general' // or 'landscape'
  };

  segmenter = await bodySegmentation.createSegmenter(model, segmenterConfig);
}

async function getPose() {
  people = await segmenter.segmentPeople(capture.elt);
}

function draw() {
  background(0, 1);
  if (segmenter) {
    // Note: getPose and getPose2 each run a full segmentation per frame.
    getPose();
    getPose2();
  }
  drawPoseInfo();
}

function drawPoseInfo() {
  noStroke();
  fill(255, 0, 0, 128);

  if (people && people.length > 0) {
    // Keypoint-drawing code copied from sketch2.js was removed here:
    // segmentation results carry masks, not keypoints.
  }
}

function keyPressed() {
  noLoop();
}

async function getPose2() {
  const img = capture.elt;

  const segmentation = await segmenter.segmentPeople(img);

  // The mask is binary: 1 where there is a person and 0 where there is not.
  const coloredPartImage = await bodySegmentation.toBinaryMask(segmentation);
  const opacity = 0.7;
  const flipHorizontal = false;
  const maskBlurAmount = 0;
  // The canvas in index_segmegtation.html has id "myCanvas", not "canvas".
  const canvas = document.getElementById('myCanvas');

  // Draw the mask on top of the original frame onto the canvas. It is drawn
  // semi-transparent (opacity 0.7) so the original image stays visible.
  bodySegmentation.drawMask(
    canvas, img, coloredPartImage, opacity, maskBlurAmount,
    flipHorizontal);
}
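toBinaryMask also accepts foreground and background colors, so the person region can be tinted instead of using the default mask colors. A hedged variant of getPose2 (drawRedMask is an illustrative name; the argument order follows the body-segmentation docs):

async function drawRedMask() {
  const img = capture.elt;
  const segmentation = await segmenter.segmentPeople(img);

  const foreground = { r: 255, g: 0, b: 0, a: 255 }; // person pixels
  const background = { r: 0, g: 0, b: 0, a: 0 };     // everything else
  const mask = await bodySegmentation.toBinaryMask(segmentation, foreground, background);

  bodySegmentation.drawMask(
    document.getElementById('myCanvas'), img, mask, 0.7, 0, false);
}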