// NOTE(review): this file appears to be a non-contiguous excerpt of a p5.js
// sketch — several braces opened here are closed on lines not visible in
// this view, so all code is left byte-identical; comments only.

// Full-window WebGL canvas — required for createFramebuffer() and shaders.
createCanvas(windowWidth, windowHeight, WEBGL)
fbo = createFramebuffer()
// Webcam feed; the options object continues past this excerpt.
capture = createCapture({
// Offscreen buffers sized to the camera: one for the estimated depth map,
// one for the raw camera frame.
depthFbo = createFramebuffer({ width: capture.width, height: capture.height })
camFbo = createFramebuffer({ width: capture.width, height: capture.height })
// TensorFlow.js depth-estimation, ARPortraitDepth model. The bare `await`
// implies this runs inside an async function (e.g. an async setup()) whose
// wrapper is outside this excerpt.
const model = depthEstimation.SupportedModels.ARPortraitDepth;
estimator = await depthEstimation.createEstimator(model);
// Depth clamp range passed to estimateDepth() — presumably normalized
// 0..1 values; TODO confirm against the depth-estimation API docs.
config = { minDepth: 0.1, maxDepth: 0.99 }
// Custom material shader via the p5.js v2 shader-hooks API; the object
// passed to modify() is not closed within this excerpt.
depthShader = baseMaterialShader().modify({
// Uniform callbacks: re-evaluated by p5 each frame, binding the depth FBO
// as a texture and the camera-buffer dimensions as a vec2.
'sampler2D depthMap': () => depthFbo,
'vec2 size': () => [camFbo.width, camFbo.height],
// GLSL hook body. The template literal below is part of the compiled shader
// source, so its lines are left untouched. It samples the depth at the
// current texcoord, then probes 12 points on an 8-pixel ring around it
// (apparently an edge/outline test against a 0.1 depth threshold — the
// branch body is outside this excerpt). ${TWO_PI} is interpolated by JS
// into the GLSL source before compilation.
'Inputs getPixelInputs': `(Inputs inputs) {
float depth = texture(depthMap, inputs.texCoord).r;
for (int i = 0; i < 12; i++) {
float a = float(i)/12. * ${TWO_PI};
vec2 coord = inputs.texCoord + 8.*vec2(cos(a), sin(a))/size;
if (texture(depthMap, coord).r < 0.1) {
// p5.js event hook: on click, clear fbo's depth buffer by running
// clearDepth() while fbo is the active draw target. The function body
// continues/closes outside this excerpt.
function mouseClicked() {
fbo.draw(() => clearDepth())
// Draw-loop excerpt: only start a new depth estimate when none is already
// in flight (`task` falsy) and the model has finished loading.
if (!task && estimator) {
// Guard: the webcam may not have delivered its first frame yet.
if (!capture.elt.width || !capture.elt.height) return
// Copy the live camera frame (scaled to the camera buffer's dimensions) —
// presumably drawn inside camFbo.draw(); the wrapper is outside this view.
image(capture, 0, 0, camFbo.width, camFbo.height)
// Kick off async depth estimation on the raw <video> element, converting
// the result to a CanvasImageSource for drawing. The promise is stored in
// `task`; NOTE(review): no .catch() is visible in this excerpt — confirm a
// rejection handler exists elsewhere.
task = estimator.estimateDepth(capture.elt, config)
.then((depthMap) => depthMap.toCanvasImageSource())
// Wrap the image source so p5's image() accepts it; `img` presumably comes
// from the resolved promise above (binding not visible here).
depthCnv = new p5.Element(img)
depthCnv.width = img.width
depthCnv.height = img.height
// Blit the depth image into the depth FBO at its full size.
image(depthCnv, 0, 0, depthFbo.width, depthFbo.height)
// "Cover" scaling: fill the window while preserving the camera's aspect
// ratio, then render the camera-sized plane (textured with the shader
// configured above — draw call not visible in this excerpt).
scale(max(width/camFbo.width, height/camFbo.height))
plane(camFbo.width, camFbo.height)