• fullscreen
  • MusicVisualizer.pde
  • _interfaces.pde
  • _iso.pde
  • _radar.pde
  • _vortex.pde
  •  //////////////////////////////////////////////////////////////
     //                                                          //
     //  Music Visualizer                                        //
     //                                                          //
  //  a quick sketch to do Winamp-style music visualization   //
     //  using Processing and the Minim Library ...              //
     //                                                          //
     //  (c) Martin Schneider 2009                               //
     //                                                          //
    import ddf.minim.*;
// Minim audio engine and the player for the backing track.
Minim minim;
AudioPlayer groove;
// One renderer per visualization style; `visuals` collects them so
// `select` can index the currently active one.
AudioRenderer radar, vortex, iso;
AudioRenderer[] visuals; 
int select;
    void setup()
      // setup graphics
      size(512, 512, P3D);
      // setup player
      minim = new Minim(this);
      groove = minim.loadFile("groove.mp3", 1024);
      // setup renderers
      vortex = new VortexRenderer(groove);
      radar = new RadarRenderer(groove);
      iso = new IsometricRenderer(groove);
      visuals = new AudioRenderer[] {radar, vortex,  iso};
      // activate first renderer in list
      select = 0;
    void draw()
    void keyPressed() {
       select %= visuals.length;
    void stop()
    /// abstract class for audio visualization
    abstract class AudioRenderer implements AudioListener {
      float[] left;
      float[] right;
      synchronized void samples(float[] samp) { left = samp; }
      synchronized void samples(float[] sampL, float[] sampR) { left = sampL; right = sampR; }
      abstract void setup();
      abstract void draw(); 
    // abstract class for FFT visualization
    import ddf.minim.analysis.*;
    abstract class FourierRenderer extends AudioRenderer {
      FFT fft; 
      float maxFFT;
      float[] leftFFT;
      float[] rightFFT;
      FourierRenderer(AudioSource source) {
        float gain = .125;
        fft = new FFT(source.bufferSize(), source.sampleRate());
        maxFFT =  source.sampleRate() / source.bufferSize() * gain;
      void calc(int bands) {
        if(left != null) {
          leftFFT = new float[bands];
          for(int i = 0; i < bands; i++) leftFFT[i] = fft.getAvg(i);   
    // the code for the isometric renderer was deliberately taken from Jared C.'s wavy sketch 
    // ( http://www.openprocessing.org/visuals/?visualID=5671 )
    class IsometricRenderer extends FourierRenderer {
      int r = 7;
      float squeeze = .5;
      float a, d;
      float val[];
      int n;
      IsometricRenderer(AudioSource source) {
        n = ceil(sqrt(2) * r);
        d = min(width,height) / r / 5;
        val = new float[n];
      void setup() { 
        colorMode(RGB, 6, 6, 6); 
      void draw() {
        if(left != null) {
          // actual values react with a delay
          for(int i=0; i<n; i++) val[i] = lerp(val[i], pow(leftFFT[i], squeeze), .1);
          a -= 0.08; 
          for (int x = -r; x <= r; x++) { 
            for (int z = -r; z <= r; z++) { 
              int y = int( height/3 * val[(int) dist(x,z,0,0)]); 
              float xm = x*d - d/2; 
              float xt = x*d + d/2; 
              float zm = z*d - d/2; 
              float zt = z*d + d/2; 
              int w0 = (int) width/2; 
              int h0 = (int) height * 2/3; 
              int isox1 = int(xm - zm + w0); 
              int isoy1 = int((xm + zm) * 0.5 + h0); 
              int isox2 = int(xm - zt + w0); 
              int isoy2 = int((xm + zt) * 0.5 + h0); 
              int isox3 = int(xt - zt + w0); 
              int isoy3 = int((xt + zt) * 0.5 + h0); 
              int isox4 = int(xt - zm + w0); 
              int isoy4 = int((xt + zm) * 0.5 + h0); 
              fill (2); 
              quad(isox2, isoy2-y, isox3, isoy3-y, isox3, isoy3+d, isox2, isoy2+d); 
              fill (4); 
              quad(isox3, isoy3-y, isox4, isoy4-y, isox4, isoy4+d, isox3, isoy3+d); 
              fill(4 + y / 2.0 / d); 
              quad(isox1, isoy1-y, isox2, isoy2-y, isox3, isoy3-y, isox4, isoy4-y); 
    class RadarRenderer extends AudioRenderer {
      float aura = .25;
      float orbit = .25;
      int delay = 2;
      int rotations;
      RadarRenderer(AudioSource source) {
        rotations =  (int) source.sampleRate() / source.bufferSize();
      void setup() {
        colorMode(RGB, TWO_PI * rotations, 1, 1);
      synchronized void draw()
        if(left != null) {
          float t = map(millis(),0, delay * 1000, 0, PI);   
          int n = left.length;
          // center 
          float w = width/2 + cos(t) * width * orbit;
          float h = height/2 + sin(t) * height * orbit; 
          // size of the aura
          float w2 = width * aura, h2 = height * aura;
          // smoke effect
          if(frameCount % delay == 0 ) image(g,0,0, width+1, height+1); 
          // draw polar curve 
          float r1=0, a1=0, x1=0, y1=0, r2=0, a2=0, x2=0, y2=0; 
          for(int i=0; i <= n; i++)
            r1 = r2; a1 = a2; x1 = x2; y1 = y2;
            r2 = left[i % n] ;
            a2 = map(i,0, n, 0, TWO_PI * rotations);
            x2 = w + cos(a2) * r2 * w2;
            y2 = h + sin(a2) * r2 * h2;
            stroke(a1, 1, 1, 30);
            // strokeWeight(dist(x1,y1,x2,y2) / 4);
            if(i>0) line(x1, y1, x2, y2);
    class VortexRenderer extends FourierRenderer {
      int n = 48;
      float squeeze = .5;
      float val[];
      VortexRenderer(AudioSource source) {
        val = new float[n];
      void setup() {
        colorMode(HSB, n, n, n);
      synchronized void draw() {
        if(left != null) {  
          float t = map(millis(),0, 3000, 0, TWO_PI);
          float dx = width / n;
          float dy = height / n * .5;
          // rotate slowly
          background(0); lights();
          translate(width/2, height, -width/2);
          rotateY(-2.2 - HALF_PI + float(mouseY)/height * HALF_PI);
          // draw coloured slices
          for(int i=0; i < n; i++)
            val[i] = lerp(val[i], pow(leftFFT[i] * (i+1), squeeze), .1);
            float x = map(i, 0, n, height, 0);
            float y = map(val[i], 0, maxFFT, 0, width/2);
              translate(x, 0, 0);
              rotateX(PI/16 * i);
              fill(i, n * .7 + i * .3, n-i);
              box(dy, dx + y, dx + y);


    tweaks (0)

    about this sketch

    This sketch is running as Java applet, exported from Processing.



    Report Sketch

    Report for inappropriate content

    Please provide details if possible:

    You have successfully reported the sketch. Thank you very much for helping to keep OpenProcessing clean and tidy :)

    Make a Copyright Infringement claim


    Music Visualizer

    Add to Faves Me Likey@! 105
    You must login/register to add this sketch to your favorites.

    Winamp-Style music visualization using the Minim Library.

    Hit any key to switch between visualizations ...

    Robin Gower
    13 Nov 2009
    Excellent. Any suggestions on how to adapt this to listen to the audio in instead? SuperCollider bridge?
    Jared Counts
    15 Nov 2009
    Fight With Dogma
    22 Nov 2009
    Ooo the isometric thingie is lovely.
    rob goldman
    5 Dec 2009
    Wow, this is great! So responsive :)
    John Nordstrom
    25 Apr 2010

    Really like all three options of this music visualizer, but I especially like the smoke effect of the radar option. So elegant and quick!

    Two questions: How did you discover that "g" is the image of the current graphics (at least when drawing in P2D or P3D)? I couldn't find this documented anywhere on the Processing site. Also, can you explain the big difference in how your smoke algorithm behaves in P3D (scattering outward) vs P2D (simply disappearing)?

    I'm asking you because when I search OpenProcessing for "image(g" you are only 1 of 4 people using this technique, and you've used it very creatively.


    11 May 2010
    Hi John!

    Adding a slightly scaled version of a slightly transparent image ontop itself is the key to the smoke effect.
    The differences between P2D and P3D are the result of different implementation details inside Processing...

    I discovered about the secret of "g" when trying to squeeze a piece of code into less than 200 chars.
    'g' is the variable that holds the current PImage of the sketch.
    You may also use get() which returns a copy of g rather than using g directly.

    The g-spot is hidden inside the processing source code for PApplet which is really just a wrapper around a regular Java applet, adding all kinds of processing magic. You can browse the source code online at http://dev.processing.org :
    There is a whole bunch of variables declared public, making them accessible to the processing environment, and 'g' is just one of them.
    11 May 2010
    Now how does the smoke effect work?

    Usually image(get(), 0, 0, width, height) would add an exact copy of the image ontop of itself.
    But using other values for width and height will scale your image before mapping it back onto itself.

    And this is where I make use of a built-in Processing-bug :)
    Internally Processing does all image scaling using the blend( ..., REPLACE) function, and
    there seems to be a bug inside the bilinear filtering, since all color-values - including alpha -
    will always be reduced by exactly 1.

    I'm pretty sure this behaviour is not intended and may be fixed in future versions of Processing, but as a very nice side effect, we get an image that has an alpha of 254, and is slightly transparent.

    When using P2D or P3D with smooth() all colors will slightly darken at every frame and will fade to black over time. (This effect does not occur when using P3D in noSmooth mode.)

    While P2D copies the image into the target image, P3D really displays it as a textured rectangle in space. There are tiny differences in the rendering results, which don't matter when the effect is only applied once, but which do have an exponential effect when applied inside the image feedback loop.

    If you want to achieve a consistent smoke effect across P2D & P3D, smooth & noSmooth
    you should rely on 2d image operations only.
    11 May 2010
    Like this:

    void setup() {

    void draw() {
    // get smokey
    blend(g, 1, 1, width-1, height-1, 0, 0, width, height, REPLACE);
    // brighten up
    loadPixels(); for(int i=0; i < pixels.length; i++) pixels[i] += 0x010101;
    jeremy a
    31 Jan 2011
    Great work! hope you don't mind if i use your code/architecture for mixing in audio and visual params.

    If anyone wants to replace the song with input for a mic, you can just replace..
    groove = minim.loadFile("groove.mp3", 1024);
    groove = minim.getLineIn(Minim.STEREO, 512);

    and replace AudioPlayer groove to AudioInput groove.

    I'm going to add some more visual classes to this. will share soon!

    1 Feb 2011
    Of course I don't mind. It's all free and open source!
    Also, very good idea to point out how to use it for live visuals :)
    6 Jul 2011
    Great visualization:) But I have a question: how I could apply similar graphic effect to a solid sphere for example?
    thiiird Musique
    17 Sep 2011
    Very nice piece! Really useful too, I've been working on implementing a few of the ideas presented here for some audio reactivity... but I was wondering, how did you come up with lerp(val[i], pow(leftFFT[i], squeeze), .1)? It's definitely a lot better for getting data than just leftFFT[i] I see, and I just wanted to know how and what it's based off of for further dissection and customizing.

    Also something else that didn't click with me, the val[] array apparently is null before the line val[i] = lerp(val[i], pow(leftFFT[i] * (i+1), squeeze), .1);. so how would that work the first time running through draw(), if it's setting val using a null? I'm sure I'm just missing something small..

    Suhui Hee
    15 Nov 2011
    I'm a total noob: does anyone know how to process both audio line in and audio from an mp3 at the same time such that one overlays another?
    totally awesome. better than fireworks.
    dan smith
    12 Feb 2012
    cannot find a class or type named "AudioRenderer" --- am I falling at the first hurdle here?
    17 May 2012
    Hey, Amazing work!
    But apart from this, can i somehow take live input? As in midi, or audio from like a guitar or a mic? Can that be done? If yes, how?
    Vincent Doorman
    13 Mar 2014
    Can't get any of these visualisers to work what am I doing wrong
    13 Mar 2014
    Hi Vincent. You need to adapt the sketch a little to work with the current version of Processing (2.1.1).

    - Processing 2 ignores files that start with underscores, so that's basically all of the visualizers.
    - Minim is now part of the Processing core libraries. The new version of Minim that comes with Processing 2.1.1 may interfere with the old version of Minim included inside the code folder of this sketch.

    I updated the Music Visualizer for Processing 2.1.1, and uploaded it to github:

    miyo won
    5 Jan 2015
    cannot find a class or type named "AudioRenderer" ::: where can I find it? there are no file named "AudioRenderer"
    You need to login/register to comment.