// Neural Network sketch (Processing)
// An animated drawing of a Neural Network
// Based on Daniel Shiffman's example in "The Nature of Code", http://natureofcode.com
// Modified by Blake Porter
// www.blakeporterneuro.com
// Creates a network of neurons using a specified number of layers of neurons, inputs, and axons
// Connection synapse weights are created randomly and adjust based on activity levels, showing long term potentiation for active neural pairs and long term depression for inactive synapses
// for button
int rectX, rectY; // top-left corner of the "Generate Input" button
int rectSize = 100; // button width in pixels (the drawn height is rectSize-40, see draw())
color rectColor; // button fill when idle
color rectHighlight; // button fill while hovered
boolean rectOver = false; // true while the mouse is over the button hit box (set in update())
int layerFont = 30; // text size of the "Current layer" readout
int neuronID = 0; // id for the next neuron; doubles as its index into network.neurons
int numEC = 0; // NOTE(review): numEC/numDG/numHPC feed only numNeurons, which is never read in this file -- confirm before removing
int numDG = 0;
int numHPC = 0;
int numNeurons = numEC + numDG + numHPC;
int axons = 1; // the number of axons a neuron has
int currLayer = 0; // layer assigned to newly placed neurons (chosen with number keys 0-9)
float randWMax = 0.001; // upper bound for a new synapse's random starting weight
String input; // NOTE(review): never referenced in this file -- confirm before removing
int baseDelay = 30; // minimum ms between button-driven inputs to layer-0 neurons
// initialize
int startRand = 0;
int endRand = 0;
int lastMsec = 0; // millis() timestamp of the last button-driven input
int currMsec = 0;
Network network; // the single network instance, created in setup()
// One-time sketch initialization: canvas, button colors/position, and
// the Network object centered on the canvas.
void setup() {
    size(600, 400); // must run first so width/height are valid below

    // "Generate Input" button appearance and placement.
    rectColor = color(0);
    rectHighlight = color(5, 178, 255);
    rectX = 10;
    rectY = 8;

    // Create the Network object at the center of the window.
    network = new Network(width/2, height/2);
}
// Main render loop: drives the input button, keyboard layer selection,
// and delegates simulation + drawing to the Network.
void draw() {
background(255);
// AP generation box: while hovered, feed current into every layer-0
// neuron, rate-limited to at most once per baseDelay ms.
update(mouseX, mouseY);
if (rectOver) {
currMsec = millis();
fill(rectHighlight);
if (currMsec > lastMsec + baseDelay) {
for (int i = 0; i < network.neurons.size(); i++) {
Neuron currN = network.neurons.get(i);
if (currN.layer == 0) {
currN.feedForward(50);
lastMsec = millis();
}
}
}
} else {
fill(rectColor);
}
stroke(255);
rect(rectX, rectY, rectSize, rectSize-40);
String APprompt = "Generate Input";
pushStyle();
fill(255, 255, 255);
textSize(20);
textAlign(CENTER);
// NOTE(review): the text box is rectSize tall but the button is only
// rectSize-40 tall, so the label sits below the button's vertical
// center -- confirm whether this offset is intended.
text(APprompt, rectX, rectY, rectSize, rectSize);
popStyle();
// Current layer info
textSize(layerFont);
String currLayerStr = str(currLayer);
String layerText = "Current layer: " + currLayerStr;
fill(0, 0, 0);
text(layerText, 20+rectSize, layerFont);
// Number keys 0-9 choose the layer assigned to newly placed neurons.
if (keyPressed) {
int keyVal = key - 48; // ASCII digit -> numeric value
if (keyVal >= 0 && keyVal <= 9) {
currLayer = keyVal;
}
// Defensive clamp; unreachable given the 0-9 guard above.
if (currLayer < 0){
currLayer = 0;
}
}
// Update and display the Network
network.update();
network.display();
} // end draw
// Place a new neuron at the mouse position (in network-centered
// coordinates, since Network translates drawing by width/2, height/2).
// Non-input neurons are wired to 1-3 random neurons in the layer below:
// one connection always, a second with 50% probability, and then a third
// with a further 20% probability. Targets may repeat, as in the original.
void mouseReleased () {
    float x = mouseX - width/2;
    float y = mouseY - height/2;
    Neuron n = new Neuron(x, y, neuronID, currLayer);
    neuronID = neuronID + 1;
    network.addNeuron(n);

    if (network.neurons.size() > 1 && currLayer != 0) {
        // Collect candidate postsynaptic partners: every neuron one layer down.
        int[] possPost = {};
        for (int i = 0; i < network.neurons.size(); i++) {
            Neuron postN = network.neurons.get(i);
            if (postN.layer == currLayer-1) {
                possPost = append(possPost, postN.neuronNum);
            }
        }

        if (possPost.length > 0) {
            connectRandomPost(n, possPost);
            if (random(0, 1) <= 0.5) {
                connectRandomPost(n, possPost);
                if (random(0, 1) <= 0.2) {
                    connectRandomPost(n, possPost);
                }
            }
        }
    }
}

// Connect presynaptic neuron pre to a uniformly-chosen member of possPost
// with a small random starting weight, and register pre on the target's
// postsynaptic list so action potentials can be routed forward.
void connectRandomPost(Neuron pre, int[] possPost) {
    // int(random(n)) is uniform over 0..n-1; the original
    // round(random(0, n-1)) under-weighted the first and last candidates.
    int pick = int(random(possPost.length));
    Neuron target = network.neurons.get(possPost[pick]);
    pre.addConnection(target.neuronNum, random(0, randWMax));
    target.addPostSyn(pre.neuronNum);
}
// Track whether the pointer is over the "Generate Input" button.
// The button is drawn rectSize wide by (rectSize-40) tall (see draw()),
// so the hit-test must use the same reduced height; the original tested
// a full rectSize-tall square and registered hovers below the button.
void update(int x, int y) {
    rectOver = overRect(rectX, rectY, rectSize, rectSize-40);
}
// True when the mouse pointer lies inside the axis-aligned rectangle
// whose top-left corner is (x, y) with the given width and height.
boolean overRect(int x, int y, int width, int height) {
    boolean insideX = mouseX >= x && mouseX <= x + width;
    boolean insideY = mouseY >= y && mouseY <= y + height;
    return insideX && insideY;
}
// A travelling action potential: a dot that eases from its current
// location toward a receiving neuron, carrying a synaptic weight that is
// applied when it arrives.
class ActionPotentials {
    PVector location;  // current position of the AP
    PVector receiver;  // destination (the receiving neuron's position)
    int recNum;        // index of the receiving neuron in network.neurons
    float weight;      // synaptic weight delivered on arrival
    int lerpCounter;   // interpolation step counter

    ActionPotentials(PVector loc, PVector rec, int recNumber, float w, int LC) {
        // Copy both vectors so the AP animates independently of the neurons.
        this.location = new PVector(loc.x, loc.y);
        this.receiver = new PVector(rec.x, rec.y);
        this.recNum = recNumber;
        this.weight = w;
        this.lerpCounter = LC;
    }
}
// An animated drawing of a Neural Network
// Based on Daniel Shiffman's example in "The Nature of Code", http://natureofcode.com
// Modified by Blake Porter
// www.blakeporterneuro.com
// Creates a network of neurons using a specified number of layers of neurons, inputs, and axons
// Connection synapse weights are created randomly and adjust based on activity levels, showing long term potentiation for active neural pairs and long term depression for inactive synapses
// Tuning constants for network dynamics and synaptic plasticity.
int baseCurrent = 150; // input drive used by Network.baseInput()
float normInput = 200; // base current delivered when an action potential arrives (scaled by weight)
int leak = 6; // per-update membrane leak (scaled by 0.2 on non-update frames)
float LTP = 1.1; // weight increment for potentiated (active) synapses
float LTD = 0.25; // weight decrement for depressed (inactive) synapses
int maxW = 5; // ceiling for synaptic weights
int updateTime = 500; // ms between plasticity updates
int prevUpdate = 0; // millis() timestamp of the last plasticity update
int currUpdate = 0;
// The Network owns the list of neurons and coordinates simulation
// (membrane leak, action-potential movement/delivery, plasticity) and
// drawing. All positions are relative to `location`, the drawing origin.
class Network {
    ArrayList<Neuron> neurons;
    PVector location; // drawing origin (center of the window)

    Network(float x, float y) {
        location = new PVector(x, y);
        neurons = new ArrayList<Neuron>();
    }

    // Register a neuron with the network.
    void addNeuron(Neuron n) {
        neurons.add(n);
    }

    // Drive the first `inputs` neurons with the base input current.
    void baseInput(int inputs) {
        for (int i = 0; i < inputs; i++) {
            neurons.get(i).feedForward(baseCurrent);
        }
    }

    // Advance membrane potentials and in-flight APs; every updateTime ms,
    // also apply the LTP/LTD plasticity rule to every synapse.
    // (Original referenced the global `network` instead of this instance's
    // own `neurons`; fixed so a second Network would behave correctly.)
    void update() {
        if (neurons.size() > 1) {
            currUpdate = millis();
            if (currUpdate > prevUpdate + updateTime) {
                // Plasticity pass.
                for (Neuron n : neurons) {
                    n.sum = n.sum - leak;
                    n.sum = constrain(n.sum, n.RMP, n.spkT);
                    moveAPs(n);
                    for (int i = 0; i < n.connections.length; i++) {
                        Neuron preN = neurons.get(n.connections[i]);
                        if (preN.layer == 0) {
                            // Input-layer synapses: potentiate whenever the
                            // receiving neuron fired this window, else depress.
                            if (n.spkCount > 0) {
                                n.weights[i] = constrain(n.weights[i] + LTP, 0, maxW);
                            } else {
                                n.weights[i] = constrain(n.weights[i] - LTD, 0, maxW);
                            }
                        } else {
                            // Hebbian-style rule: potentiate when this neuron
                            // fired at least as often as its presynaptic
                            // partner, otherwise depress.
                            if (n.spkCount >= preN.spkCount && n.spkCount > 0) {
                                n.weights[i] = constrain(n.weights[i] + LTP, 0, maxW);
                            } else {
                                n.weights[i] = constrain(n.weights[i] - LTD, 0, maxW);
                            }
                        }
                        prevUpdate = millis();
                    } // for all connections
                    n.spkCount = 0;
                } // all neurons
            } else {
                // Between plasticity updates: lighter leak, keep APs moving.
                for (Neuron n : neurons) {
                    n.sum = n.sum - leak*0.2;
                    n.sum = constrain(n.sum, n.RMP, n.spkT);
                    moveAPs(n);
                }
            } // time
        } // size
    } // update

    // Move neuron n's in-flight action potentials toward their receivers;
    // deliver (feedForward) and remove any that have arrived. Extracted
    // from update(), where this code was duplicated verbatim in both
    // branches. Iterates backwards so removal is safe.
    void moveAPs(Neuron n) {
        for (int i = n.APs.size()-1; i >= 0; i--) {
            ActionPotentials currAP = n.APs.get(i);
            // NOTE(review): this arrival test only checks >= on both axes,
            // so an AP starting to the right of / below its receiver is
            // delivered immediately -- confirm intended.
            if (currAP.location.x >= currAP.receiver.x - n.APr && currAP.location.y >= currAP.receiver.y - n.APr) {
                // Arrived: inject weighted current into the receiving neuron.
                Neuron recN = neurons.get(currAP.recNum);
                float currW = constrain(currAP.weight, 1, maxW);
                recN.feedForward(normInput*currW);
                n.APs.remove(i);
            } else {
                // Ease toward the receiver.
                currAP.location.x = lerp(currAP.location.x, currAP.receiver.x, 0.1);
                currAP.location.y = lerp(currAP.location.y, currAP.receiver.y, 0.1);
            }
        }
    }

    // Draw axons first, then neuron bodies, then APs on top.
    void display() {
        pushMatrix();
        translate(location.x, location.y);
        for (Neuron n : neurons) {
            n.displayAx();
        }
        for (Neuron n : neurons) {
            n.displayN();
        }
        for (Neuron n : neurons) {
            n.displayAP();
        }
        popMatrix();
    }
}
// An animated drawing of a Neural Network
// Based on Daniel Shiffman's example in "The Nature of Code", http://natureofcode.com
// Modified by Blake Porter
// www.blakeporterneuro.com
// Creates a network of neurons using a specified number of layers of neurons, inputs, and axons
// Connection synapse weights are created randomly and adjust based on activity levels, showing long term potentiation for active neural pairs and long term depression for inactive synapses
// A leaky integrate-and-fire neuron. Accumulates input current in `sum`;
// when `sum` crosses `spkT` it fires, launching an animated action
// potential toward every neuron on its postsynaptic list.
class Neuron {
    int neuronNum;      // index of this neuron in network.neurons
    PVector location;   // position in network-centered coordinates
    PVector centerLoc;  // axon endpoint; currently identical to location
    int prevSpkCount = 0; // how many spikes did it have
    int spkCount = 0;   // spikes since the last plasticity update
    float sum = 0;      // the intracellular voltage value, start at 0
    int layer;          // 0 = input layer
    boolean justFired = false;
    int RMP = 0;        // resting membrane potential (floor for sum)
    int spkT = 1000;    // spike threshold
    // Synapse bookkeeping: connections[i] is the index of a neuron in the
    // layer below that feeds this one, weights[i] its synaptic weight,
    // and postSyn holds indices of neurons this one projects to.
    int[] connections = {};
    float[] weights = {};
    int[] postSyn = {};
    ArrayList<ActionPotentials> APs = new ArrayList<ActionPotentials>();
    // The Neuron's size can be animated
    float r_base = 20;  // how big it is at the start
    float r_pop = 25;   // how big it grows to when it fires
    float r = r_base;   // current (animated) radius
    float APr = r_base*0.5; // drawn AP radius; also the arrival tolerance
    // NOTE(review): centerX/centerY are never read in this file --
    // confirm before removing.
    float centerX;
    float centerY;

    Neuron(float x, float y, int neuronID, int currLayer) {
        location = new PVector(x, y); // get start position
        centerLoc = new PVector(location.x, location.y);
        layer = currLayer;
        neuronNum = neuronID;
    }

    // Record an incoming connection from neuron c with starting weight w.
    void addConnection(int c, float w) {
        connections = append(connections, c);
        weights = append(weights, w);
    }

    // Record neuron x as a postsynaptic (downstream) target.
    void addPostSyn(int x) {
        postSyn = append(postSyn, x);
    }

    // Receive input current; fire and emit APs if threshold is reached.
    void feedForward(float input) {
        // Accumulate it
        sum += input;
        sum = constrain(sum, RMP, spkT+1);
        // did it reach the action potential threshold?
        if (sum >= spkT) {
            fire();
            justFired = true;
            sum = 0; // Reset the sum to 0 if it fires
            spkCount++; // add a spike to the spike count
            // Launch an action potential toward every postsynaptic target.
            for (int i = 0; i < postSyn.length; i++) {
                Neuron recN = network.neurons.get(postSyn[i]);
                // Look up the weight the receiver stores for its synapse
                // from this neuron. Reset to 0 per target: the original
                // declared apW outside the loop, so a target lacking a
                // reciprocal entry silently reused the previous target's
                // weight.
                float apW = 0.0;
                for (int j = 0; j < recN.connections.length; j++) {
                    if (recN.connections[j] == neuronNum) {
                        apW = recN.weights[j];
                    }
                }
                APs.add(new ActionPotentials(location, recN.location, recN.neuronNum, apW, 1));
            }
        }
    }

    // The Neuron fires
    void fire() {
        r = r_pop; // It suddenly is bigger
    }

    // Draw the cell body; fill interpolates from white toward a
    // layer-dependent color as the membrane potential approaches threshold.
    void displayN() {
        pushStyle();
        stroke(0);
        strokeWeight(1);
        color fromC = color(255, 255, 255);
        color startC = color(11, 107, 191);
        color endC = color(250, 218, 94);
        float layerF = layer;
        color toC = lerpColor(startC, endC, layerF/9); // deeper layers shade toward yellow
        float memP = map(sum, 0, spkT, 0, 1);          // charge fraction
        color neuronC = lerpColor(fromC, toC, memP);
        fill(neuronC);
        ellipse(location.x, location.y, r, r);
        r = lerp(r, r_base, 0.1); // ease back to base size after firing
        popStyle();
    } // display N

    // Draw a line to each connected neuron, weighted by synapse strength.
    void displayAx() {
        pushStyle();
        stroke(0, 0, 0);
        for (int i = 0; i < connections.length; i++) {
            Neuron post = network.neurons.get(connections[i]);
            strokeWeight(weights[i]); // thicker line = stronger synapse
            line(centerLoc.x, centerLoc.y, post.centerLoc.x, post.centerLoc.y);
        }
        popStyle();
    } // display axon

    // Draw each in-flight action potential as a small yellow dot.
    void displayAP() {
        pushStyle();
        for (int i = 0; i < APs.size(); i++) {
            ActionPotentials currAP = APs.get(i);
            stroke(100);
            strokeWeight(1);
            color apC = color(255, 255, 0);
            fill(apC);
            ellipse(currAP.location.x, currAP.location.y, APr, APr);
        }
        popStyle();
    } // display ap
}