📄 smpspikeloop.cpp
字号:
/***************************************************************************
                      smpspikeloop.cpp  -  description
                         -------------------
    begin                : Mon Feb 29 2002
    copyright            : (C) 2002 by Rüdiger Koch
    email                : rkoch@rkoch.org
 ***************************************************************************/
/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/

// Fixed: <iostream.h> is a pre-standard header that modern toolchains no
// longer ship; <stdlib.h> replaced by its C++ counterpart <cstdlib>.
#include <iostream>
#include <cstdlib>
#include <stdexcept>
#include <string>
#include <vector>

#include <amygdala/basicneuron.h>
#include <amygdala/network.h>
#include <amygdala/layer.h>
#include <amygdala/types.h>   // duplicate include of this header removed
#include <amygdala/statisticsoutput.h>
#include <amygdala/netloader.h>
#include <amygdala/mpnetwork.h>

#include "smpspikeloop.h"

// The original relied on pre-standard <iostream.h> placing these names in the
// global namespace; with standard headers they must be pulled in explicitly.
using std::cout;
using std::endl;
using std::string;
using std::runtime_error;

/**
 * Construct the spike loop driver.
 *
 * @param layerSize  number of neurons per layer (stored as poolSize)
 * @param numLayers  number of layers between input and output (instances
 *                   1..numLayers+1 are created by BuildNetwork())
 */
SmpSpikeLoop::SmpSpikeLoop(int layerSize, int numLayers)
    : poolSize(layerSize), numLayers(numLayers)
{
    noisyPulse = false;
    // SMP: First obtain a reference to the Node object
    theNode = Node::GetNodeRef();
}

/**
 * Tear down every MpNetwork instance created by BuildNetwork():
 * instance 1 (input), 2..numLayers (hidden), numLayers+1 (output).
 */
SmpSpikeLoop::~SmpSpikeLoop()
{
    for (AmIdInt instId = 1; instId <= numLayers + 1; instId++)
        theNode->DeleteInstance(instId);
}

/**
 * Build the network, schedule one input spike per input neuron, and run
 * all partitions for the given simulated time.
 *
 * @param runTime  run duration; multiplied by 1000 before being handed to
 *                 Node::Run() (unit conversion — presumably ms to µs;
 *                 TODO confirm against Amygdala's AmTimeInt convention)
 */
void SmpSpikeLoop::Run(AmTimeInt runTime)
{
    cout << "Starting spikeloop...\n";
    cout << "Building the network.\n";
    try {
        BuildNetwork();
    }
    // Fixed: both handlers caught by value; catching runtime_error by value
    // also slices any derived exception type. Catch by const reference.
    catch (const string& e) {
        cout << e << endl;
        exit(-1);
    }
    catch (const runtime_error& e) {
        cout << e.what() << endl;
        exit(-1);
    }

    // Turn on output for the output layer
    // Neuron::CaptureOutput( OUTPUT_LAYERS );

    // Turn on output for ALL neurons. This is used to create a complete log
    // using StatisticsOutput. If you don't want this turn on output only for
    // OUTPUT_LAYERS. See also the end of the file
    Neuron::CaptureOutput( ALL );

    // Schedule one initial spike (at t=1000) for every input-layer neuron.
    for (unsigned int i = 1; i <= poolSize; i++) {
        //net->ScheduleNEvent(INPUTSPIKE, 1000 + (i*500), i);
        inputNet->ScheduleNEvent(INPUTSPIKE, 1000, i);
    }

    cout << "running..." << endl;
    // SMP: The way to run all partitions of a Network at once is this:
    theNode->Run(runTime*1000);
    cout << "...done" << endl;

/*  delete inputNet;
    for (uint i=0; i<layer.size(); i++) {
        delete layer[i];
    }
*/
    cout << "Exiting." << endl;
}

/**
 * Create one MpNetwork instance per layer (InstanceID == LayerID), build
 * input/hidden/output layers with a NetLoader, wire all forward connections
 * plus one feedback connection, and optionally attach a spike logger.
 *
 * Throws (propagated to Run()): string or runtime_error from the loader /
 * network layer.
 */
void SmpSpikeLoop::BuildNetwork()
{
    unsigned int startId = 10, layerCount = 1;
    LayerConstants layerConst;

    // SMP: Make a new MpNetwork instance. The InstanceID will be the same as
    // SMP: the LayerID. This is arbitrary, of course. The only rules for
    // SMP: numbering IDs is that the InstanceIDs must be unique and LayerIDs
    // SMP: must be unique within Instances.
    cout << "Making network instance: " << layerCount << endl;
    inputNet = theNode->MakeNetwork(layerCount);

    // SMP: Set to streaming input. This would happen automatically if
    // SMP: the default SpikeInput would be replaced by a MpSpikeInput
    // SMP: object. Since we usually don't want to use the MpSpikeInput
    // SMP: but a custom SpikeInput class we have to set this manually
    inputNet->StreamingInput(true);

    // Set up the constants struct
    layerConst.type              = INPUTLAYER;
    layerConst.layerId           = layerCount;
    layerConst.learningConst     = 1e-3;
    layerConst.membraneTimeConst = 10.0;
    layerConst.synapticTimeConst = 2.0;
    layerConst.restPtnl          = 0.0;
    layerConst.thresholdPtnl     = 5.0;

    // Build the input layer
    NetLoader inputNL(inputNet);
    Layer *tmpLayer = inputNL.BuildLayer(poolSize, 1, layerConst, "BasicNeuron");
    tmpLayer->LayerName("Input Layer");
    tmpLayer->SetSynapticDelay(100);
    layerCount++;

    // Build the hidden layers (instances 2..numLayers)
    layerConst.type = HIDDENLAYER;
    while (layerCount < numLayers + 1) {
        layerConst.layerId = layerCount;
        cout << "Making network instance: " << layerCount << endl;
        // SMP: Making as many MpNetwork Instances as there are layers.
        MpNetwork *tmpNet = theNode->MakeNetwork(layerCount);
        // SMP: And don't forget to set the correct SpikeInput
        // NOTE(review): raw `new` with no matching delete — presumably the
        // MpNetwork takes ownership of its MpSpikeInput; confirm in Amygdala.
        new MpSpikeInput(tmpNet);

        NetLoader hiddenNL(tmpNet);
        tmpLayer = hiddenNL.BuildLayer(poolSize, startId, layerConst, "BasicNeuron");
        tmpLayer->LayerName("hidden Layer");
        tmpLayer->SetSynapticDelay(100);
        layerCount++;
        // SMP: This is not actually necessary. We could use neuron IDs from
        // SMP: 1..poolSize for each Instance, since neuron IDs don't need to
        // SMP: be unique across Instances but then the StatisticsOutput would
        // SMP: treat all the neurons with the same ID the same
        startId += 10;
    }

    // Build the output layer (instance numLayers+1)
    cout << "Making network instance: " << layerCount << endl;
    // SMP: Again: Creating a MpNetwork and a SpikeInput for it
    MpNetwork *tmpNet = theNode->MakeNetwork(layerCount);
    new MpSpikeInput(tmpNet);
    layerConst.type    = OUTPUTLAYER;
    layerConst.layerId = layerCount;
    NetLoader outputNL(tmpNet);
    tmpLayer = outputNL.BuildLayer(poolSize, startId, layerConst, "BasicNeuron");
    tmpLayer->SetSynapticDelay(100);
    tmpLayer->LayerName("Output Layer");

    // set up the connection parameters
    GaussConnectType conParms;
    conParms.meanWeight = 1.4/(float)poolSize;
    conParms.stdDev     = 0.01;
    conParms.pctConnect = 100.0; // Fully connected

    // Build all forward connections: layer i of instance i feeds
    // layer i+1 of instance i+1.
    for (unsigned int i = 1; i < layerCount; i++) {
        theNode->ConnectLayers(i, i, i+1, i+1, conParms);
    }

    // Connect the last layer to the first hidden layer (since input layers
    // cannot receive connections yet).
    // NOTE(review): this uses instance/layer `numLayers` (the last HIDDEN
    // layer), while the output layer is `numLayers+1` — confirm whether the
    // feedback edge was meant to originate from the output layer instead.
    theNode->ConnectLayers(numLayers, numLayers, 2, 2, conParms);

    // create a complete log of all spikes if gnuplot mode is turned on
    if (gnuplot) {
        // NOTE(review): raw `new`, never deleted here — presumably ownership
        // passes to Neuron::SetSpikeOutput(); verify against Amygdala docs.
        StatisticsOutput *spikeOut = new StatisticsOutput();
        spikeOut->LogSpikeTimes(string("spikes.log"));
        Neuron::SetSpikeOutput(spikeOut);
        cout << "Logging output to spikes.log\n";
        cout << "Use the command 'gnuplot spikes.gnuplot' to generate spikeloop.png\n";
        cout << "Run 'gimp spikeloop.png' to view the png file.\n";
    }
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -