
📄 network.cpp

📁 amygdata's neural network algorithm source code
💻 CPP
📖 Page 1 of 2
        nextInputTime = 0;
    }
}

// FIXME: Commented out until I know what to do with it. Has to be
// updated to match the new connection system.
/*
void Network::ConnectNeurons(AmIdInt preInstId, AmIdInt preNId, AmIdInt postInstId,
                             AmIdInt postNId, float weight, AmTimeInt delay)
{
    if (preInstId == postInstId) {
        Network *net = instances[preInstId]->GetNetwork();
        net->ConnectNeurons(preNId, postNId, weight, delay);
    } else {
        instances[preInstId]->GetNetwork()->ConnectNeurons(instances[postInstId],
                                                           preNId, postNId, weight, delay);
    }
}
*/

Network* Network::GetNetworkRef()
{
    if (theNetwork == NULL) {
        string errstr = "No Network object defined. Call Network::Init(argc, argv) first";
        std::cerr << errstr << std::endl;
        throw runtime_error(errstr);
    }
    return theNetwork;
}

void Network::UdpListener(int port)
{
    // TODO: Implement this to enable clustering
}

unsigned int Network::GetMaxRunTime()
{
    return maxRunTime;
}

/*
bool Network::ThreadSleep(unsigned int simTime)
{
    LOGGER(2, "Need to put threads to sleep. Maybe the partitioning is not optimal?");
    pthread_mutex_lock(&mut_sleeper);
    if (numThreads - sleepers == 1) {    // we are the last thread
        pthread_mutex_unlock(&mut_sleeper);
        return false;
    } else {
        sleepers++;
        pthread_cond_wait(&cond_sleeper, &mut_sleeper);
        pthread_mutex_unlock(&mut_sleeper);
        return true;
    }
}
*/

/*
void Network::ThreadWakeUp()
{
    pthread_mutex_lock(&mut_sleeper);
    pthread_cond_broadcast(&cond_sleeper);
    sleepers = 0;
    pthread_mutex_unlock(&mut_sleeper);
}
*/

void Network::Save(string filename, bool compress)
{
    if (running && numThreads > 1)
        throw runtime_error("Network::Save(): Cannot save, multithreaded Network is running");
    NetLoader nl;
    nl.SaveXML(filename, compress);
}

void Network::Load(string filename, bool loadPhysProps)
{
    if (running && numThreads > 1)
        throw runtime_error("Network::Load(): Cannot load, multithreaded Network is running");
    NetLoader nl;
    nl.LoadXML(filename, loadPhysProps);
}

// TODO: This will have to be changed with the new threading model.
// Commented out for now until everything works in single-threaded mode.
void Network::Schedule()
{
/*
    while (simTime < maxRunTime) {
        bool didRun = false;
        for (map<AmIdInt, NetworkPartition*>::iterator part = partitions.begin();
             part != partitions.end(); part++) {
            NetworkPartition *partition = part->second;
            partition->RunLock();
            if (!partition->IsRunning() && (partition->SimTime() == simTime)) {
                partition->Running(true);
                partition->RunUnlock();
                partition->TimeStep();
                partition->Running(false);  // we should not need a lock to set running to false
                didRun = true;
            } else {
                partition->RunUnlock();
            }
        }
        if (!didRun) {  // couldn't find a Network to run; try the next timeStep
            for (map<AmIdInt, NetworkPartition*>::iterator part = partitions.begin();
                 part != partitions.end(); part++) {
                NetworkPartition *partition = part->second;
                partition->RunLock();
                if (!partition->IsRunning() && (partition->SimTime() == simTime + Network::TimeStepSize())) {
                    partition->Running(true);
                    partition->RunUnlock();
                    partition->TimeStep();
                    partition->Running(false);  // we should not need a lock to set running to false
                    didRun = true;
                } else {
                    partition->RunUnlock();
                }
            }
        }
        if (!didRun) {  // even in the next timeStep there is no Network to run; go to sleep
            ThreadSleep(simTime);
        }
        bool incrementSimTime = true;
        pthread_mutex_lock(&mut_simtime);
        for (map<AmIdInt, NetworkPartition*>::iterator part = partitions.begin();
             part != partitions.end(); part++) {
            NetworkPartition *partition = part->second;
            if (partition->SimTime() == simTime)  // only increment the global simTime
                incrementSimTime = false;         // once every partition's SimTime() is larger
        }
        if (incrementSimTime) {
            simTime += Network::TimeStepSize();
            LOGGER(3, "Incremented global simTime to " << simTime)
            if (sleepers > 0)
                ThreadWakeUp();
        }
        pthread_mutex_unlock(&mut_simtime);
    }
*/
}

void Network::SetNumThreads(unsigned int threads)
{
    if (running)
        throw runtime_error("Cannot change the number of threads while running");
    numThreads = threads;
}

void Network::SimpleRun()
{
    //NetworkPartition *partition = partitions.begin()->second;
    if (visualStub) {
        visualStub->RunVisual();
    }
    cout << "Running single threaded" << endl;
    while (simTime < maxRunTime) {
        TimeStep();
    }
}

void Network::Init(int & argc, char * argv[])
{
    if (theNetwork != NULL)
        throw runtime_error("There is a Network already");
    theNetwork = new Network(argc, argv);
}

VisualStub* Network::GetVisualStub()
{
    return visualStub;
}

Topology* Network::GetTopology(Neuron* nrn)
{
    return nrnMap[nrn->GetId()].topology;
}

void Network::AddNeuron(Neuron* nrn, Topology* top)
{
    if (nrnMap.find(nrn->GetId()) != nrnMap.end())
        throw runtime_error("Neuron " + Utilities::itostr(nrn->GetId()) + " exists already");
    NeuronTopologyPtr ntp;
    ntp.neuron = nrn;
    ntp.topology = top;
    nrnMap[nrn->GetId()] = ntp;
    if (nrn->GetId() >= nextNeuronId)
        nextNeuronId = nrn->GetId() + 1;
}

void Network::AddTrainer(const std::string& trainerName, Trainer* t)
{
    if (trainerMap.find(trainerName) != trainerMap.end())
        throw runtime_error("Trainer " + trainerName + " exists already");
    trainerMap[trainerName] = t;
}

void Network::AddTopology(Topology* top)
{
    topologyMap[top->GetName()] = top;
}

void Network::AddSpikeInput(SpikeInput* si)
{
    spikeInputVec.push_back(si);
}

/**********************************************************
 * Stuff from NetworkPartition.cpp
 **********************************************************/

void Network::ScheduleSpike(AmTimeInt spikeTime, SpikingNeuron* nrn)
{
    SpikeRequest newSpike;
    // initialize a new event request
    newSpike.requestTime = simTime;
    newSpike.requestor = nrn;
    newSpike.spikeTime = spikeTime;
    newSpike.requestOrder = eventRequestCount++;
    // insert into the queue
    eventQ.push(newSpike);
}
void Network::ScheduleSpike(AmTimeInt spikeTime, InputNeuron* nrn)
{
    InputSpikeRequest newSpike;
    // initialize a new event request
    newSpike.requestTime = simTime;
    newSpike.requestor = nrn;
    newSpike.spikeTime = spikeTime;
    newSpike.requestOrder = eventRequestCount++;
    // insert into the queue
    inputQ.push(newSpike);
}

void Network::ScheduleSpikeDelay(Axon* axon)
{
    LOGGER(6, "ScheduleSpikeDelay")
    for (Axon::iterator it = axon->begin(); it != axon->end(); it++) {
        LOGGER(6, "AxonNode size: " << (*it)->Size())
        AmTimeInt offset = (*it)->GetOffset();
        LOGGER(6, "AxonNode offset: " << offset)
        offset += currSpikeDelayOffset;
        // If the offset goes past the end of the queue, then
        // start back at the beginning
        if (offset > maxOffset) {
            offset -= (maxOffset + 1);
            LOGGER(6, "Resetting offset to " << offset)
        }
        LOGGER(6, "Adding AxonNode to delayedSpikeQ")
        delayedSpikeQ[offset].push_back(*it);
    }
    spikeBatchCount += axon->size();
    LOGGER(6, "spikeBatchCount: " << spikeBatchCount)
}

void Network::SendDelayedSpikes()
{
    LOGGER(6, "currSpikeDelayOffset " << currSpikeDelayOffset)
    vector<AxonNode*>& nodes = delayedSpikeQ[currSpikeDelayOffset];
    if (!nodes.size()) {
        // Nothing to do
        IncrementDelayOffset();
        return;
    }
    LOGGER(6, "Parsing " << nodes.size() << " axon nodes")
    // parse each AxonNode and add each Synapse to a neuron
    // input processing queue.  neurons will call ScheduleNeuronProcess()
    // to trigger a call to Neuron::InputSpike() after the queueing process
    // is complete.
    for (unsigned int i = 0; i < nodes.size(); ++i) {
        //AxonNode::const_iterator synItr = nodes[i]->begin();
        //LOGGER(6, "Transmitting spikes from neuron " << nodes[i]->GetNeuron()->GetId())
        LOGGER(6, "Getting AxonNodeIterator")
        AxonNodeIterator synItr = nodes[i]->Begin();
        Synapse* syn = 0;
        while ((syn = synItr++)) {
            LOGGER(6, "Transmitting spike via synapse " << syn)
            syn->TransmitSpike(simTime);
        }
    }
    // Allow each Neuron with input queued up to do its processing
    LOGGER(6, processQ.size() << " items in processQ")
    for (unsigned int i = 0; i < processQ.size(); ++i) {
        LOGGER(6, "Processing nrn " << processQ[i]->GetId())
        processQ[i]->ProcessInput(simTime);
    }
    processQ.clear();
    nodes.clear();
    IncrementDelayOffset();
}

void Network::InitializeDelayedSpikeQ()
{
    LOGGER(6, "Initializing delayedSpikeQ")
    maxOffset = maxSpikeDelay / TimeStepSize();
    LOGGER(6, "maxOffset: " << maxOffset)
    //delayedSpikeQ.reserve(maxOffset+1);
    //delayedSpikeQ.reserve(30);
    //LOGGER(6, "Space reserved for delayedSpikeQ")
    delayedSpikeQ.resize(maxOffset + 1);
    //delayedSpikeQ.resize(30);
    LOGGER(6, "delayedSpikeQ resized")
    LOGGER(6, "Initializing delayedSpikeQ elements")
    for (unsigned int i = 0; i < maxOffset + 1; ++i) {
        // TODO: Assuming 100 delayed spikes per offset for now.
        // A more intelligent algorithm to determine proper
        // sizing should be developed later on.
        //vector<AxonNode*> tmpVec;
        //tmpVec.reserve(100);
        //delayedSpikeQ[i].push_back(tmpVec);
        delayedSpikeQ[i].reserve(100);
    }
}

void Network::SetMaxSpikeDelay(AmTimeInt _delay)
{
    if (_delay > maxSpikeDelay)
        maxSpikeDelay = _delay;
}

void Network::SetTrainerCallback(Trainer* t, AmTimeInt callbackTime)
{
    TrainerCallback tc;
    tc.trainer = t;
    tc.callbackTime = callbackTime;
    trainerCallbackQ.push(tc);
}
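ScheduleSpikeDelay(), SendDelayedSpikes(), and InitializeDelayedSpikeQ() together treat delayedSpikeQ as a circular buffer indexed by time-step offset: a spike due d steps from now is filed into slot (currSpikeDelayOffset + d), wrapping past maxOffset back to zero, and each time step drains exactly one slot. Below is a minimal self-contained sketch of that indexing scheme. DelayRing, its string "events", and the advance in Deliver() are illustrative stand-ins (IncrementDelayOffset() is not defined on this page, so its presumed behavior is a guess), not Amygdala's actual types.

#include <iostream>
#include <string>
#include <vector>

// Sketch of the circular delayed-event queue used above.
// A slot of strings stands in for vector<AxonNode*>.
class DelayRing {
public:
    // maxDelay and stepSize play the roles of maxSpikeDelay
    // and Network::TimeStepSize() in the listing.
    DelayRing(unsigned int maxDelay, unsigned int stepSize)
        : maxOffset(maxDelay / stepSize), curr(0), q(maxOffset + 1) {}

    // Mirrors the wrap-around in ScheduleSpikeDelay(): an event due
    // delaySteps from now lands at curr + delaySteps, and if that
    // runs past the end of the queue, it starts back at the beginning.
    void Schedule(unsigned int delaySteps, const std::string& event) {
        unsigned int offset = curr + delaySteps;
        if (offset > maxOffset)
            offset -= (maxOffset + 1);
        q[offset].push_back(event);
    }

    // Mirrors SendDelayedSpikes(): drain the current slot, then
    // advance curr (what IncrementDelayOffset() presumably does).
    void Deliver() {
        for (const std::string& e : q[curr])
            std::cout << "delivering: " << e << "\n";
        q[curr].clear();
        curr = (curr + 1) % (maxOffset + 1);
    }

private:
    unsigned int maxOffset;                   // ring size minus one
    unsigned int curr;                        // currSpikeDelayOffset
    std::vector<std::vector<std::string> > q; // delayedSpikeQ
};

int main() {
    DelayRing ring(5, 1);       // max delay of 5 steps, step size 1
    ring.Schedule(2, "spike A");
    ring.Schedule(0, "spike B");
    for (int step = 0; step < 3; ++step)
        ring.Deliver();         // B fires at step 0, A at step 2
    return 0;
}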
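For context, here is a hypothetical single-threaded driver built only from calls visible in this listing (Init, GetNetworkRef, SetNumThreads, Load, GetMaxRunTime, SimpleRun, Save). The header name and the XML file names are assumptions for illustration, not files shipped with the library.

#include <iostream>
#include <stdexcept>

#include "Network.h"   // assumed header for the Network class above

int main(int argc, char* argv[]) {
    try {
        Network::Init(argc, argv);              // must be called before GetNetworkRef()
        Network* net = Network::GetNetworkRef();

        net->SetNumThreads(1);                  // threading model is in flux; stay single-threaded
        net->Load("net.xml", true);             // hypothetical file; also load physical properties

        std::cout << "Simulating for " << net->GetMaxRunTime()
                  << " time units" << std::endl;
        net->SimpleRun();                       // single-threaded TimeStep() loop

        net->Save("trained.xml", false);        // hypothetical file; uncompressed
    } catch (const std::runtime_error& e) {
        std::cerr << e.what() << std::endl;
        return 1;
    }
    return 0;
}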
