⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 nakayama.java

📁 一个纯java写的神经网络源代码
💻 JAVA
📖 第 1 页 / 共 4 页
字号:
     * j and j' is defined as Rjj' = 1 - |Gammajj'|. Rjmin (which this function calculates)
     * is defined as Rjmin = min-j{Rjj'}. It also returns the index of the neuron j that is the
     * argument to this function as well as j' that is the minimum.
     *
     * @param aLayer the index of the layer of the neuron <code>aNeuron</code>.
     * @param aNeuron the neuron within the layer (j).
     * @return the minimum correlation Rjmin, together with the index of the neuron as an argument
     * and the neuron of the minimum (layer and neuron). The lower index of the two neurons is at position
     * 1, 2 and the higher index is at position 3, 4. The minimum itself is at position 0. Finally at position
     * 5 we will indicate if the argument was the lower index neuron (<0, so it is now at position 1, 2) or
     * if the argument was the higher index neuron (>0, so now it is at position 3, 4).
     */
    protected double[] getMinCorrelation(int aLayer, int aNeuron) {
        // Result layout: [0]=Rjmin, [1],[2]=layer/neuron of the lower-index partner,
        // [3],[4]=layer/neuron of the higher-index partner, [5]=which side the argument is on.
        double [] myReturnValue = new double[] {2, -1, -1, -1, -1, 0}; // 2 => 0 <= min <= 1
        List[] myNeurons;
        double myCorrelation;
        // NOTE(review): from the casts below, gamma appears to be a List where
        // gamma.get(l) is a List[] (one List per neuron of layer l) and
        // ((List[])gamma.get(l))[n].get(l2) is a double[] of Gamma values against
        // the neurons of layer l2 — confirm against the code that fills gamma.

        // check neurons before aLayerIndex and aNeuron
        // (every pair where (l, n) strictly precedes (aLayer, aNeuron); the argument
        // is therefore the higher-index neuron of the pair)
        for(int l = 0; l <= aLayer; l++) {
            myNeurons = (List[])gamma.get(l);
            // Within the argument's own layer only neurons 0..aNeuron-1 are "before" it.
            for(int n = 0; n < (l == aLayer ? aNeuron : myNeurons.length); n++) {
                // Rjj' = 1 - |Gammajj'|
                myCorrelation = 1 - Math.abs(((double[])myNeurons[n].get(aLayer))[aNeuron]);
                if(myReturnValue[0] > myCorrelation) {
                    myReturnValue[0] = myCorrelation;
                    myReturnValue[1] = l;       // the lower index neuron
                    myReturnValue[2] = n;
                    myReturnValue[3] = aLayer;  // the higher index neuron
                    myReturnValue[4] = aNeuron;
                    myReturnValue[5] = 1;       // argument is higher index neuron
                }
            }
        }

        List myLayers;
        double[] myNeurons2;
        // check neurons after aLayerIndex and aNeuron
        // (the argument's own Gamma row holds its correlations against later neurons,
        // so here the argument is the lower-index neuron of each pair)
        myLayers = ((List[])gamma.get(aLayer))[aNeuron];
        for(int l = aLayer; l < myLayers.size(); l++) {
            myNeurons2 = (double[])myLayers.get(l);
            // Within the argument's own layer, start just after the argument itself.
            for(int n = (l == aLayer ? aNeuron + 1 : 0); n < myNeurons2.length; n++) {
                myCorrelation = 1 - Math.abs(myNeurons2[n]);
                if(myReturnValue[0] > myCorrelation) {
                    myReturnValue[0] = myCorrelation;
                    myReturnValue[1] = aLayer;  // the lower index neuron
                    myReturnValue[2] = aNeuron;
                    myReturnValue[3] = l;       // the higher index neuron
                    myReturnValue[4] = n;
                    myReturnValue[5] = -1;      // argument is lower neuron
                }
            }
        }
        return myReturnValue;
    }

    /**
     * Sums up the (normal and absolute) values of the outputs of a neuron over all patterns.
     *
     * @param aLayer an index of the layer to retrieve the outputs of that layer.
     * @param aNeuron the neuron in the layer.
     * @return the sum of (index 0: normal, index 1: absolute) outputs of neuron <code>aNeuron</code>.
*/    protected double[] getSumOutputs(int aLayer, int aNeuron) {        List myOutputs;        double myOutput;        double [] mySum = new double[2]; // index 0 normal sum, index 1 absolute sum                        for(int i = 0; i < outputsAfterPattern.size(); i++) {            // for all patterns            myOutputs = (List)outputsAfterPattern.get(i);            myOutput = ((double[])myOutputs.get(aLayer))[aNeuron];            mySum[0] += myOutput;            mySum[1] += Math.abs(myOutput);        }        return mySum;    }        /**     * Sums up all the absolute values of the output weights of a neuron within a layer.     *     * @param aLayer the layer holding neuron <code>aNeuron</code>.     * @param aNeuron the neuron in the layer.     * @return the sum of absolute values of the output weights of neuron      * <code>aNeuron</code> within layer <code>aLayer</code>.     */    protected double getSumAbsoluteWeights(Layer aLayer, int aNeuron) {        double mySum = 0;        OutputPatternListener myListener;        Synapse mySynapse;                for(int i = 0; i < aLayer.getAllOutputs().size(); i++) {            myListener = (OutputPatternListener)aLayer.getAllOutputs().get(i);            if(!(myListener instanceof Synapse)) {                // TODO how to deal with outputs that are not synpases?                throw new org.joone.exception.JooneRuntimeException("Unable to optimize. 
Output of layer is not a synapse.");            }            mySynapse = (Synapse)myListener;            for(int j = 0; j < mySynapse.getOutputDimension(); j++) {                mySum += Math.abs(mySynapse.getWeights().value[aNeuron][j]);            }        }        return mySum;    }    public void cicleTerminated(NeuralNetEvent e) {    }    public void errorChanged(NeuralNetEvent e) {    }    public void netStarted(NeuralNetEvent e) {    }    public void netStopped(NeuralNetEvent e) {        log.debug("Network stopped.");        runValidation();    }    public void netStoppedError(NeuralNetEvent e, String error) {    }        public void netValidated(NeuralValidationEvent event) {        // validation is finished, so we should have collected all the information        // to optimize the activation functions        log.debug("Network validated.");        doOptimize();    }        /**     * Removes all the listeners from the neural network (temporarely). They will     * be added again after optimization and the network is restarted.     */    protected void removeAllListeners() {        Vector myListeners = net.getListeners();                while(myListeners.size() > 0) {            NeuralNetListener myListener = (NeuralNetListener)myListeners.get(myListeners.size() - 1);            listeners.add(myListener);            net.removeNeuralNetListener(myListener);        }    }        /**     * Restore all the listeners to the neural network.     */    protected void restoreAllListeners() {        Iterator myIterator = listeners.iterator();        while(myIterator.hasNext()) {            NeuralNetListener myListener = (NeuralNetListener)myIterator.next();            net.addNeuralNetListener(myListener);        }        listeners = new Vector(); // clear the list    }        /**     * This method is called after every pattern, so we can retrieve information     * from the network that is related to the pattern that was just forwarded      * through the network.     
*/
    void patternFinished() {
        // Only the raw neuron outputs are captured here; all derived statistics
        // are computed later, once every pattern has been forwarded.
        List outputsThisPattern = new ArrayList();

        for(int i = 0; i < layers.size(); i++) {
            Layer cloned = findClonedLayer((Layer)layers.get(i));
            outputsThisPattern.add(cloned.getLastOutputs());
        }
        outputsAfterPattern.add(outputsThisPattern);
    }

    /**
     * Finds the cloned equal layer from the cloned neural network given its corresponding
     * layer from the normal neural network.
     *
     * @param aLayer the layer to find its cloned version in <code>clone</code>.
     * @return the cloned layer from <code>clone</code> corresponding to <code>aLayer</code>,
     * or <code>null</code> when <code>aLayer</code> is not part of <code>net</code>.
     */
    private Layer findClonedLayer(Layer aLayer) {
        // Identity comparison on purpose: we look for this exact layer instance,
        // then return the layer at the same position in the clone.
        for(int i = 0; i < net.getLayers().size(); i++) {
            if(net.getLayers().get(i) == aLayer) {
                return (Layer)clone.getLayers().get(i);
            }
        }
        return null;
    }

    /**
     * Gets epsilon, the threshold to decide if a neuron should be deleted or not.
     *
     * @return the threshold epsilon.
     */
    public double getEpsilon() {
        return epsilon;
    }

    /**
     * Sets epsilon, the threshold to decide if a neuron should be deleted or not.
     *
     * @param anEpsilon the new epsilon.
     */
    public void setEpsilon(double anEpsilon) {
        epsilon = anEpsilon;
    }

    public void netConverged(ConvergenceEvent anEvent, ConvergenceObserver anObserver) {
        // Invoked by a convergence observer once convergence is reached; without
        // an observer the user has to call optimize() explicitly.
        boolean optimized = optimize();
        if(!optimized) {
            // Nothing was changed, so the network most likely stays in the same
            // convergence state; suppress further events until it leaves it.
            anObserver.disableCurrentConvergence();
        }
    }
}

/**
 * This class/synapse is only used to inform a Nakayama object whenever a single
 * pattern has been forwarded through the network.
 */
class PatternForwardedSynapse extends Synapse {

    /** The nakayama object that needs to be informed. */
    protected Nakayama nakayama;

    /**
     * Constructor
     *
     * @param aNakayama the object that needs to be informed whenever a pattern
     * has been forwarded through the network.
     */
    public PatternForwardedSynapse(Nakayama aNakayama) {
        nakayama = aNakayama;
    }

    public synchronized void fwdPut(Pattern pattern) {
        // A negative count marks the stop pattern; only real patterns are reported.
        if(pattern.getCount() >= 0) {
            nakayama.patternFinished();
            items++;
        }
    }

    protected void backward(double[] pattern) {
        // Not used: this synapse never propagates anything backward.
    }

    protected void forward(double[] pattern) {
        // Not used: patterns are handled directly in fwdPut.
    }

    protected void setArrays(int rows, int cols) {
        // No internal arrays to size.
    }

    protected void setDimensions(int rows, int cols) {
        // No dimensions to track.
    }
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -