
📄 neuron.java

📁 A multi-layer BP (backpropagation) neural network implemented in Java
💻 JAVA
/*
  AI ANN Backpropagation
  Copyright (C) 2002-2003  Wim Gillis <d.e.m@gmx.net>
  http://sourceforge.net/projects/crap

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program (see COPYING); if not, check out
  http://www.gnu.org/licenses/gpl.html
  or write to the Free Software Foundation,
  Inc., 59 Temple Place, Suite 330, Boston,
  MA  02111-1307  USA
*/
package cx.ma.ai.ann.backprop;

import java.io.Serializable;

public class Neuron implements Serializable {

    public final static int LINEAR_ACTIVATION           = 0;
    public final static int BINARY_SIGMOID_ACTIVATION   = 1;
    public final static int BIPOLARY_SIGMOID_ACTIVATION = 2;

    private double flatness = 1.0;   // steepness of the sigmoid activation
    private double biasWeight;
    private double[] weights;        // one weight per neuron in the previous layer
    private double output;
    private double net;
    private double error;            // delta term used by backpropagation
    private int activationType;

    public Neuron(int noOfWeights) {
        this(noOfWeights, 0.5, BINARY_SIGMOID_ACTIVATION);
    }

    public Neuron(int noOfWeights, double weightDefaultValue, int activationType) {
        // Initialise weights and bias around the default value with a small random offset
        weights = new double[noOfWeights];
        for (int i = 0; i < weights.length; ++i)
            weights[i] = weightDefaultValue + ((0.5 - Math.random()) / 10);
        biasWeight = weightDefaultValue + ((0.5 - Math.random()) / 10);
        this.activationType = activationType;
        output = 0.5;
        net = 0.0;
        error = 0.0;
    }

    // Copy constructor
    public Neuron(Neuron neuron) {
        weights = new double[neuron.getNoOfWeights()];
        for (int i = 0; i < weights.length; ++i)
            weights[i] = neuron.getWeight(i);
        activationType = neuron.getActivationType();
        flatness       = neuron.getFlatness();
        biasWeight     = neuron.getBias();
        output         = neuron.getOutput();
        net            = neuron.getNet();
        error          = neuron.getError();
    }

    public double getFlatness() {
        return flatness;
    }
    public void setFlatness(double flatness) {
        this.flatness = flatness;
    }

    public double getError() {
        return error;
    }

    public int getNoOfWeights() {
        return weights.length;
    }

    public int getActivationType() {
        return activationType;
    }

    public double getBias() {
        return biasWeight;
    }
    public void setBias(double bias) {
        biasWeight = bias;
    }

    public double getWeight(int index) {
        return weights[index];
    }
    public void setWeight(int index, double value) {
        weights[index] = value;
    }

    public double getOutput() {
        return output;
    }
    public void setOutput(double output) {
        this.output = output;
    }

    public double getNet() {
        return net;
    }

    public void setActivationType(int type) {
        activationType = type;
    }

    // Net input: weighted sum of the previous layer's outputs plus the bias
    public void calculateNet(Layer previousLayer) {
        net = 0.0;
        for (int i = 0; i < weights.length; ++i)
            net += previousLayer.getNeuron(i).getOutput() * weights[i];
        net += 1 * biasWeight;
    }

    // Error calculation for an OUTPUT layer neuron
    public void calculateError(double desiredOutput) {
        error = (desiredOutput - output) * derivatedActivationFunction();
    }

    // Error calculation for a HIDDEN layer neuron:
    // backpropagate the error terms of the next layer through its weights
    public void calculateError(Layer nextLayer, int index) {
        double nextLayerError = 0.0;
        int neurons = nextLayer.getNoOfNeurons();
        for (int i = 0; i < neurons; ++i)
            nextLayerError += nextLayer.getNeuron(i).getError() * nextLayer.getNeuron(i).getWeight(index);
        error = derivatedActivationFunction() * nextLayerError;
    }

    // Gradient-descent weight update using the stored error term
    public void updateWeights(double learningRate, Layer previousLayer) {
        for (int i = 0; i < weights.length; ++i)
            weights[i] += learningRate * error * previousLayer.getNeuron(i).getOutput();
        biasWeight += learningRate * error * 1;
    }

    public double activationFunction(double net) {
        double activated = 0.0;
        switch (activationType) {
            case LINEAR_ACTIVATION:
                activated = net; break;
            case BINARY_SIGMOID_ACTIVATION:
                activated = Math.pow(1 + Math.exp(-1 * flatness * net), -1); break;
            case BIPOLARY_SIGMOID_ACTIVATION:
                activated = (2 / (1 + Math.exp(-1 * flatness * net))) - 1; break;
            default:
                activated = 0;
        }
        return activated;
    }

    // Derivative of the activation function, expressed in terms of the neuron's output f(net)
    public double derivatedActivationFunction() {
        double derivated = 0.0;
        double out = output;
        switch (activationType) {
            case LINEAR_ACTIVATION:
                derivated = 1; break;
            case BINARY_SIGMOID_ACTIVATION:
                derivated = flatness * out * (1 - out); break;
            case BIPOLARY_SIGMOID_ACTIVATION:
                // For f = 2 / (1 + e^(-flatness * net)) - 1, the derivative is (flatness / 2) * (1 - f^2)
                derivated = (flatness / 2) * (1 - Math.pow(out, 2)); break;
            default:
                derivated = 0;
        }
        return derivated;
    }

    public String toString() {
        String out = "\t\t  outputvalue: " + output + " - error: " + error + " - net: " + net + " - bias: " + biasWeight + "\n";
        for (int i = 0; i < weights.length; ++i)
            out += "\t\t\tweight " + i + ": " + weights[i] + "\n";
        return out;
    }
}
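
The Layer class used by calculateNet, calculateError and updateWeights is not included on this page; from the calls above we only know it exposes getNeuron(int) and getNoOfNeurons(). The sketch below is therefore a minimal, hypothetical demo (NeuronDemo is not part of the original package): it drives a single output Neuron by hand through one forward pass, one error calculation and one weight update, using only methods that appear in Neuron.java above.

import cx.ma.ai.ann.backprop.Neuron;

/* NeuronDemo: illustrative only, assuming Neuron.java above is on the classpath. */
public class NeuronDemo {
    public static void main(String[] args) {
        double[] input = {0.2, 0.9};          // stands in for the previous layer's outputs
        double desired = 1.0;                 // training target for this neuron
        double learningRate = 0.5;

        Neuron n = new Neuron(input.length);  // binary sigmoid activation by default

        // Forward pass: weighted sum of inputs plus bias, then the activation function.
        // calculateNet() expects a Layer object, so the sum is done by hand here.
        double net = n.getBias();
        for (int i = 0; i < input.length; ++i)
            net += input[i] * n.getWeight(i);
        n.setOutput(n.activationFunction(net));

        // Backward pass for an output neuron: error = (target - output) * f'.
        n.calculateError(desired);

        // Weight update, mirroring updateWeights() but with the raw input array.
        for (int i = 0; i < input.length; ++i)
            n.setWeight(i, n.getWeight(i) + learningRate * n.getError() * input[i]);
        n.setBias(n.getBias() + learningRate * n.getError());

        System.out.println(n);                // toString() dumps output, error, bias and weights
    }
}

In the full package a Layer presumably loops over its neurons and calls calculateNet, calculateError and updateWeights in this same order; the demo simply replaces the previous layer's outputs with a raw input array.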
