⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 perceptron.java

📁 Single-layer neural networks can be trained using various learning algorithms. The best-known algorithms implemented here are the perceptron learning rule, Adaline, backpropagation, the pocket algorithm, the optimal perceptron, and support vector machines.
💻 JAVA
字号:
	import java.util.*;public class Perceptron{    Vector			layers;    //static Vector   inputSamples;    //static Vector	outputSamples;	static Vector	alpha;    public Layer    inputLayer;    public Layer    outputLayer;    static public int   algorithm;	static public int	kernel;	static public String	applet_mode; //should it run as a SVM- or as a Perceptron-Applet	static public double P;	static public double C;	static public double S;    public double   error;	static double 	threshold;	static double 	threshold_weight;	static Neuron 	threshold_neuron;	static double[][] sample_sum = new double[2][2];	static boolean weights_set = false;	static double z_temp = 0.0;	static int num_samples = 0;		//	static final double eta = 0.01;	    final public static int PERCEPTRON 	= 1;    final public static int ADALINE    	= 2;    final public static int BACKPROP  	= 3;    final public static int POCKET     	= 4;	final public static int OPTIMAL		= 5;	final public static int SVM    		= 6;		final public static int GAUSS		= 1;	final public static int POLY		= 2;    public Perceptron(int i,int o,int algo)    {		layers          = new Vector();		//inputSamples    = new Vector();		//outputSamples   = new Vector();		alpha			= new Vector(); //for the optimal perceptron		inputLayer      = new Layer("I", i + 1); // bias		outputLayer     = new Layer("O", o);		layers.addElement(inputLayer);		layers.addElement(outputLayer);		algorithm       = algo;		error = 0.0;    }	    public void addLayer(int n)    {		layers.insertElementAt(new Layer("H",n),layers.size()-1);    }	    public Layer getLayer(int i)    {		int         j=0;		boolean     found=false;		Layer       layer=null;		Enumeration e = layers.elements();		while(e.hasMoreElements())	    {			layer = (Layer)e.nextElement();			if (i==j)		    {				found = true;				break;		    } else j++;	    }		if (found==false) layer = null;		return layer;    }	    public void connect(int sourceLayer,int sourceNeuron,			int destLayer,int destNeuron)    {		new 
Synapse(getLayer(sourceLayer).getNeuron(sourceNeuron),		    getLayer(destLayer).getNeuron(destNeuron));    }	    public void biasConnect(int destLayer,int destNeuron)    {		Synapse s = new Synapse(inputLayer.getNeuron(inputLayer.size-1),				getLayer(destLayer).getNeuron(destNeuron));		s.weight = Perceptron.threshold_weight;    }	/*    public void removeSamples()    {		inputSamples.removeAllElements();		outputSamples.removeAllElements();    }	*/		/*    public void addSample(Vector i,Sample o)    {		inputSamples.addElement(i);		outputSamples.addElement(o);		//Double init_alpha = new Double(1.0);		//alpha.addElement(init_alpha);    }    */		/*    public void printSamples()    {		System.out.println(inputSamples+"->"+outputSamples);    }    */	    public Vector recognize(Sample sample)    {		initInputs(sample);		propagate(sample);		Vector oS = getOutput();		return oS;    }	    public void learn(int iterations)    {		//Enumeration iS;		//Enumeration oS;		Enumeration samples;		Enumeration a;				boolean last_sample = false;				for(int i = 0; i < iterations; i++)	    {			if (i == iterations - 1) {last_sample = true;}			else {last_sample = false;}			//iS = inputSamples.elements();			//oS = outputSamples.elements();			samples = InputSpaceCanvas.points.elements();			while(samples.hasMoreElements()) {				Sample sample = (Sample) samples.nextElement ();				learnPattern(sample, last_sample);			}	    }		error = 0.0;		//iS = inputSamples.elements();		//oS = outputSamples.elements();		samples = InputSpaceCanvas.points.elements();		while(samples.hasMoreElements()) {			Sample sample = (Sample) samples.nextElement();			//Vector temp = new Vector();			//Double d_temp = new Double(sample.out);			//temp.addElement(d_temp); 						    recognize(sample);			error += computeError (sample);		}    }		    void learnPattern(Sample sample, boolean last_sample)    {		//System.out.println("learnPattern");		initInputs(sample);		propagate(sample);		Vector out = new Vector();		Double d_out = new 
Double(sample.out);		out.addElement(d_out);		switch(algorithm)	    {			case BACKPROP:				bpAdjustWeights(out);				break;			case ADALINE:				outputLayer.computeAdaline(out);				break;			case PERCEPTRON:				outputLayer.computePerceptron(out);				break;			case POCKET:				outputLayer.computePocket(out);				break;			case OPTIMAL:				outputLayer.computeOptimal(sample, last_sample);				break;			case SVM:				outputLayer.computeSVM(sample);				break;		}    }		    void initInputs(Sample sample)    {		Neuron neuron;		Enumeration e = inputLayer.neurons.elements();		//Enumeration eS = iS.elements();		/*		while (eS.hasMoreElements())	    {			neuron = (Neuron)e.nextElement();			neuron.output = ((Double)eS.nextElement()).doubleValue();	    }*/		neuron = (Neuron)e.nextElement();		neuron.output = sample.in[0];		neuron = (Neuron)e.nextElement();		neuron.output = sample.in[1];		neuron = (Neuron)e.nextElement(); // bias;		neuron.output = -1.0;		Perceptron.threshold_neuron = neuron;    }	    void propagate(Sample sample)    {		Layer layer;		Enumeration e = layers.elements();		e.nextElement(); // skip the input layer		while(e.hasMoreElements())	    {			layer = (Layer)e.nextElement();			layer.computeOutputs(sample);	    }    }	    public Vector getOutput()    {		Vector oS = new Vector();		Neuron neuron;		Enumeration e = outputLayer.neurons.elements();		while(e.hasMoreElements())		{			neuron = (Neuron) e.nextElement();			switch(algorithm) {				case BACKPROP:	// sigmoid					oS.addElement(new Double(neuron.getOutput()));					break;				case PERCEPTRON:				case OPTIMAL:				case SVM:				case POCKET:					oS.addElement(new Double(neuron.getOutput()));					break;				case ADALINE:	// weighted sum					oS.addElement(new Double(neuron.getLinearOutput()+0.5));					break;				default:					System.out.println("no algorithm given!");			}		}		return oS;    }		double computeError(Sample sample)	{		Neuron neuron;		double sum = 0.0;		double tmp;		Enumeration e = outputLayer.neurons.elements();		//Enumeration eS 
= oS.elements();		while (e.hasMoreElements())		{			neuron = (Neuron)e.nextElement();			switch(algorithm) {				case BACKPROP:	// sigmoid					tmp = sample.out - neuron.getOutput();						sum += tmp * tmp / 2.0;					break;				case ADALINE:	// weighted sum; convert from {-1,1}					tmp = sample.out - 0.5*neuron.getLinearOutput() - 0.5;						sum += 0.5*tmp * tmp;					break;				case PERCEPTRON:				case OPTIMAL:				case POCKET:				default:					tmp = sample.out - neuron.getThresholdedOutput();						sum += tmp * tmp;			}		}		return sum;	}		double currentError () {		return error;	}		void bpAdjustWeights(Vector oS)	{		outputLayer.computeBackpropDeltas(oS);		for(int i=layers.size()-2; i>=1; i--)			((Layer)layers.elementAt(i)).computeBackpropDeltas();		outputLayer.computeWeights();		for(int i=layers.size()-2; i>=1; i--)			((Layer)layers.elementAt(i)).computeWeights();	}		void print()	{		Layer layer;		Enumeration e = layers.elements();		while(e.hasMoreElements())		{			layer = (Layer)e.nextElement();			layer.print();		}	}		public void restorePocket()	{		outputLayer.restorePocket();	}		public void initPocket()	{		outputLayer.initPocket();	}		public void initOptimal()	{		Perceptron.threshold = -1.0;		outputLayer.initOptimal();	}		/*	public void learnOptimal(int iterations) {		for (int i = 0; i < iterations; i++) { 			for (int j = 0; j < Layer.points.size(); i++) {				Point point = (Point) Layer.points.get(j);				outputLayer.computeOptimal(point);			}		}	}	*/		}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -