
plvq.cpp (Fuzzy K-Nearest-Neighbor Classifier, C++)
// This function uses PLVQ to train the prototype layer
/* Input parameters:
     prototype -- pointer to the prototype layer
	 N -- number of nodes in the prototype layer
	 TrainNum -- number of training samples
	 L -- dimensionality of each training sample
	 Train -- the set of training samples
 */
#include <iostream>
#include <stdio.h>
#include "FuzzyNN.h"
#include "feedfoward.h"
#include "tools.h"
#include <math.h>
using namespace std;

void plvq(ProtoNode *prototype,int N,int TrainNum,int L,FuzzyNum **Train)
{
	double alpha0;
	int T;
	int i,j,t,k;
	double alpha;
	double D;
	double *s;
	double *w;
	ProtoNode *prototypebak;
	double error;
	double theta;
	double precision;
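
	// Work arrays: prototypebak holds a copy of the prototypes, s the per-prototype
	// distances, and w the membership weights; mcheck (from tools.h) presumably
	// verifies that each allocation succeeded.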

	prototypebak=new ProtoNode[N+1];
	mcheck(prototypebak);
	for(j=1;j<=N;j++)
	{
		prototypebak[j].w=new FuzzyNum[L+1];
		mcheck(prototypebak[j].w);
		prototypebak[j].g=new int[N+1];
		mcheck(prototypebak[j].g);
	}

	s=new double[N+1];
	mcheck(s);
	w=new double[N+1];
	mcheck(w);

	cout<<"==================================="<<endl;
	cout<<"现在开始对神经网络进行第一趟的训练."<<endl;

	cout<<"请输入初始学习速率的大小:"<<endl;
	cin>>alpha0;

	T=500;                      // maximum number of training epochs
	theta=0.25*sqrt((double)L); // width of the Gaussian membership kernel used below
	precision=1e-10;            // stop once the per-epoch change in the prototypes falls below this

	// Initialize the prototype layer: every fuzzy weight component starts as a small
	// random value near 0.5, and g[] is a one-hot vector marking the prototype's class
	for(j=1;j<=N;j++)
	{
		for(i=1;i<=L;i++)
		{
			(prototype[j].w)[i].w1=frand(0.45,0.55);
			(prototype[j].w)[i].w2=frand(0.45,0.55);
			(prototype[j].w)[i].a=frand(0.45,0.55);
			(prototype[j].w)[i].b=frand(0.45,0.55);
		}
		for(i=1;i<=N;i++)
			if(i==j)(prototype[j].g)[i]=1;
			else (prototype[j].g)[i]=0;
	}
    
	// Iteratively adjust the prototype node weights
	for(t=0;t<T;t++)
	{
		 alpha=alpha0*(1-(double)t/T);   // learning rate decays linearly from alpha0 toward 0
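
		 // Back up the current prototype weights so the total change over this epoch
		 // can be measured by the stopping test at the bottom of the loop.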

		 for(j=1;j<=N;j++)
		 {
	    	for(i=1;i<=L;i++)
			{
		    	(prototypebak[j].w)[i].w1=(prototype[j].w)[i].w1;
		    	(prototypebak[j].w)[i].w2=(prototype[j].w)[i].w2;
		    	(prototypebak[j].w)[i].a=(prototype[j].w)[i].a;
		    	(prototypebak[j].w)[i].b=(prototype[j].w)[i].b;
			}
	    	for(i=1;i<=N;i++)
		    	if(i==j)(prototypebak[j].g)[i]=1;
		     	else (prototypebak[j].g)[i]=0;
		 }

    	 for(k=1;k<=TrainNum;k++)
		 {
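     		// s[j]: squared distance between training sample k and prototype j, measured
     		// on the coa (center-of-area) defuzzification of their fuzzy components.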
     		for(j=1;j<=N;j++)
			{
	        	s[j]=0;
	        
				for(i=1;i<=L;i++)
		        	s[j]+=(coa(Train[k][i])-coa((prototype[j].w)[i]))*(coa(Train[k][i])-coa((prototype[j].w)[i]));
			}
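			// Soft-competition membership: below, w[j] = exp(-s[j]/(2*theta^2)) / D is a
			// normalized Gaussian weight, so every prototype is pulled toward the sample
			// in proportion to how close it already is.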

			D=0;
			for(j=1;j<=N;j++)
				D+=exp(-s[j]/(2*theta*theta));

			for(j=1;j<=N;j++)
				w[j]=exp(-s[j]/(2*theta*theta))/D;
            
			for(j=1;j<=N;j++)
			{
				for(i=1;i<=L;i++)
				{
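					// The crossed w1/w2 terms and the "+" on the spreads implement fuzzy-number
					// subtraction (cores subtract crosswise, spreads add), so this update is
					// prototype += alpha*w[j]*(Train[k] - prototype) in fuzzy arithmetic.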
			    	(prototype[j].w)[i].w1+=alpha*w[j]*(Train[k][i].w1-(prototype[j].w)[i].w2);
			    	(prototype[j].w)[i].w2+=alpha*w[j]*(Train[k][i].w2-(prototype[j].w)[i].w1);
			    	(prototype[j].w)[i].a+=alpha*w[j]*(Train[k][i].a+(prototype[j].w)[i].b);
			    	(prototype[j].w)[i].b+=alpha*w[j]*(Train[k][i].b+(prototype[j].w)[i].a);
				}
			}
		 }
		 // Compute how much the prototypes changed during this epoch and stop early if training has converged
		for(j=1;j<=N;j++)
		{
	        s[j]=0;
	        
			for(i=1;i<=L;i++)
		        s[j]+=fabs(coa((prototypebak[j].w)[i])-coa((prototype[j].w)[i]));
		}

		error=0;
		for(j=1;j<=N;j++)
	    	error+=s[j];
		if(error<precision)break;
	}
	if(t==T)cout<<"The network failed to converge."<<endl;
	cout<<"First training pass of the neural network finished."<<endl;
	cout<<"==================================="<<endl;
	for(j=1;j<=N;j++)
	{
		delete []prototypebak[j].w;
		delete []prototypebak[j].g;
	}
	delete []prototypebak;
	delete []s;
	delete []w;
}
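
/* For reference, a minimal calling sketch follows. It is only an illustration under
   assumptions: that ProtoNode exposes the members used above (a FuzzyNum *w weight
   vector and an int *g class-indicator vector), that FuzzyNum carries the components
   w1, w2, a, b, and that the caller fills Train with fuzzified data elsewhere. All
   arrays are 1-based to match the indexing inside plvq; PLVQ_DEMO is a hypothetical
   macro so this driver does not clash with the project's real entry point. */
#ifdef PLVQ_DEMO
int main()
{
	int N=3;          // number of prototype nodes
	int TrainNum=100; // number of training samples
	int L=4;          // dimensionality of each sample

	ProtoNode *prototype=new ProtoNode[N+1];
	for(int j=1;j<=N;j++)
	{
		prototype[j].w=new FuzzyNum[L+1]; // fuzzy weight vector, indices 1..L
		prototype[j].g=new int[N+1];      // one-hot class-indicator vector
	}

	FuzzyNum **Train=new FuzzyNum*[TrainNum+1];
	for(int k=1;k<=TrainNum;k++)
		Train[k]=new FuzzyNum[L+1];       // to be filled with fuzzified training data

	plvq(prototype,N,TrainNum,L,Train);   // train the prototype layer

	// Cleanup of prototype and Train is omitted here for brevity.
	return 0;
}
#endif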
