// bpnet.cs — three-layer back-propagation (BP) neural network.
// NOTE(review): this file was scraped from a webpage; the original header
// lines here were page chrome (filename banner and a font-size control).
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
namespace BpNetp2
{
/// <summary>
/// Three-layer (input / hidden / output) back-propagation neural network
/// trained in batch mode.
/// </summary>
public class BpNet
{
public int inNum;//number of input-layer nodes
int hideNum;//number of hidden-layer nodes
public int outNum;//number of output-layer nodes
public int sampleNum;//total number of training samples
Random R;//RNG used for random weight initialisation
double[] x;//current (normalised) sample fed to the input layer
double[] x1;//outputs of the hidden-layer nodes
double[] x2;//outputs of the output-layer nodes
double[] o1;//net input (weighted sum) to the hidden layer
double[] o2;//net input (weighted sum) to the output layer
public double[,] w;//weight matrix w: input -> hidden
public double[,] v;//weight matrix V: hidden -> output
public double[,] dw;//accumulated batch gradient for w
public double[,] dv;//accumulated batch gradient for V
public double[,] sv;//conjugate-gradient search-direction vector for V
public double[,] sw;//conjugate-gradient search-direction vector for w
public double rate;//learning rate
public double[] b1;//hidden-layer bias vector
public double[] b2;//output-layer bias vector
public double[] db1;//accumulated batch gradient for b1
public double[] db2;//accumulated batch gradient for b2
public double[] sb1;//conjugate-gradient search-direction vector for b1
public double[] sb2;//conjugate-gradient search-direction vector for b2
double[] pp;//back-propagated error terms of the hidden layer
double[] qq;//error terms of the output layer
double[] yd;//teacher (target) values for the output layer, per sample
public double e;//batch error: sqrt of the summed squared output error (see train1)
double in_rate;//normalisation scale factor (largest |value| in the data)
/// <summary>
/// Chooses the hidden-layer size from the input (m) and output (n) node
/// counts. Uses the simple 2m+1 rule; an empirical formula is kept below
/// for reference. The result is rounded to the nearest integer.
/// </summary>
public int computeHideNum(int m, int n)
{
    // Alternative empirical sizing formula, kept from the original source:
    //double s = Math.Sqrt(0.43 * m * n + 0.12 * n * n + 2.54 * m + 0.77 * n + 0.35) + 0.51;
    double size = 2 * m + 1;
    int nearest = Convert.ToInt32(size);
    double remainder = size - (double)nearest;
    if (remainder > 0.5)
    {
        return nearest + 1;
    }
    return nearest;
}
/// <summary>
/// Builds the network topology from the training data: the input and
/// output layer sizes come from the column counts of <paramref name="p"/>
/// and <paramref name="t"/>, the hidden-layer size from computeHideNum,
/// and both weight matrices are filled with random values in [-0.5, 0.5).
/// </summary>
/// <param name="p">Training inputs, one sample per row (sampleNum x inNum).</param>
/// <param name="t">Training targets, one sample per row (sampleNum x outNum).</param>
public BpNet(double[,] p, double[,] t)
{
    R = new Random();
    this.inNum = p.GetLength(1);
    this.outNum = t.GetLength(1);
    this.hideNum = computeHideNum(inNum, outNum);
    this.sampleNum = p.GetLength(0); // number of training samples
    Console.WriteLine("输入节点数目: " + inNum);
    Console.WriteLine("隐层节点数目:" + hideNum);
    Console.WriteLine("输出层节点数目:" + outNum);
    // BUGFIX: removed a leftover debug pause (Console.ReadLine()) that
    // blocked construction whenever the class was used non-interactively.
    x = new double[inNum];
    x1 = new double[hideNum];
    x2 = new double[outNum];
    o1 = new double[hideNum];
    o2 = new double[outNum];
    w = new double[inNum, hideNum]; // input -> hidden weights
    v = new double[hideNum, outNum]; // hidden -> output weights
    dw = new double[inNum, hideNum];
    dv = new double[hideNum, outNum];
    sw = new double[inNum, hideNum];
    sv = new double[hideNum, outNum];
    b1 = new double[hideNum];
    b2 = new double[outNum];
    db1 = new double[hideNum];
    db2 = new double[outNum];
    sb1 = new double[hideNum];
    sb2 = new double[outNum];
    pp = new double[hideNum];
    qq = new double[outNum];
    yd = new double[outNum];
    // Initialise w with uniform random values in [-0.5, 0.5).
    // (A commented-out Widrow-Nguyen initialisation existed here in the
    // original source; it was dead code and has been removed.)
    for (int i = 0; i < inNum; i++)
    {
        for (int j = 0; j < hideNum; j++)
        {
            w[i, j] = (R.NextDouble() * 2 - 1.0) / 2;
        }
    }
    // Initialise v with uniform random values in [-0.5, 0.5).
    for (int i = 0; i < hideNum; i++)
    {
        for (int j = 0; j < outNum; j++)
        {
            v[i, j] = (R.NextDouble() * 2 - 1.0) / 2;
        }
    }
    rate = 0.9;
    e = 0.0;
    in_rate = 1.0;
}
/// <summary>
/// One batch-mode training pass (epoch) over all samples: forward-propagates
/// each sample, accumulates the weight/bias gradients, then applies them
/// once via adjustWV. On return, <c>e</c> holds the square root of the
/// summed squared output error for the whole batch.
/// </summary>
/// <param name="p">Training inputs (sampleNum x inNum).</param>
/// <param name="t">Training targets (sampleNum x outNum).</param>
public void train1(double[,] p, double[,] t)
{
    e = 0.0;
    // Clear the batch gradient accumulators.
    resetWV(dv);
    resetWV(dw);
    resetWV(db1);
    resetWV(db2);
    // Scan p and t for the largest absolute value; it becomes the
    // normalisation scale so all inputs/targets fall within [-1, 1].
    double pMax = 0.0;
    for (int isamp = 0; isamp < sampleNum; isamp++)
    {
        for (int i = 0; i < inNum; i++)
        {
            if (Math.Abs(p[isamp, i]) > pMax)
            {
                pMax = Math.Abs(p[isamp, i]);
            }
        }
        for (int j = 0; j < outNum; j++)
        {
            if (Math.Abs(t[isamp, j]) > pMax)
            {
                pMax = Math.Abs(t[isamp, j]);
            }
        }
    }
    // BUGFIX: assign the scale once after the scan (the original assigned
    // it redundantly inside the sample loop) and guard against an all-zero
    // data set, which previously caused division by zero below.
    in_rate = (pMax > 0.0) ? pMax : 1.0;
    for (int isamp = 0; isamp < sampleNum; isamp++)
    {
        // Normalise the current sample and its target.
        for (int i = 0; i < inNum; i++)
        {
            x[i] = p[isamp, i] / in_rate;
        }
        for (int i = 0; i < outNum; i++)
        {
            yd[i] = t[isamp, i] / in_rate;
        }
        // Forward pass, hidden layer: sigmoid(weighted sum + bias).
        for (int j = 0; j < hideNum; j++)
        {
            o1[j] = 0.0;
            for (int i = 0; i < inNum; i++)
            {
                o1[j] += w[i, j] * x[i];
            }
            x1[j] = 1.0 / (1.0 + Math.Exp(-o1[j] - b1[j]));
        }
        // Forward pass, output layer.
        for (int k = 0; k < outNum; k++)
        {
            o2[k] = 0.0;
            for (int j = 0; j < hideNum; j++)
            {
                o2[k] += v[j, k] * x1[j];
            }
            x2[k] = 1.0 / (1.0 + Math.Exp(-o2[k] - b2[k]));
        }
        // Output-layer error terms (delta rule; the x2*(1-x2) factor is the
        // sigmoid derivative). Accumulate the V and b2 gradients and the
        // squared error.
        for (int k = 0; k < outNum; k++)
        {
            qq[k] = (yd[k] - x2[k]) * x2[k] * (1.0 - x2[k]);
            for (int j = 0; j < hideNum; j++)
            {
                dv[j, k] += qq[k] * x1[j];
            }
            db2[k] += qq[k];
            e += (yd[k] - x2[k]) * (yd[k] - x2[k]);
        }
        // Hidden-layer error terms, back-propagated through V; accumulate
        // the w and b1 gradients.
        for (int j = 0; j < hideNum; j++)
        {
            pp[j] = 0.0;
            for (int k = 0; k < outNum; k++)
            {
                pp[j] += qq[k] * v[j, k];
            }
            pp[j] = pp[j] * x1[j] * (1 - x1[j]);
            for (int i = 0; i < inNum; i++)
            {
                dw[i, j] += pp[j] * x[i];
            }
            db1[j] += pp[j];
        }
    }
    // Apply the accumulated batch gradients once per epoch.
    adjustWV(v, dv);
    adjustWV(w, dw);
    adjustWV(b2, db2);
    adjustWV(b1, db1);
    // NOTE: despite the original "mean squared error" naming, this is the
    // square root of the SUMMED squared error (not divided by sampleNum).
    e = Math.Sqrt(e);
}
//训练函数,针对训练样本进行训练,批量方式,共轭梯度
public void train2(double[,] p, double[,] t)
{
e = 0.0;
double[,] dv0 = new double[hideNum,outNum];
double[,] dw0 = new double[inNum, hideNum];
for (int i = 0; i < hideNum; i++)
{
for (int j = 0; j < outNum; j++)
{
dv0[i, j] = dv[i, j];
}
}
for (int i = 0; i < inNum; i++)
{
for (int j = 0; j < hideNum; j++)
{
dw0[i, j] = dw[i, j];
}
}
double[] db10 = new double[hideNum];
double[] db20 = new double[outNum];
for (int i = 0; i < hideNum; i++)
{
db10[i] = db1[i];
}
for (int i = 0; i < outNum; i++)
{
db20[i] = db2[i];
}
resetWV(dv);
resetWV(dw);
resetWV(db1);
resetWV(db2);
//求p,t中的最大值,得到归一化的比例系数
double pMax = 0.0;
for (int isamp = 0; isamp < sampleNum; isamp++)
{
for (int i = 0; i < inNum; i++)
{
if (Math.Abs(p[isamp, i]) > pMax)
{
pMax = Math.Abs(p[isamp, i]);
}
}
for (int j = 0; j < outNum; j++)
{
if (Math.Abs(t[isamp, j]) > pMax)
{
pMax = Math.Abs(t[isamp, j]);
}
}
in_rate = pMax;
}//end isamp
for (int isamp = 0; isamp < sampleNum; isamp++)
{
//数据归一化
for (int i = 0; i < inNum; i++)
{
// NOTE(review): the scraped source is truncated here. The body of train2
// (the conjugate-gradient batch training pass) is incomplete, and the
// remaining lines of the original page were webpage UI chrome (copy /
// search / full-screen / font-size shortcut help), not source code.
// The missing remainder of train2 must be recovered from the original
// source before this file can compile.