
gneuralnet.cpp

A very useful piece of open-source code (C++)
	GAssert(nInternalIndex == m_pInternalRelation->GetAttributeCount(), "error");
}

void GNeuralNet::OutputsToExternal(double* pInternal, double* pExternal)
{
	GAssert(m_pMinAndRanges, "min and ranges not calculated yet");
	GArffAttribute* pAttr;
	int nValueCount;
	int nOutputCount = m_pRelation->GetOutputCount();
	int nInternalIndex = m_pInternalRelation->GetOutputIndex(0);
	int n, i, nExternalIndex;
	double dVal, dHighestVal;
	for(n = 0; n < nOutputCount; n++)
	{
		nExternalIndex = m_pRelation->GetOutputIndex(n);
		pAttr = m_pRelation->GetAttribute(nExternalIndex);
		if(pAttr->IsContinuous())
		{
			// Stretch the network output back to the attribute's measured range
			pExternal[nExternalIndex] = GArffData::Normalize(pInternal[nInternalIndex++], OUTPUT_MIN, OUTPUT_RANGE, m_pMinAndRanges[2 * nExternalIndex], m_pMinAndRanges[2 * nExternalIndex + 1]);
		}
		else
		{
			nValueCount = pAttr->GetValueCount();
			if(nValueCount <= 2)
			{
				// Binary attribute: threshold a single output node
				pExternal[nExternalIndex] = (pInternal[nInternalIndex++] >= OUTPUT_MIDDLE ? 1 : 0);
			}
			else
			{
				// One node per discrete value: pick the one that fired strongest
				pExternal[nExternalIndex] = 0;
				dHighestVal = pInternal[nInternalIndex++];
				for(i = 1; i < nValueCount; i++)
				{
					dVal = pInternal[nInternalIndex++];
					if(dVal > dHighestVal)
					{
						pExternal[nExternalIndex] = i;
						dHighestVal = dVal;
					}
				}
			}
		}
	}
	GAssert(nInternalIndex == m_pInternalRelation->GetAttributeCount(), "error");
}

void GNeuralNet::AddLayer(int nNodes)
{
	int nPrevLayerStart = m_nLayerStart;
	int nPrevLayerSize = m_nLayerSize;
	m_nLayerStart = m_pNeurons->GetSize();
	m_nLayerSize = nNodes;
	int n, i;
	for(n = 0; n < nNodes; n++)
	{
		GStandardNeuron* pNewNeuron = new GStandardNeuron();
		m_pNeurons->AddPointer(pNewNeuron);
		// Feed the new neuron into every neuron of the previously added layer
		for(i = 0; i < nPrevLayerSize; i++)
		{
			GNeuron* pOldNeuron = (GNeuron*)m_pNeurons->GetPointer(nPrevLayerStart + i);
			pOldNeuron->AddInput(pNewNeuron);
		}
	}
}

int GNeuralNet::GetWeightCount()
{
	if(m_nWeightCount == 0)
	{
		// Lazily count the weights by serializing each neuron to NULL
		int n;
		int nCount = m_pNeurons->GetSize();
		GNeuron* pNeuron;
		for(n = 0; n < nCount; n++)
		{
			pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
			m_nWeightCount += pNeuron->SerializeWeights(NULL);
		}
	}
	return m_nWeightCount;
}

void GNeuralNet::GetWeights(double* pOutWeights)
{
	// Serialize the weights
	int nCount = m_pNeurons->GetSize();
	int nPos = 0;
	int n;
	GNeuron* pNeuron;
	for(n = 0; n < nCount; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		nPos += pNeuron->SerializeWeights(&pOutWeights[nPos]);
	}
	GAssert(nPos == m_nWeightCount, "serialization size inconsistent");
}

void GNeuralNet::SetWeights(double* pWeights)
{
	int n;
	int nCount = m_pNeurons->GetSize();
	GNeuron* pNeuron;
	int nPos = 0;
	for(n = 0; n < nCount; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		nPos += pNeuron->DeserializeWeights(&pWeights[nPos]);
	}
	GAssert(nPos == m_nWeightCount, "serialization size inconsistent");
}

void GNeuralNet::UpdateBestWeights()
{
	if(!m_pBestSet)
		m_pBestSet = new double[GetWeightCount()];
	GetWeights(m_pBestSet);
}

void GNeuralNet::RestoreBestWeights()
{
	SetWeights(m_pBestSet);
}

void GNeuralNet::EvalInternal(double* pRow)
{
	// Clear the outputs of all non-input neurons
	GNeuron* pNeuron;
	int n;
	for(n = 0; n < m_nInputStart; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		pNeuron->SetOutput(1e50);
	}

	// Copy inputs into input neurons
	int nInputs = m_pInternalRelation->GetInputCount();
	int nCount = m_nInputStart + nInputs;
	GAssert(nCount == m_pNeurons->GetSize(), "neurons added after input neurons?");
	int nInput = 0;
	for( ; n < nCount; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		pNeuron->SetOutput(pRow[nInput++]);
	}

	// Pull the evaluation downstream to the output nodes
	int nOutputs = m_pInternalRelation->GetOutputCount();
	for(n = 0; n < nOutputs; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		pNeuron->PullEvalDownStream();
	}
}

void GNeuralNet::Eval(double* pRow)
{
	// Convert to internal data
	double* pInternalRow = (double*)alloca(sizeof(double) * m_pInternalRelation->GetAttributeCount());
	InputsToInternal(pRow, pInternalRow);

	// Do the evaluation
	EvalInternal(pInternalRow);

	// Extract the output values from the output nodes
	GNeuron* pNeuron;
	int n;
	int nOutputs = m_pInternalRelation->GetOutputCount();
	int nIndex = m_pInternalRelation->GetOutputIndex(0);
	for(n = 0; n < nOutputs; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		pInternalRow[nIndex++] = pNeuron->GetOutput();
	}

	// Convert outputs to external data
	OutputsToExternal(pInternalRow, pRow);
}

void GNeuralNet::Criticize(double* pModel)
{
	// Calculate the error on all output nodes. For a sigmoid unit,
	// output * (1 - output) is the derivative of the activation, so this
	// is the standard output-layer delta: (target - output) * sigmoid'(net).
	GNeuron* pNeuron = NULL;
	int n;
	double dOutput;
	int nOutputs = m_pInternalRelation->GetOutputCount();
	int nIndex = m_pInternalRelation->GetOutputIndex(0);
	for(n = 0; n < nOutputs; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		dOutput = pNeuron->GetOutput();
		pNeuron->SetError((pModel[nIndex++] - dOutput) * dOutput * (1.0 - dOutput));
	}

	// Clear the error on the rest of the nodes (the loop runs through index
	// m_nInputStart, so pNeuron ends up pointing at an input neuron)
	for( ; n <= m_nInputStart; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		pNeuron->SetError(1e50);
	}

	// Backpropagate the error. (We only need to call PullErrorBackUpStream
	// on one input neuron, because every input is connected to all the nodes
	// in the next layer and we don't need the error value for the inputs.)
	pNeuron->PullErrorBackUpStream();
}

void GNeuralNet::MeasureMinAndRanges(GArffData* pTrainingData)
{
	int nAttrCount = m_pRelation->GetAttributeCount();
	delete[] m_pMinAndRanges;
	m_pMinAndRanges = new double[2 * nAttrCount];
	GArffAttribute* pAttr;
	int n;
	for(n = 0; n < nAttrCount; n++)
	{
		pAttr = m_pRelation->GetAttribute(n);
		if(pAttr->IsContinuous())
		{
			pTrainingData->GetMinAndRange(n, &m_pMinAndRanges[2 * n], &m_pMinAndRanges[2 * n + 1]);
			// Clamp the range away from zero so normalization never divides by zero
			if(m_pMinAndRanges[2 * n + 1] < .00001)
				m_pMinAndRanges[2 * n + 1] = .00001;
		}
		else
		{
			m_pMinAndRanges[2 * n] = 0;
			m_pMinAndRanges[2 * n + 1] = 0;
		}
	}
}

void GNeuralNet::ExternalToInternalData(GArffData* pExternal, GArffData* pInternal)
{
	double* pExternalRow;
	double* pInternalRow;
	int n;
	int nInternalAttributeCount = m_pInternalRelation->GetAttributeCount();
	int nRowCount = pExternal->GetSize();
	for(n = 0; n < nRowCount; n++)
	{
		pExternalRow = pExternal->GetVector(n);
		pInternalRow = new double[nInternalAttributeCount];
		InputsToInternal(pExternalRow, pInternalRow);
		OutputsToInternal(pExternalRow, pInternalRow);
		pInternal->AddVector(pInternalRow);
	}
}

double GNeuralNet::TrainValidate()
{
	// Mean squared error over the validation set
	int n, i, nIndex;
	GNeuron* pNeuron;
	double* pRow;
	double d;
	double dError = 0;
	int nCount = m_pValidationDataInternal->GetSize();
	int nOutputs = m_pInternalRelation->GetOutputCount();
	for(n = 0; n < nCount; n++)
	{
		pRow = m_pValidationDataInternal->GetVector(n);
		EvalInternal(pRow);
		nIndex = m_pInternalRelation->GetOutputIndex(0);
		for(i = 0; i < nOutputs; i++)
		{
			pNeuron = (GNeuron*)m_pNeurons->GetPointer(i);
			d = pRow[nIndex++] - pNeuron->GetOutput();
			d *= d;
			dError += d;
		}
	}
	dError /= (nCount * nOutputs);
	return dError;
}

void GNeuralNet::PrintNeurons()
{
	printf("-----------------\n");
	GNeuron* pNeuron;
	int n;
	for(n = 0; n < m_nInputStart; n++)
	{
		pNeuron = (GNeuron*)m_pNeurons->GetPointer(n);
		printf("Neuron %d\n", n);
		pNeuron->Print();
	}
	printf("-----------------\n");
}
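
// ------------------------------------------------------------------------
// Training entry points. Train() makes stochastic updates (the weights move
// after every row, via TrainEpoch), while TrainBatch() accumulates weight
// deltas over a whole pass and applies them once per epoch. Both validate
// every m_nEpochsPerValidationCheck epochs and stop when the validation MSE
// reaches m_dAcceptableMeanSquareError, when m_nRunEpochs pass without a new
// best, or when m_nMaximumEpochs is reached; the best weights found are
// restored before returning.
// ------------------------------------------------------------------------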
void GNeuralNet::Train(GArffData* pData)
{
	// Split off a validation portion and train on the rest
	int nTrainRows = (int)(m_dTrainingPortion * pData->GetSize());
	GArffData* pValidateData = pData->SplitBySize(nTrainRows);
	Train(pData, pValidateData);
}

int GNeuralNet::Train(GArffData* pTrainingData, GArffData* pValidationData)
{
	TrainInit(pTrainingData, pValidationData);

	// Do the epochs
	int nEpochs;
	double dBestError = 1e20;
	int nEpochsSinceValidationCheck = 0;
	int nBestEpoch = 0;
	for(nEpochs = 0; true; nEpochs++)
	{
		TrainEpoch();

		// Check for termination condition
		nEpochsSinceValidationCheck++;
		if(nEpochsSinceValidationCheck >= m_nEpochsPerValidationCheck)
		{
			nEpochsSinceValidationCheck = 0;
			double dMeanSquareError = TrainValidate();
			//printf("Epoch: %d\tError=%lf\n", nEpochs, dBestError);
			if(dMeanSquareError < dBestError)
			{
				// Found a new best set of weights
				dBestError = dMeanSquareError;
				nBestEpoch = nEpochs;
				UpdateBestWeights();
				if(dMeanSquareError <= m_dAcceptableMeanSquareError)
					break;
			}
			else
			{
				// Test for termination condition
				if(nEpochs - nBestEpoch >= m_nRunEpochs)
					break;
			}
			if(nEpochs >= m_nMaximumEpochs)
				break;
		}
	}
	if(dBestError < 1e20)
		RestoreBestWeights();
	else
	{
		GAssert(false, "Total failure!");
	}
	ReleaseInternalData();
	return nEpochs;
}

void GNeuralNet::TrainInit(GArffData* pTrainingData, GArffData* pValidationData)
{
	GAssert(m_nRunEpochs <= m_nMaximumEpochs, "conflicting settings");

	// Add the input layer
	MakeInputLayer();

	// Make the internal data
	MeasureMinAndRanges(pTrainingData);
	ReleaseInternalData();
	m_pTrainingDataInternal = new GArffData(pTrainingData->GetSize());
	ExternalToInternalData(pTrainingData, m_pTrainingDataInternal);
	if(pTrainingData == pValidationData)
		m_pValidationDataInternal = m_pTrainingDataInternal;
	else
	{
		m_pValidationDataInternal = new GArffData(pValidationData->GetSize());
		ExternalToInternalData(pValidationData, m_pValidationDataInternal);
	}
}

void GNeuralNet::TrainEpoch()
{
	// Do a single epoch
	double* pRow;
	GNeuron* pNeuron;
	int n, i;
	int nRowCount = m_pTrainingDataInternal->GetSize();
	for(n = 0; n < nRowCount; n++)
	{
		// Compute output for this row
		pRow = m_pTrainingDataInternal->GetVector(n);
		EvalInternal(pRow);

		// Backpropagate the error
		Criticize(pRow);

		// Adjust the weights in a gradient descent manner
		for(i = 0; i < m_nInputStart; i++)
		{
			pNeuron = (GNeuron*)m_pNeurons->GetPointer(i);
			pNeuron->AjustWeights(m_dLearningRate, m_dMomentum);
		}
	}
	m_dLearningRate *= m_dLearningDecay;
	m_pTrainingDataInternal->Shuffle();
}

int GNeuralNet::TrainBatch(GArffData* pTrainingData, GArffData* pValidationData)
{
	TrainInit(pTrainingData, pValidationData);

	// Do the epochs
	double* pRow;
	GNeuron* pNeuron;
	int n, i, nEpochs;
	double dBestError = 1e20;
	int nRowCount = m_pTrainingDataInternal->GetSize();
	int nEpochsSinceValidationCheck = 0;
	int nBestEpoch = 0;
	for(nEpochs = 0; true; nEpochs++)
	{
		// Train with each of the training examples (one epoch)
		for(n = 0; n < nRowCount; n++)
		{
			// Compute output for this row
			pRow = m_pTrainingDataInternal->GetVector(n);
			EvalInternal(pRow);

			// Backpropagate the error
			Criticize(pRow);

			// Accumulate the weight deltas in a gradient descent manner
			for(i = 0; i < m_nInputStart; i++)
			{
				pNeuron = (GNeuron*)m_pNeurons->GetPointer(i);
				pNeuron->BatchUpdateDeltas(m_dLearningRate);
			}
		}

		// Adjust the weights by the summed weight deltas
		for(i = 0; i < m_nInputStart; i++)
		{
			pNeuron = (GNeuron*)m_pNeurons->GetPointer(i);
			pNeuron->BatchUpdateWeights();
		}
		m_dLearningRate *= m_dLearningDecay;

		// Check for termination condition
		nEpochsSinceValidationCheck++;
		if(nEpochsSinceValidationCheck >= m_nEpochsPerValidationCheck)
		{
			nEpochsSinceValidationCheck = 0;
			double dMeanSquareError = TrainValidate();
			//printf("Epoch: %d\tError=%lf\n", nEpochs, dBestError);
			if(dMeanSquareError < dBestError)
			{
				// Found a new best set of weights
				dBestError = dMeanSquareError;
				nBestEpoch = nEpochs;
				UpdateBestWeights();
				if(dMeanSquareError <= m_dAcceptableMeanSquareError)
					break;
			}
			else
			{
				// Test for termination condition
				if(nEpochs - nBestEpoch >= m_nRunEpochs)
					break;
			}
			if(nEpochs >= m_nMaximumEpochs)
				break;
		}
	}
	if(dBestError < 1e20)
		RestoreBestWeights();
	else
	{
		GAssert(false, "Total failure!");
	}
	ReleaseInternalData();
	return nEpochs;
}
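
Two bits of arithmetic are easy to miss in the dense loops above: the output-layer error term that Criticize computes, and the linear rescaling that OutputsToExternal applies to continuous attributes. The standalone sketch below works both through with concrete numbers. Note the body of GArffData::Normalize does not appear on this page, so the helper here is an assumed reimplementation inferred from its call site, and the OUTPUT_MIN/OUTPUT_RANGE values of 0 and 1 are likewise assumed for the illustration.

#include <math.h>
#include <stdio.h>

// Assumed behavior of GArffData::Normalize, inferred from how it is called
// in OutputsToExternal: map dVal linearly from [dFromMin, dFromMin + dFromRange]
// onto [dToMin, dToMin + dToRange].
static double Normalize(double dVal, double dFromMin, double dFromRange, double dToMin, double dToRange)
{
	return (dVal - dFromMin) / dFromRange * dToRange + dToMin;
}

int main()
{
	// 1) The output-layer error term set in Criticize():
	//    delta = (target - output) * output * (1 - output),
	//    where output * (1 - output) is the derivative of the sigmoid.
	double dTarget = 1.0;
	double dNet = 0.5;                          // weighted sum into an output neuron
	double dOutput = 1.0 / (1.0 + exp(-dNet));  // sigmoid(0.5), about 0.6225
	double dDelta = (dTarget - dOutput) * dOutput * (1.0 - dOutput);
	printf("output=%lf, delta=%lf\n", dOutput, dDelta);

	// 2) The rescaling OutputsToExternal performs for a continuous attribute:
	//    a raw network output in [OUTPUT_MIN, OUTPUT_MIN + OUTPUT_RANGE]
	//    (assumed [0, 1] here) is stretched back to the attribute's measured
	//    [min, min + range], e.g. min = 20, range = 60.
	printf("external=%lf\n", Normalize(0.75, 0.0, 1.0, 20.0, 60.0));  // 0.75 maps to 65.0
	return 0;
}

Inside the network these two steps are buried in pointer loops over GNeuron objects, but the numbers they produce are exactly the ones above.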
