// MNistDoc.cpp : implementation of the CMNistDoc class
//

#include "stdafx.h"
#include "MNist.h"

#include "MNistDoc.h"
#include "CntrItem.h"

extern CMNistApp theApp;

UINT CMNistDoc::m_iBackpropThreadIdentifier = 0;  // static member used by threads to identify themselves
UINT CMNistDoc::m_iTestingThreadIdentifier = 0;  

#include "SHLWAPI.H"	// for the path functions
#pragma comment( lib, "shlwapi.lib" )

#ifdef _DEBUG
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif

/////////////////////////////////////////////////////////////////////////////
// CMNistDoc

IMPLEMENT_DYNCREATE(CMNistDoc, COleDocument)

BEGIN_MESSAGE_MAP(CMNistDoc, COleDocument)
//{{AFX_MSG_MAP(CMNistDoc)
ON_BN_CLICKED(IDC_BUTTON_OPEN_MNIST_FILES, OnButtonOpenMnistFiles)
ON_BN_CLICKED(IDC_BUTTON_CLOSE_MNIST_FILES, OnButtonCloseMnistFiles)

//}}AFX_MSG_MAP
// Enable default OLE container implementation
ON_UPDATE_COMMAND_UI(ID_EDIT_PASTE, COleDocument::OnUpdatePasteMenu)
ON_UPDATE_COMMAND_UI(ID_EDIT_PASTE_LINK, COleDocument::OnUpdatePasteLinkMenu)
ON_UPDATE_COMMAND_UI(ID_OLE_EDIT_CONVERT, COleDocument::OnUpdateObjectVerbMenu)
ON_COMMAND(ID_OLE_EDIT_CONVERT, COleDocument::OnEditConvert)
ON_UPDATE_COMMAND_UI(ID_OLE_EDIT_LINKS, COleDocument::OnUpdateEditLinksMenu)
ON_COMMAND(ID_OLE_EDIT_LINKS, COleDocument::OnEditLinks)
ON_UPDATE_COMMAND_UI_RANGE(ID_OLE_VERB_FIRST, ID_OLE_VERB_LAST, COleDocument::OnUpdateObjectVerbMenu)
END_MESSAGE_MAP()

/////////////////////////////////////////////////////////////////////////////
// CMNistDoc construction/destruction

CMNistDoc::CMNistDoc()
{
	// Use OLE compound files
	EnableCompoundFile();
	
	// TODO: add one-time construction code here
	
	m_bFilesOpen = FALSE;
	m_bBackpropThreadAbortFlag = FALSE;
	m_bBackpropThreadsAreRunning = FALSE;
	m_cBackprops = 0;
	m_nAfterEveryNBackprops = 1;
	
	m_bTestingThreadsAreRunning = FALSE;
	m_bTestingThreadAbortFlag = FALSE;
	
	m_iNextTestingPattern = 0;
	m_iNextTrainingPattern = 0;
	
	::InitializeCriticalSection( &m_csTrainingPatterns );
	::InitializeCriticalSection( &m_csTestingPatterns );
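	// (two separate locks, so the backprop threads, which advance
	// m_iNextTrainingPattern, do not contend with the testing threads,
	// which advance m_iNextTestingPattern)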
	
	m_utxNeuralNet = ::CreateMutex( NULL, FALSE, NULL );  // anonymous mutex which is unowned initially
	
	ASSERT( m_utxNeuralNet != NULL );
	
	
	// allocate memory to store the distortion maps
	
	m_cCols = 29;
	m_cRows = 29;
	
	m_cCount = m_cCols * m_cRows;
	
	m_DispH = new double[ m_cCount ];
	m_DispV = new double[ m_cCount ];
	
	
	// create a gaussian kernel, which is constant, for use in generating elastic distortions
	
	int iiMid = GAUSSIAN_FIELD_SIZE/2;  // GAUSSIAN_FIELD_SIZE is strictly odd
	
	double twoSigmaSquared = 2.0 * (::GetPreferences().m_dElasticSigma) * (::GetPreferences().m_dElasticSigma);
	twoSigmaSquared = 1.0 / twoSigmaSquared;  // stored as a reciprocal so the loop below multiplies instead of divides
	double twoPiSigma = 1.0 / ( (::GetPreferences().m_dElasticSigma) * sqrt( 2.0 * 3.1415926535897932384626433832795 ) );  // the 1/(sigma*sqrt(2*pi)) normalization of a Gaussian
	
	for ( int col=0; col<GAUSSIAN_FIELD_SIZE; ++col )
	{
		for ( int row=0; row<GAUSSIAN_FIELD_SIZE; ++row )
		{
			m_GaussianKernel[ row ][ col ] = twoPiSigma * 
				( exp(- ( ((row-iiMid)*(row-iiMid) + (col-iiMid)*(col-iiMid)) * twoSigmaSquared ) ) );
		}
	}
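	
	// Illustrative sanity checks on the kernel just built: it should peak at
	// the center and fall off symmetrically (the absolute scale depends on
	// m_dElasticSigma, whose default lives in the preferences code, not here)
	
	ASSERT( m_GaussianKernel[ iiMid ][ iiMid ] >= m_GaussianKernel[ 0 ][ 0 ] );
	ASSERT( m_GaussianKernel[ 0 ][ iiMid ] == m_GaussianKernel[ GAUSSIAN_FIELD_SIZE-1 ][ iiMid ] );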
	
	
	
}
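
// A sketch of how the pieces above presumably fit together (following the
// elastic-distortion recipe of Simard et al., whose architecture this program
// matches): m_DispH and m_DispV are filled with uniform random values, each
// field is smoothed by convolving it with m_GaussianKernel, and the result is
// scaled before being used to displace the pixels of an input pattern.  The
// routine that applies them is implemented later in this class.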

CMNistDoc::~CMNistDoc()
{
	if ( m_bFilesOpen != FALSE )
	{
		CloseMnistFiles();
	}
	
	::DeleteCriticalSection( &m_csTrainingPatterns );
	::DeleteCriticalSection( &m_csTestingPatterns );
	
	::CloseHandle( m_utxNeuralNet );
	
	// free the distortion-map memory allocated in the constructor
	
	delete[] m_DispH;
	delete[] m_DispV;
	
	
}

//DEL BOOL CMNistDoc::OnOpenDocument(LPCTSTR lpszPathName) 
//DEL {
//DEL 	if (!COleDocument::OnOpenDocument(lpszPathName))
//DEL 		return FALSE;
//DEL 
//DEL 	
//DEL 	// TODO: Add your specialized creation code here
//DEL 	
//DEL 	return TRUE;
//DEL }

void CMNistDoc::DeleteContents() 
{
	// TODO: Add your specialized code here and/or call the base class
	
	COleDocument::DeleteContents();
	
	m_NN.Initialize();
	
}




BOOL CMNistDoc::OnNewDocument()
{
	if (!COleDocument::OnNewDocument())
		return FALSE;
	
	// TODO: add reinitialization code here
	// (SDI documents will reuse this document)
	
	// grab the mutex for the neural network
	
	CAutoMutex tlo( m_utxNeuralNet );
	
	// initialize and build the neural net
	
	NeuralNetwork& NN = m_NN;  // for easier nomenclature
	NN.Initialize();
	
	NNLayer* pLayer;
	
	int ii, jj, kk;
	int icNeurons = 0;
	int icWeights = 0;
	double initWeight;
	CString label;
	
	// layer zero, the input layer.
	// Create neurons: exactly the same number of neurons as the input
	// vector of 29x29=841 pixels, and no weights/connections
	
	pLayer = new NNLayer( _T("Layer00") );
	NN.m_Layers.push_back( pLayer );
	
	for ( ii=0; ii<841; ++ii )
	{
		label.Format( _T("Layer00_Neuron%04d_Num%06d"), ii, icNeurons );
		pLayer->m_Neurons.push_back( new NNNeuron( (LPCTSTR)label ) );
		icNeurons++;
	}
	
#define UNIFORM_PLUS_MINUS_ONE ( (double)(2.0 * rand())/RAND_MAX - 1.0 )
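	// (UNIFORM_PLUS_MINUS_ONE expands to a uniformly distributed double in
	// [-1.0, +1.0]; each layer below scales it by 0.05, so every weight
	// starts as a small random value in [-0.05, +0.05])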
	
	// layer one:
	// This layer is a convolutional layer that has 6 feature maps.  Each feature 
	// map is 13x13, and each unit in the feature maps is a 5x5 convolutional kernel
	// of the input layer.
	// So, there are 13x13x6 = 1014 neurons, (5x5+1)x6 = 156 weights
	
	pLayer = new NNLayer( _T("Layer01"), pLayer );
	NN.m_Layers.push_back( pLayer );
	
	for ( ii=0; ii<1014; ++ii )
	{
		label.Format( _T("Layer01_Neuron%04d_Num%06d"), ii, icNeurons );
		pLayer->m_Neurons.push_back( new NNNeuron( (LPCTSTR)label ) );
		icNeurons++;
	}
	
	for ( ii=0; ii<156; ++ii )
	{
		label.Format( _T("Layer01_Weight%04d_Num%06d"), ii, icWeights );
		initWeight = 0.05 * UNIFORM_PLUS_MINUS_ONE;
		pLayer->m_Weights.push_back( new NNWeight( (LPCTSTR)label, initWeight ) );
	}
	
	// interconnections with previous layer: this is difficult
	// The previous layer is a top-down bitmap image that has been padded to size 29x29
	// Each neuron in this layer is connected to a 5x5 kernel in its feature map, which 
	// is also a top-down bitmap of size 13x13.  We move the kernel by TWO pixels, i.e., we
	// skip every other pixel in the input image
	
	int kernelTemplate[25] = {
		0,  1,  2,  3,  4,
		29, 30, 31, 32, 33,
		58, 59, 60, 61, 62,
		87, 88, 89, 90, 91,
		116,117,118,119,120 };
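	
	// kernelTemplate is simply the 25 offsets of a 5x5 window in a row-major
	// 29x29 image: entry kk equals (kk/5)*29 + (kk%5), e.g. entry 7 is
	// 1*29 + 2 = 31, one row down and two columns right of the window's
	// top-left corner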
		
	int iNumWeight;
		
	int fm;
		
	for ( fm=0; fm<6; ++fm)
	{
		for ( ii=0; ii<13; ++ii )
		{
			for ( jj=0; jj<13; ++jj )
			{
				iNumWeight = fm * 26;  // 26 is the number of weights per feature map
				NNNeuron& n = *( pLayer->m_Neurons[ jj + ii*13 + fm*169 ] );
				
				n.AddConnection( ULONG_MAX, iNumWeight++ );  // bias weight
				
				for ( kk=0; kk<25; ++kk )
				{
					// note: max val of index == 840, corresponding to 841 neurons in prev layer
					n.AddConnection( 2*jj + 58*ii + kernelTemplate[kk], iNumWeight++ );
				}
			}
		}
	}
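	
	// Worked example of the addressing above: the source index for neuron
	// (ii, jj) is 2*jj + 58*ii + kernelTemplate[kk].  The factor of 2 on jj
	// and of 58 (= 2*29) on ii gives the two-pixel step in both directions,
	// so for ii=1, jj=2 the kernel's top-left pixel is 2*2 + 58*1 = 62, i.e.
	// row 2, column 4 of the 29x29 input.  The largest index reached is
	// 2*12 + 58*12 + 120 = 840, the last of the 841 input neurons.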
	
	
	// layer two:
	// This layer is a convolutional layer that has 50 feature maps.  Each feature 
	// map is 5x5, and each unit in the feature maps is a 5x5 convolutional kernel
	// of corresponding areas of all 6 of the previous layers, each of which is a 13x13 feature map
	// So, there are 5x5x50 = 1250 neurons, (5x5+1)x6x50 = 7800 weights
	
	pLayer = new NNLayer( _T("Layer02"), pLayer );
	NN.m_Layers.push_back( pLayer );
	
	for ( ii=0; ii<1250; ++ii )
	{
		label.Format( _T("Layer02_Neuron%04d_Num%06d"), ii, icNeurons );
		pLayer->m_Neurons.push_back( new NNNeuron( (LPCTSTR)label ) );
		icNeurons++;
	}
	
	for ( ii=0; ii<7800; ++ii )
	{
		label.Format( _T("Layer02_Weight%04d_Num%06d"), ii, icWeights );
		initWeight = 0.05 * UNIFORM_PLUS_MINUS_ONE;
		pLayer->m_Weights.push_back( new NNWeight( (LPCTSTR)label, initWeight ) );
	}
	
	// Interconnections with previous layer: this is difficult
	// Each feature map in the previous layer is a top-down bitmap image whose size
	// is 13x13, and there are 6 such feature maps.  Each neuron in one 5x5 feature map of this 
	// layer is connected to a 5x5 kernel positioned correspondingly in all 6 parent
	// feature maps, and there are individual weights for the six different 5x5 kernels.  As
	// before, we move the kernel by TWO pixels, i.e., we
	// skip every other pixel in the input image.  The result is 50 different 5x5 top-down bitmap
	// feature maps
	
	int kernelTemplate2[25] = {
		0,  1,  2,  3,  4,
		13, 14, 15, 16, 17, 
		26, 27, 28, 29, 30,
		39, 40, 41, 42, 43, 
		52, 53, 54, 55, 56   };
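		
	// kernelTemplate2 is the same construction for a row-major 13x13 source:
	// entry kk equals (kk/5)*13 + (kk%5)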
		
		
	for ( fm=0; fm<50; ++fm)
	{
		for ( ii=0; ii<5; ++ii )
		{
			for ( jj=0; jj<5; ++jj )
			{
				iNumWeight = fm * 156;  // 156 = (5x5+1)x6 weights per feature map in this layer, matching the 7800 allocated above
				NNNeuron& n = *( pLayer->m_Neurons[ jj + ii*5 + fm*25 ] );
				
				n.AddConnection( ULONG_MAX, iNumWeight++ );  // bias weight
				
				for ( kk=0; kk<25; ++kk )
				{
					// note: max val of index == 1013, corresponding to 1014 neurons in prev layer
					n.AddConnection(       2*jj + 26*ii + kernelTemplate2[kk], iNumWeight++ );
					n.AddConnection( 169 + 2*jj + 26*ii + kernelTemplate2[kk], iNumWeight++ );
					n.AddConnection( 338 + 2*jj + 26*ii + kernelTemplate2[kk], iNumWeight++ );
					n.AddConnection( 507 + 2*jj + 26*ii + kernelTemplate2[kk], iNumWeight++ );
					n.AddConnection( 676 + 2*jj + 26*ii + kernelTemplate2[kk], iNumWeight++ );
					n.AddConnection( 845 + 2*jj + 26*ii + kernelTemplate2[kk], iNumWeight++ );
				}
			}
		}
	}
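	
	// The six AddConnection calls per kernel position differ only by the
	// offsets 0, 169, 338, 507, 676 and 845: each parent feature map holds
	// 13x13 = 169 neurons, so the offset selects which of the six maps is
	// read.  The largest index reached is 845 + 2*4 + 26*4 + 56 = 1013, the
	// last of the 1014 neurons in the previous layer.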
			
	
	// layer three:
	// This layer is a fully-connected layer with 100 units.  Since it is fully-connected,
	// each of the 100 neurons in the layer is connected to all 1250 neurons in
	// the previous layer.
	// So, there are 100 neurons and 100*(1250+1)=125100 weights
	
	pLayer = new NNLayer( _T("Layer03"), pLayer );
	NN.m_Layers.push_back( pLayer );
	
	for ( ii=0; ii<100; ++ii )
	{
		label.Format( _T("Layer03_Neuron%04d_Num%06d"), ii, icNeurons );
		pLayer->m_Neurons.push_back( new NNNeuron( (LPCTSTR)label ) );
		icNeurons++;
	}
	
	for ( ii=0; ii<125100; ++ii )
	{
		label.Format( _T("Layer03_Weight%04d_Num%06d"), ii, icWeights );
		initWeight = 0.05 * UNIFORM_PLUS_MINUS_ONE;
		pLayer->m_Weights.push_back( new NNWeight( (LPCTSTR)label, initWeight ) );
	}
	
	// Interconnections with previous layer: fully-connected
	
	iNumWeight = 0;  // weights are not shared in this layer
	
	for ( fm=0; fm<100; ++fm )
	{
		NNNeuron& n = *( pLayer->m_Neurons[ fm ] );
		n.AddConnection( ULONG_MAX, iNumWeight++ );  // bias weight
		
		for ( ii=0; ii<1250; ++ii )
		{
			n.AddConnection( ii, iNumWeight++ );
		}
	}
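	
	// Count check: each of the 100 neurons claims one bias plus 1250 inputs,
	// consuming weight indices 0 through 125099 -- exactly the 125100 weights
	// allocated above.  The output layer below repeats the same pattern with
	// 10 x (100+1) = 1010 weights.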
	
			
			
	// layer four, the final (output) layer:
	// This layer is a fully-connected layer with 10 units.  Since it is fully-connected,
	// each of the 10 neurons in the layer is connected to all 100 neurons in
	// the previous layer.
	// So, there are 10 neurons and 10*(100+1)=1010 weights
	
	pLayer = new NNLayer( _T("Layer04"), pLayer );
	NN.m_Layers.push_back( pLayer );
	
	for ( ii=0; ii<10; ++ii )
	{
		label.Format( _T("Layer04_Neuron%04d_Num%06d"), ii, icNeurons );
		pLayer->m_Neurons.push_back( new NNNeuron( (LPCTSTR)label ) );
		icNeurons++;
	}
	
	for ( ii=0; ii<1010; ++ii )
	{
		label.Format( _T("Layer04_Weight%04d_Num%06d"), ii, icWeights );
		initWeight = 0.05 * UNIFORM_PLUS_MINUS_ONE;
		pLayer->m_Weights.push_back( new NNWeight( (LPCTSTR)label, initWeight ) );
	}
	
	// Interconnections with previous layer: fully-connected
	
	iNumWeight = 0;  // weights are not shared in this layer
	
	for ( fm=0; fm<10; ++fm )
	{
		NNNeuron& n = *( pLayer->m_Neurons[ fm ] );
		n.AddConnection( ULONG_MAX, iNumWeight++ );  // bias weight
		
		for ( ii=0; ii<100; ++ii )
		{
			n.AddConnection( ii, iNumWeight++ );
		}
	}
	
	
	SetModifiedFlag( TRUE );
	
	return TRUE;
}



/////////////////////////////////////////////////////////////////////////////
// CMNistDoc serialization

void CMNistDoc::Serialize(CArchive& ar)
{
	if (ar.IsStoring())
	{
		// TODO: add storing code here
		
	}
	else
	{
		// TODO: add loading code here
		
	}
	
	{
		// grab the mutex for the neural network in a local scope, and then serialize
		
		CAutoMutex tlo( m_utxNeuralNet );
		
		m_NN.Serialize( ar );
		
	}
	
	// Calling the base class COleDocument enables serialization
	//  of the container document's COleClientItem objects.
	COleDocument::Serialize(ar);
}

/////////////////////////////////////////////////////////////////////////////
// CMNistDoc diagnostics

#ifdef _DEBUG
void CMNistDoc::AssertValid() const
{
	COleDocument::AssertValid();
}

void CMNistDoc::Dump(CDumpContext& dc) const
{
	COleDocument::Dump(dc);
