dhmm_hq.cpp

VQ and DHMM model training program for speech recognition (C language), page 1 of 2
//	DHMM_HQ.cpp:
//		Implementation of the DHMM_HQ Module.
//		This is a C rewrite of the earlier
//			Matlab DHMM program by HeQiang.
//
//	Created 2001/08, By DongMing, MDSR.
//
//////////////////////////////////////////////////////////////////////

#include "stdafx.h"
#include "DHMM_HQ.h"
#include "kwspot.h"
#include <math.h>

#include "DHMM_GL.h"
extern PRO_CONFIG u_Pro_Config;

#define VQ_TRAIN_CODE_BOOK_LOOP_TIME		40		//	Maximum number of K-means iterations in the VQ training
													//	The best loop: 100, better than 80
#define MODEL_TRAIN_CODE_BOOK_LOOP_TIME		32		//	Maximum number of iterations in the model training
													//	20, 40 are better

//////////////////////////////////////////////////////////////////////
//	Private Function Head
int DHMM_Model_Calculate_Gamma(WORD_SAMPLE * pu_Word_Sample, DHMM_MODEL * pu_DHMM_Model);

//////////////////////////////////////////////////////////////////////
//	API functions
int DHMM_VQ_Train_Code_Book_HQ(DYNA_2DIM_DOUBLE_ARRAY d2dda_Code_Word, int n_Code_Word_Num, int n_Code_Word_Dim,
							   DYNA_2DIM_DOUBLE_ARRAY d2dda_Initial_Code_Book, DYNA_2DIM_DOUBLE_ARRAY d2dda_Code_Book, int n_Code_Book_Size)
{
	int * pn_Initial_Code_Book_Index;
	int * pn_Code_Word_Min_Index;			//	Index of the nearest cluster centroid for each feature vector
	double * pd_Code_Word_Min_Distance;		//	Squared distance from each feature vector to its nearest centroid
	int n_Loop_Index, n_Feature_Index, n_Code_Word_Index, n_Dim_Index;
	double dTmp;
	double d_Old_Distance, d_Distance;		//	Average within-cluster distance of the previous and the current iteration
	int nTmp;
	BOOL boolTmp;

	PRO_LOG("\tVQ = HQ, loop = %4d.\n", VQ_TRAIN_CODE_BOOK_LOOP_TIME);

	//	If an initial code book is supplied, copy it
	if (d2dda_Initial_Code_Book != NULL)
	{
		for (n_Code_Word_Index = 0; n_Code_Word_Index < n_Code_Book_Size; n_Code_Word_Index++)
		{
			memcpy(d2dda_Code_Book[n_Code_Word_Index], d2dda_Initial_Code_Book[n_Code_Word_Index], sizeof(double) * n_Code_Word_Dim);
		}
	}
	//	If no initial code book is supplied, pick random, distinct feature vectors as the initial centroids
	else
	{
		pn_Initial_Code_Book_Index = new int[n_Code_Book_Size];
		ASSERT(pn_Initial_Code_Book_Index);

		for (n_Code_Word_Index = 0; n_Code_Word_Index < n_Code_Book_Size; n_Code_Word_Index++)
		{
			do {
				pn_Initial_Code_Book_Index[n_Code_Word_Index] = int((double(rand()) / (double(RAND_MAX) + 1)) * n_Code_Word_Num);
				boolTmp = FALSE;
				for (nTmp = 0; nTmp < n_Code_Word_Index; nTmp++)
				{
					if (pn_Initial_Code_Book_Index[n_Code_Word_Index] == pn_Initial_Code_Book_Index[nTmp]) boolTmp = TRUE;
				}
			} while (boolTmp);
		}

		for (n_Code_Word_Index = 0; n_Code_Word_Index < n_Code_Book_Size; n_Code_Word_Index++)
		{
			memcpy(d2dda_Code_Book[n_Code_Word_Index], d2dda_Code_Word[pn_Initial_Code_Book_Index[n_Code_Word_Index]], sizeof(double) * n_Code_Word_Dim);
		}

		delete[] pn_Initial_Code_Book_Index;
	}

	pn_Code_Word_Min_Index = new int[n_Code_Word_Num];
	pd_Code_Word_Min_Distance = new double[n_Code_Word_Num];
	ASSERT((pn_Code_Word_Min_Index != NULL) && (pd_Code_Word_Min_Distance != NULL));

	d_Distance = MAX_DOUBLE_VALUE;
	for (n_Loop_Index = 0; n_Loop_Index < VQ_TRAIN_CODE_BOOK_LOOP_TIME; n_Loop_Index++)
	{
		DEBUG_PRINTF("VQ:\tLoop = %4d of %d.\n", n_Loop_Index, VQ_TRAIN_CODE_BOOK_LOOP_TIME);

		//	Find the nearest centroid (by squared Euclidean distance) for each feature vector
		for (n_Feature_Index = 0; n_Feature_Index < n_Code_Word_Num; n_Feature_Index++)
		{
			pn_Code_Word_Min_Index[n_Feature_Index] = -1;
			pd_Code_Word_Min_Distance[n_Feature_Index] = MAX_DOUBLE_VALUE;

			for (n_Code_Word_Index = 0; n_Code_Word_Index < n_Code_Book_Size; n_Code_Word_Index++)
			{
				dTmp = 0.0F;
				for (n_Dim_Index = 0; n_Dim_Index < n_Code_Word_Dim; n_Dim_Index++)
				{
					dTmp += (d2dda_Code_Word[n_Feature_Index][n_Dim_Index] - d2dda_Code_Book[n_Code_Word_Index][n_Dim_Index])
						* (d2dda_Code_Word[n_Feature_Index][n_Dim_Index] - d2dda_Code_Book[n_Code_Word_Index][n_Dim_Index]);
				}

				if (dTmp < pd_Code_Word_Min_Distance[n_Feature_Index])
				{
					pn_Code_Word_Min_Index[n_Feature_Index] = n_Code_Word_Index;
					pd_Code_Word_Min_Distance[n_Feature_Index] = dTmp;
				}
			}
		}

		//	Compute the average within-cluster distance; the relative change is only logged, the loop always runs the full iteration count
		d_Old_Distance = d_Distance;
		d_Distance = 0.0F;
		for (n_Feature_Index = 0; n_Feature_Index < n_Code_Word_Num; n_Feature_Index++)
		{
			d_Distance += pd_Code_Word_Min_Distance[n_Feature_Index];
		}
		d_Distance /= n_Code_Word_Num;

		DEBUG_PRINTF("\tAverage Distance = %10.4f, Changed %15.4E.\n", d_Distance, ((d_Old_Distance - d_Distance) / d_Old_Distance));

		//	Assign each sample to its nearest centroid, then update every centroid to the mean of its assigned samples
		for (n_Code_Word_Index = 0; n_Code_Word_Index < n_Code_Book_Size; n_Code_Word_Index++)
		{
			nTmp = 0;
			for (n_Dim_Index = 0; n_Dim_Index < n_Code_Word_Dim; n_Dim_Index++)
			{
				d2dda_Code_Book[n_Code_Word_Index][n_Dim_Index] = 0.0F;
			}
			for (n_Feature_Index = 0; n_Feature_Index < n_Code_Word_Num; n_Feature_Index++)
			{
				if (pn_Code_Word_Min_Index[n_Feature_Index] == n_Code_Word_Index)
				{
					nTmp++;
					for (n_Dim_Index = 0; n_Dim_Index < n_Code_Word_Dim; n_Dim_Index++)
					{
						d2dda_Code_Book[n_Code_Word_Index][n_Dim_Index] += d2dda_Code_Word[n_Feature_Index][n_Dim_Index];
					}
				}
			}

			//	The cluster received at least one sample: take the mean directly
			if (nTmp != 0)
			{
				for (n_Dim_Index = 0; n_Dim_Index < n_Code_Word_Dim; n_Dim_Index++)
				{
					d2dda_Code_Book[n_Code_Word_Index][n_Dim_Index] /= nTmp;
				}
			}
			//	The cluster received no samples: drop the old centroid and pick a random sample as the new centroid
			//	(samples whose distance to their nearest centroid is near zero are rejected, so the pick cannot coincide with an existing centroid)
			else
			{
				do {
					nTmp = int((double(rand()) / (double(RAND_MAX) + 1)) * n_Code_Word_Num);
					boolTmp = FALSE;
					if (pd_Code_Word_Min_Distance[nTmp] < EPSILON_DOUBLE_VALUE) boolTmp = TRUE;
				} while (boolTmp);

				memcpy(d2dda_Code_Book[n_Code_Word_Index], d2dda_Code_Word[nTmp], sizeof(double) * n_Code_Word_Dim);
			}
		}

	}
	PRO_LOG("VQ:\tLoop = %4d, Average Distance = %10.4f, Changed %15.4E.\n", n_Loop_Index, d_Distance, ((d_Old_Distance - d_Distance) / d_Old_Distance));

	delete[] pn_Code_Word_Min_Index;
	delete[] pd_Code_Word_Min_Distance;

	return 0;
}
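
//////////////////////////////////////////////////////////////////////
//	Illustrative usage sketch (not part of the original module): shows how
//	DHMM_VQ_Train_Code_Book_HQ might be called to train a 64-entry code book
//	from n_Num feature vectors of dimension n_Dim. It assumes that d2dda_New()
//	(declared in DHMM_GL.h and used in the model training code below) allocates
//	a [rows] x [cols] double matrix; filling the feature matrix and freeing the
//	code book are not shown, since the matching helpers are outside this excerpt.
static void VQ_Train_Usage_Sketch(DYNA_2DIM_DOUBLE_ARRAY d2dda_Feature, int n_Num, int n_Dim)
{
	const int n_Code_Book_Size = 64;
	DYNA_2DIM_DOUBLE_ARRAY d2dda_Code_Book = d2dda_New(n_Code_Book_Size, n_Dim);
	ASSERT(d2dda_Code_Book != NULL);

	//	Passing NULL as the initial code book makes the trainer pick random,
	//	distinct feature vectors as the initial centroids (see above).
	DHMM_VQ_Train_Code_Book_HQ(d2dda_Feature, n_Num, n_Dim,
							   NULL, d2dda_Code_Book, n_Code_Book_Size);

	//	d2dda_Code_Book now holds the trained centroids, ready for quantization.
}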

int DHMM_Model_Train_DHMM_Model_HQ(WORD_SAMPLE * pu_Word_Sample, int n_Word_Sample_Num,
								   DHMM_MODEL * pu_DHMM_Model)
{
	DYNA_2DIM_DOUBLE_ARRAY d2dda_Nom;	//	[state count] x [code book size]
	double * pd_Denom;					//	[state count]
	int n_Loop_Index, n_State_Index, n_Code_Word_Index, n_Word_Sample_Index, n_Frame_Index;
	int nTmp;
	double d_Old_Total_Likelihood, d_Total_Likelihood;	//	Sum of the model's match scores over all word samples, for the previous and the current iteration
	double dTmp;

	PRO_LOG("\tModel = HQ, loop = %4d.\n", MODEL_TRAIN_CODE_BOOK_LOOP_TIME);

	//	Initialize Pi: the initial state is always state 0
	for (n_State_Index = 0; n_State_Index < pu_DHMM_Model->n_State_Num; n_State_Index++)
	{
		pu_DHMM_Model->pdPi[n_State_Index] = 0.0F;
	}
	pu_DHMM_Model->pdPi[0] = 1.0F;

	//	Initialize A: each state may only stay or jump to the next state, each with probability 0.5; the last state may only stay
	for (n_State_Index = 0; n_State_Index < pu_DHMM_Model->n_State_Num; n_State_Index++)
	{
		for (nTmp = 0; nTmp < pu_DHMM_Model->n_State_Num; nTmp++)
		{
			pu_DHMM_Model->d2dda_A[n_State_Index][nTmp] = 0.0F;
		}
	}
	for (n_State_Index = 0; n_State_Index < (pu_DHMM_Model->n_State_Num - 1); n_State_Index++)
	{
		pu_DHMM_Model->d2dda_A[n_State_Index][n_State_Index] = 0.5F;
		pu_DHMM_Model->d2dda_A[n_State_Index][n_State_Index + 1] = 0.5F;
	}
	pu_DHMM_Model->d2dda_A[pu_DHMM_Model->n_State_Num - 1][pu_DHMM_Model->n_State_Num - 1] = 1.0F;
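	//	For example, with n_State_Num == 4 the resulting left-to-right transition matrix is:
	//		[ 0.5  0.5  0.0  0.0 ]
	//		[ 0.0  0.5  0.5  0.0 ]
	//		[ 0.0  0.0  0.5  0.5 ]
	//		[ 0.0  0.0  0.0  1.0 ]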

	//	Initialize B uniformly: every state emits each code word with the same probability, 1 / (code book size)
	for (n_State_Index = 0; n_State_Index < pu_DHMM_Model->n_State_Num; n_State_Index++)
	{
		for (n_Code_Word_Index = 0; n_Code_Word_Index < pu_DHMM_Model->n_Code_Book_Size; n_Code_Word_Index++)
		{
			pu_DHMM_Model->d2dda_B[n_State_Index][n_Code_Word_Index] = 1.0F / pu_DHMM_Model->n_Code_Book_Size;
		}
	}

	d2dda_Nom = d2dda_New(pu_DHMM_Model->n_State_Num, pu_DHMM_Model->n_Code_Book_Size);
	pd_Denom = new double[pu_DHMM_Model->n_State_Num];
	ASSERT((d2dda_Nom != NULL) && (pd_Denom != NULL));
	for (n_Word_Sample_Index = 0; n_Word_Sample_Index < n_Word_Sample_Num; n_Word_Sample_Index++)
	{
		pu_Word_Sample[n_Word_Sample_Index].d2dda_Gamma = d2dda_New(pu_Word_Sample[n_Word_Sample_Index].n_Feature_Sequence_Len, pu_DHMM_Model->n_State_Num);
		ASSERT(pu_Word_Sample[n_Word_Sample_Index].d2dda_Gamma != NULL);
	}

	d_Total_Likelihood = -MAX_DOUBLE_VALUE;
	for (n_Loop_Index = 0; n_Loop_Index < MODEL_TRAIN_CODE_BOOK_LOOP_TIME; n_Loop_Index++)
	{
		//	Compute the Gamma coefficients of each word sample: a [frame count] x [state count] array,
		//	where Gamma[t][j] is the posterior probability of being in state j at frame t given the sample and the current model
		for (n_Word_Sample_Index = 0; n_Word_Sample_Index < n_Word_Sample_Num; n_Word_Sample_Index++)
		{
			DHMM_Model_Calculate_Gamma(&pu_Word_Sample[n_Word_Sample_Index], pu_DHMM_Model);
		}

		//	Prepare for re-estimating B: zero the numerators and the denominators
		for (n_State_Index = 0; n_State_Index < pu_DHMM_Model->n_State_Num; n_State_Index++)
		{
			pd_Denom[n_State_Index] = 0.0F;

			for (n_Code_Word_Index = 0; n_Code_Word_Index < pu_DHMM_Model->n_Code_Book_Size; n_Code_Word_Index++)
			{
				d2dda_Nom[n_State_Index][n_Code_Word_Index] = 0.0F;
			}
		}

		//	Accumulate the numerator of each B entry and the denominator of each row
		for (n_State_Index = 0; n_State_Index < pu_DHMM_Model->n_State_Num; n_State_Index++)
		{
			for (n_Word_Sample_Index = 0; n_Word_Sample_Index < n_Word_Sample_Num; n_Word_Sample_Index++)
			{
				for (n_Frame_Index = 0; n_Frame_Index < pu_Word_Sample[n_Word_Sample_Index].n_Feature_Sequence_Len; n_Frame_Index++)
				{
					d2dda_Nom[n_State_Index][pu_Word_Sample[n_Word_Sample_Index].pn_VQed_Feature_Sequence[n_Frame_Index]]
						+= pu_Word_Sample[n_Word_Sample_Index].d2dda_Gamma[n_Frame_Index][n_State_Index];
					pd_Denom[n_State_Index] += pu_Word_Sample[n_Word_Sample_Index].d2dda_Gamma[n_Frame_Index][n_State_Index];
				}
			}
		}

		//	Recompute B
		for (n_State_Index = 0; n_State_Index < pu_DHMM_Model->n_State_Num; n_State_Index++)
		{
			for (n_Code_Word_Index = 0; n_Code_Word_Index < pu_DHMM_Model->n_Code_Book_Size; n_Code_Word_Index++)
			{
				pu_DHMM_Model->d2dda_B[n_State_Index][n_Code_Word_Index] = 
					d2dda_Nom[n_State_Index][n_Code_Word_Index] / pd_Denom[n_State_Index];
			}
		}
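
		//	This re-estimation is the standard Baum-Welch update of the emission
		//	matrix from the state posteriors Gamma:
		//		B[j][k] = ( sum over samples and frames t with o(t) == k of Gamma[t][j] )
		//				  / ( sum over samples and frames t of Gamma[t][j] )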
