⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 dhmm_model_mfc.cpp

📁 语音识别配套的VQ及DHMM模型训练程序(C语言)
💻 CPP
📖 第 1 页 / 共 2 页
字号:
	}

	fclose(fp_DHMM_Model_File);
	fclose(fp_DHMM_Model_TXT_File);
	fclose(fp_DHMM_Model_BIN_File);
	d2dda_Free(d2dda_LogB, pu_DHMM_Model->n_State_Num, pu_DHMM_Model->n_Code_Book_Size);


	//	释放模型占用内存
	for (n_Model_Index = 0; n_Model_Index < n_DHMM_Model_Num; n_Model_Index++)
	{
		delete[] pu_DHMM_Model[n_Model_Index].pdPi;
		d2dda_Free(pu_DHMM_Model[n_Model_Index].d2dda_A, u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_DHMM_Model_State_Num);
		d2dda_Free(pu_DHMM_Model[n_Model_Index].d2dda_B, u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_VQ_Code_Book_Size);
	}
	delete[] pu_DHMM_Model;
	if ((u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) == (MODEL_CONFIG_TRAIN_WITH_SILENCE_MODEL))
		u_Pro_Config.n_DHMM_Model_State_Num -= 2;
	else if((u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) ==  MODEL_CONFIG_LOAD_WITH_SILENCE_MODEL)
		u_Pro_Config.n_DHMM_Model_State_Num -= 2;

	return 0;
}

//////////////////////////////////////////////////////////////////////
//	函数名称:DHMM_Model_Train_All_DHMM_Model
//	函数功能:训练所有词的DHMM模型
//	函数性质:API
//	输入参数:
//		n_DHMM_Model_Num,要训练的模型个数
//	输出参数:
//		pu_DHMM_Model,存放训好的DHMM模型
//	返回值:
//		0 表示成功
//	备注:关于训练的细节参数,均在u_Pro_Config中指定,
//		n_Feature_Person_Num,总的语料库人数
//		n_DHMM_Model_Train_Set_Person_Num,训练集的人数
//		n_DHMM_Model_Person_Start_Index,训练集起始人的标号,
//			训练集将从此人顺序持续指定人数,标号越界自动折返到0
int DHMM_Model_Train_All_DHMM_Model(DHMM_MODEL * pu_DHMM_Model, int n_DHMM_Model_Num)
{
	int nRetCode;
	char sz_Feature_File_Name[256];
	DYNA_2DIM_DOUBLE_ARRAY d2dda_Code_Book;
	WORD_SAMPLE * pu_Word_Sample;
	int n_Word_Sample_Index, n_Person_Index, n_Train_Set_Person_Index;

	//	BUGFIX(review): n_DHMM_Model_Num was previously unused — the loop below is
	//	driven by u_Pro_Config.n_DHMM_Model_Num while pu_DHMM_Model is indexed by it.
	//	If the caller sized pu_DHMM_Model from n_DHMM_Model_Num and the two counts
	//	disagree, the array is overrun.  Require them to agree explicitly.
	ASSERT(n_DHMM_Model_Num == u_Pro_Config.n_DHMM_Model_Num);

	//	Load the VQ code book used to quantize the feature vectors
	d2dda_Code_Book = d2dda_New(u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);
	ASSERT(d2dda_Code_Book != NULL);
	nRetCode = DHMM_VQ_Load_Code_Book_File(u_Pro_Config.sz_Toload_Code_Book_File_Name,
		d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);
	ASSERT(nRetCode == 0);

	//	The training-set speaker range must lie inside the corpus
	ASSERT((0 <= u_Pro_Config.n_DHMM_Model_Person_Start_Index) && (u_Pro_Config.n_DHMM_Model_Person_Start_Index < u_Pro_Config.n_Feature_Person_Num)
		&& (0 <= u_Pro_Config.n_DHMM_Model_Person_End_Index) && (u_Pro_Config.n_DHMM_Model_Person_End_Index < u_Pro_Config.n_Feature_Person_Num));

	//	One WORD_SAMPLE slot per training-set speaker
	pu_Word_Sample = new WORD_SAMPLE[u_Pro_Config.n_DHMM_Model_Train_Set_Person_Num];
	ASSERT(pu_Word_Sample != NULL);

	//	Outer loop: train the model of each word in the vocabulary
	for (n_Word_Sample_Index = 0; n_Word_Sample_Index < u_Pro_Config.n_DHMM_Model_Num; n_Word_Sample_Index++)
	{
		int Sen_Index = n_Word_Sample_Index + u_Pro_Config.n_Sentence_Start_Index;
		//	Inner loop: collect this word's utterance from every training-set speaker
		n_Train_Set_Person_Index = 0;
		for (n_Person_Index = 0; n_Person_Index < u_Pro_Config.n_Feature_Person_Num; n_Person_Index++)
		{
			//	Skip speakers that belong to the test set.  The training range
			//	[Start, End] may wrap around the end of the corpus, hence two cases.
			if (u_Pro_Config.n_DHMM_Model_Person_Start_Index <= u_Pro_Config.n_DHMM_Model_Person_End_Index)
			{
				if ((n_Person_Index < u_Pro_Config.n_DHMM_Model_Person_Start_Index) || (n_Person_Index > u_Pro_Config.n_DHMM_Model_Person_End_Index)) continue;
			}
			else
			{
				if ((n_Person_Index > u_Pro_Config.n_DHMM_Model_Person_End_Index) && (n_Person_Index < u_Pro_Config.n_DHMM_Model_Person_Start_Index)) continue;
			}

			//	Allocate buffers for this speaker's utterance of the word
			sprintf(sz_Feature_File_Name, u_Pro_Config.sz_Feature_Origin_File_Name_Format, n_Person_Index);
			pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len = dfa_Feature_Get_Sentence_Frame_Num(sz_Feature_File_Name, Sen_Index);
			pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Dim = u_Pro_Config.n_Feature_Dim;
			pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence =
				d2dda_New(pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len, pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Dim);
			pu_Word_Sample[n_Train_Set_Person_Index].pn_VQed_Feature_Sequence = 
				new int[pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len];
			ASSERT((pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence  != NULL)
				&& (pu_Word_Sample[n_Train_Set_Person_Index].pn_VQed_Feature_Sequence != NULL));

			//	Read the raw feature vectors of this utterance
			nRetCode = dfa_Feature_Read_A_Sentence(sz_Feature_File_Name, Sen_Index, u_Pro_Config.n_Feature_Dim,
				pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence);
			ASSERT(nRetCode == pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len);

			//	Vector-quantize the features against the code book
			DHMM_VQ_Encode_A_Word_Sample(d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim,
				&pu_Word_Sample[n_Train_Set_Person_Index]);

			//	The raw features are no longer needed once quantized
			d2dda_Free(pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence,
				pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len, pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Dim);

			n_Train_Set_Person_Index++;
		}
		ASSERT(n_Train_Set_Person_Index == u_Pro_Config.n_DHMM_Model_Train_Set_Person_Num);

		PRO_LOG("Model:\tTraining Model %4d of %4d.\n", n_Word_Sample_Index, u_Pro_Config.n_DHMM_Model_Num);

		//	Train this word's model from the quantized training samples
		DHMM_Model_Train_DHMM_Model(pu_Word_Sample, u_Pro_Config.n_DHMM_Model_Train_Set_Person_Num, &pu_DHMM_Model[n_Word_Sample_Index]);

		//	Release the VQ'ed sequences of this word
		for (n_Train_Set_Person_Index = 0; n_Train_Set_Person_Index < u_Pro_Config.n_DHMM_Model_Train_Set_Person_Num; n_Train_Set_Person_Index++)
		{
			delete[] pu_Word_Sample[n_Train_Set_Person_Index].pn_VQed_Feature_Sequence;
		}
	}

	//	Release the sample array and the code book
	delete[] pu_Word_Sample;
	d2dda_Free(d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);

	return 0;
}

//////////////////////////////////////////////////////////////////////
//	函数名称:DHMM_Model_Train_Silence_DHMM_Model
//	函数功能:训练静音的单状态DHMM模型
//	函数性质:API
//	输入参数:
//		无,该函数没有n_DHMM_Model_Num参数,语料条数由u_Pro_Config.n_DHMM_Model_Num指定
//	输出参数:
//		pu_DHMM_Model,存放训好的DHMM模型
//	返回值:
//		0 表示成功
//	备注:关于训练的细节参数,均在u_Pro_Config中指定,
//		n_Feature_Person_Num,总的语料库人数
//		n_DHMM_Model_Train_Set_Person_Num,训练集的人数
//		n_DHMM_Model_Person_Start_Index,训练集起始人的标号,
//			训练集将从此人顺序持续指定人数,标号越界自动折返到0
int DHMM_Model_Train_Silence_DHMM_Model(DHMM_MODEL * pu_DHMM_Model)
{
	int nRetCode;
	char sz_Feature_File_Name[256];
	char sz_Silence_File_Name[256];
	DYNA_2DIM_DOUBLE_ARRAY d2dda_Code_Book;
	WORD_SAMPLE * pu_Word_Sample;
	int n_Word_Sample_Index, n_Person_Index, n_Train_Set_Person_Index;
	int n_DHMM_Model_Train_Set_Person_Num; 

	int     i, j/*, k,  n,  m*/;
	int  Status_num = 1;					// the silence model has exactly one state
	int     Output_num = u_Pro_Config.n_VQ_Code_Book_Size;					// VQ code book size (number of code words)
	double  *Pi;							// initial state probabilities
	double  **A;							// A matrix: Status_num*Status_num
	double  **B;							// B matrix: Status_num*Output_num
	double  w;

// Generate (A, B, Pi) — the arrays live in the caller-provided model
	Pi = pu_DHMM_Model->pdPi;	//	The above block transform to this line. #DongMing#

	// A matrix storage comes from the model
	A = pu_DHMM_Model->d2dda_A;	//	The above block transform to this line. #DongMing#

	// B matrix storage comes from the model
	B = pu_DHMM_Model->d2dda_B;	//	The above block transform to this line. #DongMing#

	// Initialize Pi: state 0 has probability 1, all others 0
	Pi[0] = 1;
	for(i=1; i<Status_num; i++) Pi[i] = 0;
	
	for(i=0; i<Status_num; i++)
	{
		// Initialize the A matrix (left-to-right; with Status_num==1 this
		// reduces to the single self-loop A[0][0] = 1)
		for(j=0; j<Status_num; j++) A[i][j] = 0;
		if(i < Status_num-1)
		{
			A[i][i]   = 0.5;
			A[i][i+1] = 0.5;
		}
		else
		{
			A[i][i]   = 1.0;
		}
		// Normalize each row of A
		w = 0;
		for(j=0; j<Status_num; j++) w = w + A[i][j];
		for(j=0; j<Status_num; j++) A[i][j] = A[i][j] / w;
		
		w = 0;
		// The randomized B initialization below is dead — B is zeroed right after,
		// because B is re-estimated by counting VQ labels further down
		for(j=0; j<Output_num; j++)
		{
			B[i][j] = 1.0 / Output_num * (0.9 + ((rand()%1000) / 5000.0));
			w = w + B[i][j];
		}
		// Zero B so it can be used as a histogram of VQ labels
		for(j=0; j<Output_num; j++) B[i][j] = 0; //B[i][j] / w;
	}

	//	Load the VQ code book
	d2dda_Code_Book = d2dda_New(u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);
	ASSERT(d2dda_Code_Book != NULL);
	nRetCode = DHMM_VQ_Load_Code_Book_File(u_Pro_Config.sz_Toload_Code_Book_File_Name,
		d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);
	ASSERT(nRetCode == 0);

	//	The training-set speaker range must lie inside the corpus
	ASSERT((0 <= u_Pro_Config.n_DHMM_Model_Person_Start_Index) && (u_Pro_Config.n_DHMM_Model_Person_Start_Index < u_Pro_Config.n_Feature_Person_Num)
		&& (0 <= u_Pro_Config.n_DHMM_Model_Person_End_Index) && (u_Pro_Config.n_DHMM_Model_Person_End_Index < u_Pro_Config.n_Feature_Person_Num));

	//	One WORD_SAMPLE slot per (word, training-set speaker) pair
	pu_Word_Sample = new WORD_SAMPLE[u_Pro_Config.n_DHMM_Model_Train_Set_Person_Num * u_Pro_Config.n_DHMM_Model_Num];
	ASSERT(pu_Word_Sample != NULL);

	n_Train_Set_Person_Index = 0;
	//	Outer loop: load the silence segment attached to every word
	for (n_Word_Sample_Index = 0; n_Word_Sample_Index < u_Pro_Config.n_DHMM_Model_Num; n_Word_Sample_Index++)
	{
		int Sen_Index = n_Word_Sample_Index + u_Pro_Config.n_Sentence_Start_Index;
		//	Inner loop: one silence sample from each training-set speaker
		for (n_Person_Index = 0; n_Person_Index < u_Pro_Config.n_Feature_Person_Num; n_Person_Index++)
		{
			//	Skip speakers that belong to the test set (range may wrap)
			if (u_Pro_Config.n_DHMM_Model_Person_Start_Index <= u_Pro_Config.n_DHMM_Model_Person_End_Index)
			{
				if ((n_Person_Index < u_Pro_Config.n_DHMM_Model_Person_Start_Index) || (n_Person_Index > u_Pro_Config.n_DHMM_Model_Person_End_Index)) continue;
			}
			else
			{
				if ((n_Person_Index > u_Pro_Config.n_DHMM_Model_Person_End_Index) && (n_Person_Index < u_Pro_Config.n_DHMM_Model_Person_Start_Index)) continue;
			}

			//	Build the silence file name: replace the 4-char ".xxx" extension
			//	of the feature file with "_sl.dat"
			//	(renamed from "int j" which shadowed the outer loop variable j)
			sprintf(sz_Feature_File_Name, u_Pro_Config.sz_Feature_Origin_File_Name_Format, n_Person_Index);
			strcpy(sz_Silence_File_Name, sz_Feature_File_Name);
			int n_Base_Len = strlen(sz_Silence_File_Name) - 4;
			sz_Silence_File_Name[n_Base_Len] = '\0';
			strcat(sz_Silence_File_Name,"_sl.dat");

			//	Allocate buffers for this silence segment
			pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len = dfa_Feature_Get_Silence_Frame_Num(sz_Silence_File_Name, Sen_Index);
			//pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len = 2 * RELAX_FRAME;
			pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Dim = u_Pro_Config.n_Feature_Dim;
			pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence =
				d2dda_New(pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len, pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Dim);
			pu_Word_Sample[n_Train_Set_Person_Index].pn_VQed_Feature_Sequence = 
				new int[pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len];
			ASSERT((pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence  != NULL)
				&& (pu_Word_Sample[n_Train_Set_Person_Index].pn_VQed_Feature_Sequence != NULL));

			//	Read the raw silence feature vectors
			nRetCode = dfa_Feature_Read_A_Silence(sz_Silence_File_Name, Sen_Index, u_Pro_Config.n_Feature_Dim,
				pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence);
			ASSERT(nRetCode == pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len);

			//	Vector-quantize the silence features
			DHMM_VQ_Encode_A_Word_Sample(d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim,
				&pu_Word_Sample[n_Train_Set_Person_Index]);

			//	The raw features are no longer needed once quantized
			d2dda_Free(pu_Word_Sample[n_Train_Set_Person_Index].d2dda_Feature_Sequence,
				pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Sequence_Len, pu_Word_Sample[n_Train_Set_Person_Index].n_Feature_Dim);

			n_Train_Set_Person_Index++;
		}
	}

	PRO_LOG("Model:\tTraining Silence Model...\n");
    n_DHMM_Model_Train_Set_Person_Num = n_Train_Set_Person_Index;
	//	Guard the divisions below: fr_num would be uninitialized and the divisor
	//	zero if no training samples were loaded
	ASSERT(n_DHMM_Model_Train_Set_Person_Num > 0);

	int fr_num;

	//	Estimate B[0][*] as a histogram of VQ labels over all silence frames
	for(i = 0; i < n_DHMM_Model_Train_Set_Person_Num; i++)
	{
		fr_num = pu_Word_Sample[i].n_Feature_Sequence_Len;
		for(j = 0; j < fr_num; j++)
			B[0][pu_Word_Sample[i].pn_VQed_Feature_Sequence[j]]++;
	}

	//	Release the VQ'ed sequences
	for (n_Train_Set_Person_Index = 0; n_Train_Set_Person_Index < n_DHMM_Model_Train_Set_Person_Num; n_Train_Set_Person_Index++)
	{
		delete[] pu_Word_Sample[n_Train_Set_Person_Index].pn_VQed_Feature_Sequence;
	}

	for(i = 0; i < Output_num; i++)
	{
		//	NOTE(review): fr_num is the frame count of the LAST sample only, not the
		//	total frame count; the normalization below makes the absolute scale
		//	irrelevant except through the Epsilon_B floor — confirm this is intended.
		B[0][i] /= (fr_num * n_DHMM_Model_Train_Set_Person_Num);
		if (B[0][i] < Epsilon_B)
			B[0][i] = Epsilon_B;
	}

	//	BUGFIX: w still held a leftover sum (~1.0) from the random-initialization
	//	phase above, so the final normalization divided by roughly (1 + sum(B)) and
	//	the silence B row did not sum to 1.  Reset the accumulator first.
	w = 0;
	for(j=0; j<Output_num; j++)
		w = w + B[0][j];
	// Normalize the B row so it sums to 1
	for(j=0; j<Output_num; j++) B[0][j] = B[0][j] / w;

	//	Release the sample array and the code book
	delete[] pu_Word_Sample;
	d2dda_Free(d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);

	return 0;
}

//////////////////////////////////////////////////////////////////////
//	函数名称:DHMM_Model_Train_DHMM_Model
//	函数功能:训练一个词的DHMM模型
//	函数性质:API
//	输入参数:
//		pu_Word_Sample,存放该词训练集的数据
//		n_Word_Sample_Num,训练集的词数
//	输出参数:
//		pu_DHMM_Model,存放训好的DHMM模型
//	返回值:
//		0 表示成功
//	备注:WORD_SAMPLE的定义,参见公共的.H文件
//		该函数是一个分发函数,按照u_Pro_Config.l_DHMM_Model_Config配置的信息
//		将具体调用不同的函数体
int DHMM_Model_Train_DHMM_Model(WORD_SAMPLE * pu_Word_Sample, int n_Word_Sample_Num,
								DHMM_MODEL * pu_DHMM_Model)
{
	//	Dispatch to the concrete training routine selected by the
	//	MODEL_CONFIG_TRAIN_PROCEDURE_* bits of u_Pro_Config.l_DHMM_Model_Config.
	long l_Train_Procedure = u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_TRAIN_PROCEDURE_MASK;

	if (l_Train_Procedure == MODEL_CONFIG_TRAIN_PROCEDURE_GL)
	{
		return DHMM_Model_Train_DHMM_Model_GL(pu_Word_Sample, n_Word_Sample_Num, pu_DHMM_Model);
	}

	if (l_Train_Procedure == MODEL_CONFIG_TRAIN_PROCEDURE_HQ)
	{
		return DHMM_Model_Train_DHMM_Model_HQ(pu_Word_Sample, n_Word_Sample_Num, pu_DHMM_Model);
	}

	//	STD / WP are not implemented; LHS training is identical to GL and was
	//	deliberately omitted; any other value is a configuration error.
	ASSERT(0);

	return 0;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -