⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 dhmm_recog_mfc.cpp

📁 语音识别配套的VQ及DHMM模型训练程序(C语言)
💻 CPP
📖 第 1 页 / 共 2 页
字号:
		// --- Tail of the per-model allocation loop (the loop header and the
		// --- enclosing function signature are above this excerpt). Each model
		// --- gets: Pi (initial-state vector), A (state x state transition
		// --- matrix) and B (state x codebook emission matrix).
		pu_DHMM_Model[n_Model_Index].n_State_Num = u_Pro_Config.n_DHMM_Model_State_Num;
		pu_DHMM_Model[n_Model_Index].n_Code_Book_Size = u_Pro_Config.n_VQ_Code_Book_Size;
		pu_DHMM_Model[n_Model_Index].pdPi = new double[u_Pro_Config.n_DHMM_Model_State_Num];
		pu_DHMM_Model[n_Model_Index].d2dda_A = d2dda_New(u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_DHMM_Model_State_Num);
		pu_DHMM_Model[n_Model_Index].d2dda_B = d2dda_New(u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_VQ_Code_Book_Size);
		ASSERT((pu_DHMM_Model[n_Model_Index].pdPi != NULL)
			&& (pu_DHMM_Model[n_Model_Index].d2dda_A != NULL)
			&& (pu_DHMM_Model[n_Model_Index].d2dda_B != NULL));
	}
	//	Load the model set from file when the configuration selects a
	//	"with silence model" variant (either load or train).
	if ((u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) == MODEL_CONFIG_LOAD_WITH_SILENCE_MODEL ||
		(u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) == MODEL_CONFIG_TRAIN_WITH_SILENCE_MODEL)
		nRetCode = DHMM_Model_Load_DHMM_Model_File_With_Silence(u_Pro_Config.sz_Toload_DHMM_Model_File_Name, pu_DHMM_Model, Total_Model_Num);
	ASSERT(nRetCode == 0);

	//	Prepare memory for this word sample.
	u_Word_Sample.n_Feature_Sequence_Len = dfa_Feature_Get_Sentence_Frame_Num(sz_Feature_File_Name, n_Word_Sample_Index);
	u_Word_Sample.n_Feature_Dim = u_Pro_Config.n_Feature_Dim;

	int word_len = u_Word_Sample.n_Feature_Sequence_Len;
	//ASSERT(word_len >= 0);
	// Empty/invalid sample: release everything allocated so far and bail out.
	// NOTE(review): this path returns 0, which is indistinguishable from the
	// success case "best match is model index 0" at the bottom of the
	// function — confirm callers can tell the two apart.
	if(word_len <= 0)
	{
		//	Free model memory and codebook memory.
		for (n_Model_Index = 0; n_Model_Index < Total_Model_Num; n_Model_Index++)
		{
			delete[] pu_DHMM_Model[n_Model_Index].pdPi;
			d2dda_Free(pu_DHMM_Model[n_Model_Index].d2dda_A, u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_DHMM_Model_State_Num);
			d2dda_Free(pu_DHMM_Model[n_Model_Index].d2dda_B, u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_VQ_Code_Book_Size);
		}
		// Both silence-model configurations shrink the state count by 2 —
		// presumably undoing a "+2" applied before this excerpt when the
		// silence states were added (TODO confirm; the two branches are
		// identical and could be a single OR'ed condition).
		if ((u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) == (MODEL_CONFIG_TRAIN_WITH_SILENCE_MODEL))
			u_Pro_Config.n_DHMM_Model_State_Num -= 2;
		else if((u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) ==  MODEL_CONFIG_LOAD_WITH_SILENCE_MODEL)
			u_Pro_Config.n_DHMM_Model_State_Num -= 2;

		delete[] pu_DHMM_Model;
		d2dda_Free(d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);
		return 0;
	}

	// Raw feature matrix: one row per frame, n_Feature_Dim columns.
	u_Word_Sample.d2dda_Feature_Sequence =
		d2dda_New(u_Word_Sample.n_Feature_Sequence_Len, u_Word_Sample.n_Feature_Dim);

	// One VQ codebook index per frame.
	u_Word_Sample.pn_VQed_Feature_Sequence = 
		new int[u_Word_Sample.n_Feature_Sequence_Len];
	ASSERT((u_Word_Sample.d2dda_Feature_Sequence  != NULL)
		&& (u_Word_Sample.pn_VQed_Feature_Sequence != NULL));

	//	Read the word's feature sequence; the helper returns the number of
	//	frames actually read, which must match the announced length.
	nRetCode = dfa_Feature_Read_A_Sentence(sz_Feature_File_Name, n_Word_Sample_Index, u_Pro_Config.n_Feature_Dim,
		u_Word_Sample.d2dda_Feature_Sequence);
	ASSERT(nRetCode == u_Word_Sample.n_Feature_Sequence_Len);

	//	Vector-quantize the features against the codebook.
	nRetCode = DHMM_VQ_Encode_A_Word_Sample(d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim,
		&u_Word_Sample);
	ASSERT(nRetCode == 0);

	int fea_num = u_Word_Sample.n_Feature_Dim;

	//	Free the raw feature buffer; only the VQ'ed sequence is needed below.
	d2dda_Free(u_Word_Sample.d2dda_Feature_Sequence, u_Word_Sample.n_Feature_Sequence_Len, u_Word_Sample.n_Feature_Dim);

	//	Prepare memory for the per-model match scores.
	// NOTE(review): the allocation of pd_DHMM_Model_Probably is commented
	// out here yet the pointer is asserted non-NULL and written below —
	// presumably it is allocated before this excerpt or by the caller;
	// verify, otherwise this writes through an invalid pointer.
	//pd_DHMM_Model_Probably = new double[u_Pro_Config.n_DHMM_Model_Num];
	d2dna_DHMM_Model_Sequence = d2dna_New(Total_Model_Num, u_Word_Sample.n_Feature_Sequence_Len);
	ASSERT((pd_DHMM_Model_Probably != NULL) && (d2dna_DHMM_Model_Sequence != NULL));

	//	Score this word against every model (Viterbi only, when configured).
	for (n_Model_Index = 0; n_Model_Index < Total_Model_Num; n_Model_Index++)
	{
		if ((u_Pro_Config.l_DHMM_Recog_Config & RECOG_CONFIG_METHOD_MASK) == RECOG_CONFIG_METHOD_VITERBI_ONLY)
		{
			nRetCode = DHMM_Recog_Viterbi(&pu_DHMM_Model[n_Model_Index], &u_Word_Sample, &pd_DHMM_Model_Probably[n_Model_Index], d2dna_DHMM_Model_Sequence[n_Model_Index]);
			ASSERT(nRetCode == 0);
		}
	}

	//	Pick the best match: highest per-frame-normalized score wins.
	n_Recog_Result = -1;
	d_Recog_Result_Probably = -MAX_DOUBLE_VALUE;
	for (n_Model_Index = 0; n_Model_Index < Total_Model_Num; n_Model_Index++)
	{
		// Normalize by the frame count so scores are comparable across
		// word lengths (presumably a log-likelihood — TODO confirm).
		pd_DHMM_Model_Probably[n_Model_Index] /= word_len;
		if (pd_DHMM_Model_Probably[n_Model_Index] > d_Recog_Result_Probably)
		{
			n_Recog_Result = n_Model_Index;
			d_Recog_Result_Probably = pd_DHMM_Model_Probably[n_Model_Index];
		}
	}

	//	Free the VQ'ed feature sequence.
	delete[] u_Word_Sample.pn_VQed_Feature_Sequence;

	//	Free the match-score storage (score array ownership matches the
	//	commented-out allocation above — see NOTE(review) there).
	//delete[] pd_DHMM_Model_Probably;
	d2dna_Free(d2dna_DHMM_Model_Sequence, Total_Model_Num, u_Word_Sample.n_Feature_Sequence_Len);

	//	Free model memory and codebook memory.
	for (n_Model_Index = 0; n_Model_Index < Total_Model_Num; n_Model_Index++)
	{
		delete[] pu_DHMM_Model[n_Model_Index].pdPi;
		d2dda_Free(pu_DHMM_Model[n_Model_Index].d2dda_A, u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_DHMM_Model_State_Num);
		d2dda_Free(pu_DHMM_Model[n_Model_Index].d2dda_B, u_Pro_Config.n_DHMM_Model_State_Num, u_Pro_Config.n_VQ_Code_Book_Size);
	}
	// Undo the silence-state adjustment (see the matching note in the
	// early-exit path above).
	if ((u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) == (MODEL_CONFIG_TRAIN_WITH_SILENCE_MODEL))
		u_Pro_Config.n_DHMM_Model_State_Num -= 2;
	else if((u_Pro_Config.l_DHMM_Model_Config & MODEL_CONFIG_GENERATE_DHMM_MODEL_MASK) ==  MODEL_CONFIG_LOAD_WITH_SILENCE_MODEL)
		u_Pro_Config.n_DHMM_Model_State_Num -= 2;

	delete[] pu_DHMM_Model;
	d2dda_Free(d2dda_Code_Book, u_Pro_Config.n_VQ_Code_Book_Size, u_Pro_Config.n_Feature_Dim);

	// Index of the best-matching model (>= 0 once any model was scored).
	return n_Recog_Result;
}



//////////////////////////////////////////////////////////////////////
//	Function:  DHMM_Recog_Viterbi
//	Purpose:   Compute the maximum likelihood with the Viterbi algorithm
//	Kind:      API
//	Inputs:
//		pu_DHMM_Model,      the DHMM model to score against
//		pu_Word_Sample,     the word sample to be scored
//	Outputs:
//		pd_Max_Likelihood,  receives the computed likelihood
//		pn_Status_Sequence, receives the state visited at each frame
//	Return:
//		0 on success
//	Note: this is a dispatch function; based on the configuration in
//		u_Pro_Config.l_DHMM_Recog_Config it forwards to the concrete
//		implementation.  Only the LHS and HQ procedures are implemented.
int DHMM_Recog_Viterbi(DHMM_MODEL * pu_DHMM_Model,
					   WORD_SAMPLE * pu_Word_Sample,
					   double * pd_Max_Likelihood, int * pn_Status_Sequence)
{
	switch (u_Pro_Config.l_DHMM_Recog_Config & RECOG_CONFIG_RECOG_PROCEDURE_MASK)
	{
	case RECOG_CONFIG_RECOG_PROCEDURE_LHS:
		return DHMM_Recog_Viterbi_LHS(pu_DHMM_Model, pu_Word_Sample, pd_Max_Likelihood, pn_Status_Sequence);
	case RECOG_CONFIG_RECOG_PROCEDURE_HQ:
		return DHMM_Recog_Viterbi_HQ(pu_DHMM_Model, pu_Word_Sample, pd_Max_Likelihood, pn_Status_Sequence);
	case RECOG_CONFIG_RECOG_PROCEDURE_STD:	/* fallthrough — not implemented */
	case RECOG_CONFIG_RECOG_PROCEDURE_GL:	/* fallthrough — not implemented */
	case RECOG_CONFIG_RECOG_PROCEDURE_WP:	/* fallthrough — not implemented */
	default:
		ASSERT(0);
		break;
	}

	// NOTE(review): ASSERT compiles away in release builds, so an
	// unimplemented procedure falls through here and reports success
	// without writing *pd_Max_Likelihood.  Behavior kept for
	// compatibility with existing callers that ASSERT(nRetCode == 0).
	return 0;
}

//////////////////////////////////////////////////////////////////////
//	Function:  DHMM_Recog_Forward_Backward
//	Purpose:   Compute the maximum likelihood with the forward-backward algorithm
//	Kind:      API
//	Inputs:
//		pu_DHMM_Model,      the DHMM model to score against
//		pu_Word_Sample,     the word sample to be scored
//	Outputs:
//		pd_Max_Likelihood,  receives the computed likelihood
//	Return:
//		0 on success
//	Note: this is a dispatch function; based on the configuration in
//		u_Pro_Config.l_DHMM_Recog_Config it forwards to the concrete
//		implementation.  Only the GL and HQ procedures are implemented.
int DHMM_Recog_Forward_Backward(DHMM_MODEL * pu_DHMM_Model,
								WORD_SAMPLE * pu_Word_Sample,
								double * pd_Max_Likelihood)
{
	switch (u_Pro_Config.l_DHMM_Recog_Config & RECOG_CONFIG_RECOG_PROCEDURE_MASK)
	{
	case RECOG_CONFIG_RECOG_PROCEDURE_GL:
		return DHMM_Recog_Forward_Backward_GL(pu_DHMM_Model, pu_Word_Sample, pd_Max_Likelihood);
	case RECOG_CONFIG_RECOG_PROCEDURE_HQ:
		return DHMM_Recog_Forward_Backward_HQ(pu_DHMM_Model, pu_Word_Sample, pd_Max_Likelihood);
	case RECOG_CONFIG_RECOG_PROCEDURE_STD:	/* fallthrough — not implemented */
	case RECOG_CONFIG_RECOG_PROCEDURE_LHS:	/* fallthrough — not implemented */
	case RECOG_CONFIG_RECOG_PROCEDURE_WP:	/* fallthrough — not implemented */
	default:
		ASSERT(0);
		break;
	}

	// NOTE(review): ASSERT compiles away in release builds, so an
	// unimplemented procedure falls through here and reports success
	// without writing *pd_Max_Likelihood.  Behavior kept for
	// compatibility with existing callers that ASSERT(nRetCode == 0).
	return 0;
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -