⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 calibrtwocamadlg.cpp

📁 这是前段时间自己写的一个给两个摄像机定标的对话框程序
💻 CPP
📖 第 1 页 / 共 4 页
字号:
	{
		max_2	   = INT_MIN;
		maxIndex_2 = -1;
		angle2	   = 0;
		
		for( i=0; i<n; i++ )
		{
			if( fabs(angles[i]) > 500 ) continue;
			
			degree1 = fabs(KMathematica::MapAnglePi2mPi(angles[i]-angle1));
			degree2 = fabs(degree1 - PI/2);
			if( angleCounter[i] >= max_2 && ((degree2 < PI/6)&&exact || (!exact)&&( degree1 > diff) ) )
			{
				max_2 = angleCounter[i];
				maxIndex_2 = i;
				angle2 = angles[i];
				diff = degree1; 
			}
		}
		
		if( max_2 >= thr2 && maxIndex_2 != -1 )
			retVal.y = (float)angle2;
	}
	
	//	delete []angles;
	delete []angleCounter;
	
	return retVal;
}
//----------------------------------------------------------------------------//
//
// Function: CalibrateCamera
// Purpose : Finds the A matrix(m_cameraMatrix) and the m_distortion vector from the 
//			 loaded images. There must be at least 4 images in order to get a correct 
//			 A matrix. The more the number  of the images used the more exact results 
//			 are obtained. This function also calculates the recpective rotation and 
//			 translation vectors for the input calibration images used. 
// Output  : m_cameraMatrix : a0 = horizontal scaling, a4= vertical scaling, a1 = skew
//							  (a2,a5) = principal point.
//			 m_distortion     : first two entries are radial m_distortion and last two entries 
//						  	  are tangential distoriton parameters.
//----------------------------------------------------------------------------//

void CCalibrtwocamaDlg::CalibrateCamera()
{
	UpdateData();

	if( m_nEffectiveImageNumber == 0 ) 
		return;

	int* numPoints = new int[m_nEffectiveImageNumber];
	for( int k=0; k < m_nEffectiveImageNumber; k++ ) numPoints[k] = m_nNumCorners;

	delete[]m_distortion  ; m_distortion   = new double[4]; for( k=0; k<4; k++ ) m_distortion  [k] = 0.0;
	delete[]m_cameraMatrix; m_cameraMatrix = new double[9]; for( k=0; k<9; k++ ) m_cameraMatrix[k] = 0.0;
	delete[]m_transVects  ; m_transVects   = new double[m_nEffectiveImageNumber*3];
	delete[]m_rotMatrs    ; m_rotMatrs     = new double[m_nEffectiveImageNumber*9];

	int  useIntrinsicGuess = 0;
	
	CvPoint2D64d* uvinv  = new CvPoint2D64d[m_nEffectiveImageNumber * m_nNumCorners];
	CvPoint3D64d* XYZinv = new CvPoint3D64d[m_nEffectiveImageNumber * m_nNumCorners];
	int size = m_nEffectiveImageNumber * m_nNumCorners;

	int index;

	for(int i=0 ; i<m_nEffectiveImageNumber; i++)
	{
		for( int j=0; j<m_nNumCorners; j++ )
		{
			index = KMathematica::Modulus( i+2, m_nEffectiveImageNumber );
			uvinv [i*m_nNumCorners+j].x = uveff [index*m_nNumCorners+j].x;
			uvinv [i*m_nNumCorners+j].y = uveff [index*m_nNumCorners+j].y;
			XYZinv[i*m_nNumCorners+j].x = XYZeff[index*m_nNumCorners+j].x;
			XYZinv[i*m_nNumCorners+j].y = XYZeff[index*m_nNumCorners+j].y;
			XYZinv[i*m_nNumCorners+j].z = XYZeff[index*m_nNumCorners+j].z;
		}
	}

	CvVect64d INVdistortion   = new double[4];
	CvMatr64d INVcameraMatrix = new double[9];
	CvVect64d INVtransVects   = new double[m_nEffectiveImageNumber*3];
	CvMatr64d INVrotMatrs     = new double[m_nEffectiveImageNumber*9];

	// cvCalibrateCamera_64d SOMEHOW fails to find the first rotation matrix true. 
	// However it gives correct results for the remaining images. Therefore, I invert all
	// the data sets and enter them in reverse order. By doing so, I will take the last 
	// rotation matrix of the inverted data set as the rotation matrix of the normal 
	// data set.

	cvCalibrateCamera_64d(m_nEffectiveImageNumber, 
					  numPoints, 
					  imgsize, 
					  uveff, 
					  XYZeff, 
					  m_distortion, 
					  m_cameraMatrix, 
					  m_transVects, 
					  m_rotMatrs, 
					  useIntrinsicGuess
					  ); 

	cvCalibrateCamera_64d(m_nEffectiveImageNumber, 
					  numPoints, 
					  imgsize, 
					  uvinv, 
					  XYZinv, 
					  INVdistortion, 
					  INVcameraMatrix, 
					  INVtransVects, 
					  INVrotMatrs, 
					  useIntrinsicGuess
					  ); 

	CvVect64d focal = new double[2];
	
	focal[0] = m_cameraMatrix[0];
	focal[1] = m_cameraMatrix[4];
	
	CvPoint2D64d principal;
	principal.x = m_cameraMatrix[2];
	principal.y = m_cameraMatrix[5];

	cvFindExtrinsicCameraParams_64d(m_nNumCorners,
					  imgsize,
					  uveff,
					  XYZeff,
					  focal,
					  principal,
					  m_distortion,
					  m_rotMatrs,
					  m_transVects
					  );

	cvFindExtrinsicCameraParams_64d(m_nNumCorners,
					  imgsize,
					  uvinv,
					  XYZinv,
					  focal,
					  principal,
					  INVdistortion,
					  INVrotMatrs,
					  INVtransVects
					  );

	// Correct first rotation matrix and translation vector!
	for( i=0; i<9; i++ )
	{
		index = KMathematica::Modulus(-2, m_nEffectiveImageNumber );

		m_rotMatrs[i] = INVrotMatrs[index*9+i];		
		if( i<3 ) 
			m_transVects[i] = INVtransVects[3*index+i];

		index = KMathematica::Modulus( m_nEffectiveImageNumber-3, m_nEffectiveImageNumber );

		m_rotMatrs[9*(m_nEffectiveImageNumber-1)+i] = INVrotMatrs[index*9+i];
		if( i<3 ) 
			m_transVects[3*(m_nEffectiveImageNumber-1)+i] = INVtransVects[index*3+i];
	}

	delete[]focal		   ;
	delete[]INVdistortion  ;
	delete[]INVcameraMatrix;
	delete[]INVtransVects  ;
	delete[]INVrotMatrs	   ;

	delete[]numPoints;

	delete []XYZinv;
	delete []uvinv;
	
}

//----------------------------------------------------------------------------//
// Function: CalculateCalibrationErrors
// Purpose : Back-projects the 3D calibration points through the estimated
//			 extrinsics (R,t) and intrinsics, compares them with the measured
//			 image corners (uveff), and accumulates the mean, RMS-style power
//			 and maximum pixel errors into m_dErrorMean/m_dErrorPower/m_dErrorMax.
// Input   : print — if true, writes a per-point back-projection table to
//			 "backprojection.txt" in the image folder.
//----------------------------------------------------------------------------//
void CCalibrtwocamaDlg::CalculateCalibrationErrors(bool print)
{
	if( m_nEffectiveImageNumber == 0 ) return;
	
	// Back-projected ("approximated") image coordinates for every corner.
	CvPoint2D64d* uvapp = new CvPoint2D64d[m_nEffectiveImageNumber * m_nNumCorners];

	CString path = m_ImageRoot.folderPath + '\\' + "backprojection.txt";
	FILE* fp = NULL;

	if( print )
	{
		fp = fopen(path, "w");
		if( fp == NULL )
			print = false;	// can't open report file — skip printing, keep computing errors
		else
			fprintf(fp,"index\trealx\trealy\tcalcx\tcalcy\terrorx\terrory\n");
	}

	int index;

	CvPoint3D64d backProject;

	double diffx,diffy;

	m_dErrorMean.x = 0;
	m_dErrorMean.y = 0;

	m_dErrorPower.x = 0;
	m_dErrorPower.y = 0;

	m_dErrorMax.x = 0;
	m_dErrorMax.y = 0;

	double r0,r1,r2,r3,r4,r5,r6,r7,r8,t0,t1,t2;
	double r_square, r_quad;

	double xx, yy, temp;

	for( int i=0; i<m_nEffectiveImageNumber; i++ )
	{
		for( int j=0; j<m_nNumCorners; j++)
		{
			index = i*m_nNumCorners+j;

			// Rotation matrix and translation vector of image i.
			r0 = m_rotMatrs[i*9  ];
			r1 = m_rotMatrs[i*9+1];
			r2 = m_rotMatrs[i*9+2];
			r3 = m_rotMatrs[i*9+3];
			r4 = m_rotMatrs[i*9+4];
			r5 = m_rotMatrs[i*9+5];
			r6 = m_rotMatrs[i*9+6];
			r7 = m_rotMatrs[i*9+7];
			r8 = m_rotMatrs[i*9+8];

			t0 = m_transVects[3*i  ];
			t1 = m_transVects[3*i+1];
			t2 = m_transVects[3*i+2];

			// World point into camera frame: multiply with R and add T vector.
			backProject.x = r0*XYZeff[index].x +r1*XYZeff[index].y +r2*XYZeff[index].z + t0;
			backProject.y = r3*XYZeff[index].x +r4*XYZeff[index].y +r5*XYZeff[index].z + t1;
			backProject.z = r6*XYZeff[index].x +r7*XYZeff[index].y +r8*XYZeff[index].z + t2;

			if( m_bCompansateDistortion )
			{
				// Perspective projection onto the normalized image plane.
				uvapp[index].x = backProject.x / backProject.z;
				uvapp[index].y = backProject.y / backProject.z;

				// Radial terms (direct products instead of pow(x,2) — same
				// value, avoids a transcendental call in the inner loop).
				r_square = uvapp[index].x*uvapp[index].x + uvapp[index].y*uvapp[index].y;
				r_quad   = r_square*r_square;

				// Apply radial (k1,k2 = m_distortion[0..1]) and tangential
				// (p1,p2 = m_distortion[2..3]) distortion.
				xx = uvapp[index].x*(1 + m_distortion[0]*r_square + m_distortion[1]*r_quad)
					+2*m_distortion[2]*uvapp[index].x*uvapp[index].y 
					+  m_distortion[3]*(r_square + 2*uvapp[index].x *uvapp[index].x);

				yy =   uvapp[index].y*(1+m_distortion[0]*r_square 
									  +m_distortion[1]*r_quad)   
					+  m_distortion[2]*(r_square+2*uvapp[index].y*uvapp[index].y) 
					+2*m_distortion[3]*uvapp[index].x*uvapp[index].y;

				// Scale/shift by the camera matrix (skew a1 ignored here).
				uvapp[index].x = m_cameraMatrix[0]*xx+m_cameraMatrix[2];	
				uvapp[index].y = m_cameraMatrix[4]*yy+m_cameraMatrix[5];
			}
			else
			{
				// No distortion compensation: full projective mapping with the
				// camera matrix, then divide by the homogeneous coordinate.
				uvapp[index].x = m_cameraMatrix[0]*backProject.x+
								 m_cameraMatrix[1]*backProject.y+
								 m_cameraMatrix[2]*backProject.z;

				uvapp[index].y = m_cameraMatrix[3]*backProject.x+
								 m_cameraMatrix[4]*backProject.y+
								 m_cameraMatrix[5]*backProject.z;

				temp = m_cameraMatrix[6]*backProject.x+
					   m_cameraMatrix[7]*backProject.y+
					   m_cameraMatrix[8]*backProject.z;

				uvapp[index].x /= temp;
				uvapp[index].y /= temp;
			}

			// Error = measured corner minus back-projected corner.
			diffx = uveff[index].x-uvapp[index].x;
			diffy = uveff[index].y-uvapp[index].y;

			m_dErrorMean.x += fabs(diffx);
			m_dErrorMean.y += fabs(diffy);

			m_dErrorPower.x += diffx*diffx;
			m_dErrorPower.y += diffy*diffy;

			if( fabs(diffx) > m_dErrorMax.x ) m_dErrorMax.x = fabs(diffx);
			if( fabs(diffy) > m_dErrorMax.y ) m_dErrorMax.y = fabs(diffy);

			if( print ) 
			{
				fprintf( fp, "%d\t%3.2f\t%3.2f\t%3.2f\t%3.2f\t%3.2f\t%3.2f\t\n",
					          index,
							  uveff[index].x,
							  uveff[index].y,
							  uvapp[index].x,
							  uvapp[index].y,
							  diffx,
							  diffy );
			}
		}
	}

	if( print )
	{
		fclose(fp);
	}

	// NOTE(review): this is sqrt(sum)/N, not sqrt(sum/N) — kept as the
	// original metric definition; confirm before comparing with true RMS.
	m_dErrorPower.x  = sqrt(m_dErrorPower.x);
	m_dErrorPower.y  = sqrt(m_dErrorPower.y);
	m_dErrorPower.x /= m_nEffectiveImageNumber*m_nNumCorners;
	m_dErrorPower.y /= m_nEffectiveImageNumber*m_nNumCorners;

	m_dErrorMean.x /= m_nEffectiveImageNumber*m_nNumCorners;
	m_dErrorMean.y /= m_nEffectiveImageNumber*m_nNumCorners;

	delete []uvapp; uvapp = NULL;

}

//----------------------------------------------------------------------------//
// Function: GenerateReport
// Purpose : Writes the calibration results (camera matrix, pixel errors,
//			 distortion coefficients and per-image R/T matrices) to
//			 "<califlag>calibration_result.txt" in the image folder.
//			 DisplayReport() later reads this file back.
//----------------------------------------------------------------------------//
void CCalibrtwocamaDlg::GenerateReport()
{
	if( m_nEffectiveImageNumber == 0 ) return;
	
	CString path;
	path = m_ImageRoot.folderPath + '\\' + califlag+"calibration_result.txt";
	// 'cps' is presumably a class-member FILE* — TODO confirm against the header.
	cps = fopen(path, "w");
	if( cps == NULL )
		return;	// folder not writable — bail out instead of crashing in fprintf
	
	fprintf(cps,"Effective Image Number Processed = %d\n\n", m_nEffectiveImageNumber);
	
	fprintf(cps, "Camera Matrix\n%10.2f %10.2f %10.2f\n%10.2f %10.2f %10.2f\n%10.2f %10.2f %10.2f\n", 
		m_cameraMatrix[0],m_cameraMatrix[1],m_cameraMatrix[2], 
		m_cameraMatrix[3],m_cameraMatrix[4],m_cameraMatrix[5], 
		m_cameraMatrix[6],m_cameraMatrix[7],m_cameraMatrix[8]);
	
	fprintf( cps, "\nPixel Error Powers: \n");
	fprintf( cps, "X Error : %3.3f\tY Error : %3.3f\n",m_dErrorPower.x,m_dErrorPower.y);
	
	fprintf( cps, "\nPixel Mean Errors: \n");
	fprintf( cps, "X Error : %3.3f\tY Error : %3.3f\n",m_dErrorMean.x,m_dErrorMean.y);
	
	fprintf( cps, "\nMAX X Error : %3.3f\n",m_dErrorMax.x);
	fprintf( cps, "MAX Y Error : %3.3f\n",m_dErrorMax.y);
	
	fprintf(cps,"\nDistortion\n");
	for( int k=0; k<4; k++)
		fprintf(cps,"%7.2f\t", m_distortion[k] );
	fprintf(cps,"\n\n");
	
	fprintf(cps,"R & T Matrices\n\n");
	
	fprintf(cps, "    Rotation Matrices                Translation Vectors\n");
	fprintf(cps, "------------------------------------------------------------\n");
	
	// One 3x3 rotation matrix per image; the matching translation component
	// (t0..t2) is printed at the end of each matrix row (j = 2, 5, 8 -> j/3 = 0, 1, 2).
	for(int i=0;i<m_nEffectiveImageNumber;i++)
	{
		for(int j=0; j<9; j++)
		{
			fprintf(cps, "%7.2f\t", m_rotMatrs[i*9+j]);
			if((j+1)%3==0) {
				fprintf(cps,"%25.2f\n", m_transVects[i*3+j/3]);
			}
		}
		fprintf(cps,"\n");
	}
	
	fclose(cps);

}



//----------------------------------------------------------------------------//
// Function: DisplayReport
// Purpose : Reads "<califlag>calibration_result.txt" (written by
//			 GenerateReport) and shows the intrinsic part in a message box;
//			 if m_bDisplayRT is set, shows the R/T section in a second box.
// NOTE    : Fixes a bug where "califlag = \"right\"" used assignment instead
//			 of comparison, overwriting califlag and making the final else
//			 branch unreachable.
//----------------------------------------------------------------------------//
void CCalibrtwocamaDlg::DisplayReport()
{

	UpdateData();
	
	if( m_nEffectiveImageNumber == 0 ) return;
	
	CString path = m_ImageRoot.folderPath + '\\' +califlag+ "calibration_result.txt";
	
	// NOTE(review): CStdioFile's ctor throws CFileException if the file is
	// missing — callers appear to run GenerateReport first; confirm.
	CStdioFile file(path,CFile::modeRead|CFile::typeText);
	CString strLine,strText;

	// First section of the report runs up to the "R & T Matrices" marker line.
	while(file.ReadString(strLine)){
		
		if( strcmp(strLine, "R & T Matrices") == 0 )
			break;
		strText+=strLine;
		strText+="\n";
	}
	if (califlag=="left")
		MessageBox(strText, "Left Camera Calibration Results");
	else if (califlag=="right")		// BUGFIX: was '=' (assignment)
		MessageBox(strText,"Right Camera Calibration Results");
	else
		MessageBox(strText," Camera Calibration Results");
	
	if( m_bDisplayRT )
	{
		// Remainder of the file is the rotation/translation section.
		strText = "";
		while(file.ReadString(strLine)){
			strText+=strLine;
			strText+="\n";
		}
		if (califlag=="left")
			MessageBox(strText, "Left Camera Rotation and Translation Vectors");
		else if (califlag=="right")	// BUGFIX: was '=' (assignment)
			MessageBox(strText,"Right Camera Rotation and Translation Vectors");
		else
			MessageBox(strText,"Rotation and Translation Vectors");
	}

}


⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -