cvlkpyramid.cpp
                break;
            }
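            /* Ix/Iy receive the x- and y-derivatives of the patch extracted from the
               first image. kerX/kerY are the separable 3-tap derivative/smoothing
               kernels defined earlier in the file (not shown in this excerpt), and
               patchJ is passed as temporary workspace for the convolution. */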
            /* calc Ix */
            icvSepConvSmall3_32f( patchI, srcPatchStep, Ix, patchStep,
                                  srcPatchSize, kerX, kerY, patchJ );
            /* calc Iy */
            icvSepConvSmall3_32f( patchI, srcPatchStep, Iy, patchStep,
                                  srcPatchSize, kerY, kerX, patchJ );

            /* repack patchI (remove borders) */
            for( k = 0; k < patchSize.height; k++ )
                memcpy( patchI + k * patchSize.width,
                        patchI + (k + 1) * srcPatchSize.width + 1, patchStep );

            memset( G, 0, sizeof( G ));

            /* calculate G matrix */
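            /* G is the symmetric 6x6 Gram matrix of the basis
               [Ix, Iy, x*Ix, y*Ix, x*Iy, y*Iy] summed over the window, i.e. the normal
               matrix of the affine Lucas-Kanade least-squares problem. Only the unique
               entries are accumulated; the "G[k] == G[m]" comments below mark entries
               that repeat an earlier sum and are copied in after the loop. */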
            for( y = -winSize.height, k = 0; y <= winSize.height; y++ )
            {
                for( x = -winSize.width; x <= winSize.width; x++, k++ )
                {
                    double ixix = ((double) Ix[k]) * Ix[k];
                    double ixiy = ((double) Ix[k]) * Iy[k];
                    double iyiy = ((double) Iy[k]) * Iy[k];
                    double xx, xy, yy;

                    G[0] += ixix;
                    G[1] += ixiy;
                    G[2] += x * ixix;
                    G[3] += y * ixix;
                    G[4] += x * ixiy;
                    G[5] += y * ixiy;
                    // G[6] == G[1]
                    G[7] += iyiy;
                    // G[8] == G[4]
                    // G[9] == G[5]
                    G[10] += x * iyiy;
                    G[11] += y * iyiy;

                    xx = x * x;
                    xy = x * y;
                    yy = y * y;

                    // G[12] == G[2]
                    // G[13] == G[8] == G[4]
                    G[14] += xx * ixix;
                    G[15] += xy * ixix;
                    G[16] += xx * ixiy;
                    G[17] += xy * ixiy;
                    // G[18] == G[3]
                    // G[19] == G[9]
                    // G[20] == G[15]
                    G[21] += yy * ixix;
                    // G[22] == G[17]
                    G[23] += yy * ixiy;
                    // G[24] == G[4]
                    // G[25] == G[10]
                    // G[26] == G[16]
                    // G[27] == G[22]
                    G[28] += xx * iyiy;
                    G[29] += xy * iyiy;
                    // G[30] == G[5]
                    // G[31] == G[11]
                    // G[32] == G[17]
                    // G[33] == G[23]
                    // G[34] == G[29]
                    G[35] += yy * iyiy;
                }
            }
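            /* Restore the entries that alias sums computed above, then mirror the
               upper triangle so G becomes fully symmetric. */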
            G[8] = G[4];
            G[9] = G[5];
            G[22] = G[17];

            // fill part of G below its diagonal
            for( y = 1; y < 6; y++ )
                for( x = 0; x < y; x++ )
                    G[y * 6 + x] = G[x * 6 + y];

            CvMat mat;
            cvInitMatHeader( &mat, 6, 6, CV_64FC1, G );
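            /* Invert G in place via SVD. With CV_SVD, cvInvert returns a conditioning
               measure (roughly the ratio of the smallest to the largest singular value);
               a value below 1e-3 means the patch has too little or too one-dimensional
               texture, so the point is rejected. */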
            if( cvInvert( &mat, &mat, CV_SVD ) < 1e-3 )
            {
                /* bad matrix. take the next point */
                pt_status = 0;
            }
            else
            {
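                /* Gauss-Newton iterations: warp the patch from the second image with
                   the current affine matrix A around the current estimate v, build the
                   residual vector b, take the step eta = inv(G)*b, and update v and A
                   until the squared translational update falls below criteria.epsilon
                   or criteria.max_iter is reached. */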
                for( j = 0; j < criteria.max_iter; j++ )
                {
                    double b[6], eta[6];
                    double t0, t1, s = 0;

                    if( icvGetQuadrangleSubPix_8u32f_C1R( imgJ[l], step[l], levelSize,
                                                          patchJ, patchStep, patchSize, A,
                                                          0, 0 ) < 0 )
                    {
                        pt_status = 0;
                        break;
                    }

                    memset( b, 0, sizeof( b ));
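                    /* b[] collects the image difference t = patchI - patchJ projected
                       onto the same basis as G: [Ix, Iy, x*Ix, y*Ix, x*Iy, y*Iy]. */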
                    for( y = -winSize.height, k = 0; y <= winSize.height; y++ )
                    {
                        for( x = -winSize.width; x <= winSize.width; x++, k++ )
                        {
                            double t = patchI[k] - patchJ[k];
                            double ixt = Ix[k] * t;
                            double iyt = Iy[k] * t;

                            s += t;

                            b[0] += ixt;
                            b[1] += iyt;
                            b[2] += x * ixt;
                            b[3] += y * ixt;
                            b[4] += x * iyt;
                            b[5] += y * iyt;
                        }
                    }
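                    /* eta = inv(G) * b (G already holds its inverse after cvInvert).
                       eta[0..1] is the translation update, eta[2..5] the increment to
                       the 2x2 affine part; the translation update is mapped through A
                       before being added to v. */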
                    icvTransformVector_64d( G, b, eta, 6, 6 );

                    t0 = v.x + A[0] * eta[0] + A[1] * eta[1];
                    t1 = v.y + A[2] * eta[0] + A[3] * eta[1];

                    assert( fabs( t0 ) < levelSize.width * 2 );
                    assert( fabs( t1 ) < levelSize.height * 2 );

                    v.x = (float) t0;
                    v.y = (float) t1;
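                    /* Compose A with (I + dA) on the right, where
                       dA = [eta[2] eta[3]; eta[4] eta[5]] and A is stored row-major
                       as [A[0] A[1]; A[2] A[3]]. */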
                    t0 = A[0] * (1 + eta[2]) + A[1] * eta[4];
                    t1 = A[0] * eta[3] + A[1] * (1 + eta[5]);
                    A[0] = (float) t0;
                    A[1] = (float) t1;

                    t0 = A[2] * (1 + eta[2]) + A[3] * eta[4];
                    t1 = A[2] * eta[3] + A[3] * (1 + eta[5]);
                    A[2] = (float) t0;
                    A[3] = (float) t1;

                    /*t0 = 4./(fabs(A[0]) + fabs(A[1]) + fabs(A[2]) + fabs(A[3]) + DBL_EPSILON);
                    A[0] = (float)(A[0]*t0);
                    A[1] = (float)(A[1]*t0);
                    A[2] = (float)(A[2]*t0);
                    A[3] = (float)(A[3]*t0);
                    t0 = fabs(A[0]*A[2] - A[1]*A[3]);
                    if( t0 >
                    A[0] = (float)(A[0]*t0);
                    A[1] = (float)(A[1]*t0);
                    A[2] = (float)(A[2]*t0);
                    A[3] = (float)(A[3]*t0); */

                    if( eta[0] * eta[0] + eta[1] * eta[1] < criteria.epsilon )
                        break;
                }
            }

            if( pt_status == 0 )
                break;
        }
        if( pt_status )
        {
            featuresB[i] = v;
            memcpy( matrices + i * 4, A, sizeof( A ));

            if( error )
            {
                /* calc error */
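                /* The tracking error reported for this point is the L2 norm (square
                   root of the sum of squared differences) between the final patches
                   from the two images. */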
                double err = 0;

                for( y = 0, k = 0; y < patchSize.height; y++ )
                {
                    for( x = 0; x < patchSize.width; x++, k++ )
                    {
                        double t = patchI[k] - patchJ[k];
                        err += t * t;
                    }
                }
                error[i] = (float) sqrt( err );
            }
        }

        if( status )
            status[i] = (char) pt_status;
    }
func_exit:
    cvFree( &pyr_buffer );
    cvFree( &buffer );

    return result;
#undef MAX_LEVEL
}
#endif
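/* Conservative size estimate for a user-supplied pyramid buffer: each level above
   the base image is at most a quarter of the previous one, so all levels together
   need at most about one third of the (8-aligned) base image area. */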
static int icvMinimalPyramidSize( CvSize img_size )
{
    return cvAlign(img_size.width,8) * img_size.height / 3;
}
CV_IMPL void
cvCalcOpticalFlowPyrLK( const void* arrA, const void* arrB,
                        void* pyrarrA, void* pyrarrB,
                        const CvPoint2D32f * featuresA,
                        CvPoint2D32f * featuresB,
                        int count, CvSize winSize, int level,
                        char *status, float *error,
                        CvTermCriteria criteria, int flags )
{
    CV_FUNCNAME( "cvCalcOpticalFlowPyrLK" );

    __BEGIN__;

    CvMat stubA, *imgA = (CvMat*)arrA;
    CvMat stubB, *imgB = (CvMat*)arrB;
    CvMat pstubA, *pyrA = (CvMat*)pyrarrA;
    CvMat pstubB, *pyrB = (CvMat*)pyrarrB;
    CvSize img_size;

    CV_CALL( imgA = cvGetMat( imgA, &stubA ));
    CV_CALL( imgB = cvGetMat( imgB, &stubB ));

    if( CV_MAT_TYPE( imgA->type ) != CV_8UC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( !CV_ARE_TYPES_EQ( imgA, imgB ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( imgA, imgB ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( imgA->step != imgB->step )
        CV_ERROR( CV_StsUnmatchedSizes, "imgA and imgB must have equal steps" );

    img_size = cvGetMatSize( imgA );
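    /* A user-supplied pyramid buffer must be large enough for all levels; passing a
       NULL pyramid array leaves data.ptr == 0, which the worker routine treats as a
       request to allocate (and later free) temporary pyramid storage itself. */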
    if( pyrA )
    {
        CV_CALL( pyrA = cvGetMat( pyrA, &pstubA ));

        if( pyrA->step*pyrA->height < icvMinimalPyramidSize( img_size ) )
            CV_ERROR( CV_StsBadArg, "pyramid A has insufficient size" );
    }
    else
    {
        pyrA = &pstubA;
        pyrA->data.ptr = 0;
    }

    if( pyrB )
    {
        CV_CALL( pyrB = cvGetMat( pyrB, &pstubB ));

        if( pyrB->step*pyrB->height < icvMinimalPyramidSize( img_size ) )
            CV_ERROR( CV_StsBadArg, "pyramid B has insufficient size" );
    }
    else
    {
        pyrB = &pstubB;
        pyrB->data.ptr = 0;
    }

    IPPI_CALL( icvCalcOpticalFlowPyrLK_8uC1R( imgA->data.ptr, imgB->data.ptr, imgA->step,
                                              img_size, pyrA->data.ptr, pyrB->data.ptr,
                                              featuresA, featuresB,
                                              count, winSize, level, status,
                                              error, criteria, flags ));

    __END__;
}
#if 0
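/* The affine variant of the tracker is compiled out in this build; the wrapper below
   mirrors cvCalcOpticalFlowPyrLK but additionally validates and forwards the
   per-point 2x2 affine matrices. */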
CV_IMPL void
cvCalcAffineFlowPyrLK( const void* arrA, const void* arrB,
                       void* pyrarrA, void* pyrarrB,
                       CvPoint2D32f * featuresA,
                       CvPoint2D32f * featuresB,
                       float *matrices, int count,
                       CvSize winSize, int level,
                       char *status, float *error,
                       CvTermCriteria criteria, int flags )
{
    CV_FUNCNAME( "cvCalcAffineFlowPyrLK" );

    __BEGIN__;

    CvMat stubA, *imgA = (CvMat*)arrA;
    CvMat stubB, *imgB = (CvMat*)arrB;
    CvMat pstubA, *pyrA = (CvMat*)pyrarrA;
    CvMat pstubB, *pyrB = (CvMat*)pyrarrB;
    CvSize img_size;

    CV_CALL( imgA = cvGetMat( imgA, &stubA ));
    CV_CALL( imgB = cvGetMat( imgB, &stubB ));

    if( CV_MAT_TYPE( imgA->type ) != CV_8UC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( !CV_ARE_TYPES_EQ( imgA, imgB ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( imgA, imgB ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( imgA->step != imgB->step )
        CV_ERROR( CV_StsUnmatchedSizes, "imgA and imgB must have equal steps" );

    if( !matrices )
        CV_ERROR( CV_StsNullPtr, "" );

    img_size = cvGetMatSize( imgA );

    if( pyrA )
    {
        CV_CALL( pyrA = cvGetMat( pyrA, &pstubA ));

        if( pyrA->step*pyrA->height < icvMinimalPyramidSize( img_size ) )
            CV_ERROR( CV_StsBadArg, "pyramid A has insufficient size" );
    }
    else
    {
        pyrA = &pstubA;
        pyrA->data.ptr = 0;
    }

    if( pyrB )
    {
        CV_CALL( pyrB = cvGetMat( pyrB, &pstubB ));

        if( pyrB->step*pyrB->height < icvMinimalPyramidSize( img_size ) )
            CV_ERROR( CV_StsBadArg, "pyramid B has insufficient size" );
    }
    else
    {
        pyrB = &pstubB;
        pyrB->data.ptr = 0;
    }

    IPPI_CALL( icvCalcAffineFlowPyrLK_8uC1R( imgA->data.ptr, imgB->data.ptr, imgA->step,
                                             img_size, pyrA->data.ptr, pyrB->data.ptr,
                                             featuresA, featuresB, matrices,
                                             count, winSize, level, status,
                                             error, criteria, flags ));

    __END__;
}
#endif
/* End of file. */