/* cvaux.h (SVN base copy) -- OpenCV auxiliary-module public header.
   This extract is chunk 1 of 4 of a 1,161-line file; code-viewer page
   metadata has been replaced by this comment so the header compiles. */
/* Buffer conversions between single-precision floats (32f) and doubles (64d).
   NOTE(review): 'size' is presumably the element count, not bytes -- confirm
   against the implementation in cvaux. (Internal "icv" API.) */
OPENCVAUXAPI CvStatus icvCvt_32f_64d( float *src, double *dst, int size );
OPENCVAUXAPI CvStatus icvCvt_64d_32f( double *src, float *dst, int size );
/* Splits an interlaced frame into its even and odd fields. */
OPENCVAUXAPI void cvDeInterlace( IplImage* frame, IplImage* fieldEven, IplImage* fieldOdd );
/* Stereo-calibration helper: from per-view calibration results of two cameras,
   selects a single best relative rotation/translation between them.
   NOTE(review): semantics inferred from parameter names -- verify in source. */
OPENCVAUXAPI CvStatus icvSelectBestRt( int numImages,
int* numPoints,                   /* per-image point counts (numImages entries) */
CvSize imageSize,
CvPoint2D32f* imagePoints1,       /* detected points, camera 1 */
CvPoint2D32f* imagePoints2,       /* detected points, camera 2 */
CvPoint3D32f* objectPoints,       /* corresponding 3D model points */
CvMatr32f cameraMatrix1,
CvVect32f distortion1,
CvMatr32f rotMatrs1,              /* per-view rotations, camera 1 */
CvVect32f transVects1,            /* per-view translations, camera 1 */
CvMatr32f cameraMatrix2,
CvVect32f distortion2,
CvMatr32f rotMatrs2,              /* per-view rotations, camera 2 */
CvVect32f transVects2,            /* per-view translations, camera 2 */
CvMatr32f bestRotMatr,            /* [out] selected relative rotation */
CvVect32f bestTransVect           /* [out] selected relative translation */
);
/****************************************************************************************\
* Contour Morphing *
\****************************************************************************************/
/* Finds a point correspondence between two contours; the returned sequence
   (allocated from 'storage') is consumed by cvMorphContours below. */
CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1,
const CvSeq* contour2,
CvMemStorage* storage);
/* Morphs contours using the pre-calculated correspondence 'corr':
   alpha=0 ~ contour1, alpha=1 ~ contour2 (intermediate alpha interpolates). */
CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2,
CvSeq* corr, double alpha,
CvMemStorage* storage );
/****************************************************************************************\
* Texture Descriptors *
\****************************************************************************************/
/* GLCM (gray-level co-occurrence matrix) build strategies
   (passed as 'optimizationType' to cvCreateGLCM). */
#define CV_GLCM_OPTIMIZATION_NONE -2
#define CV_GLCM_OPTIMIZATION_LUT -1
#define CV_GLCM_OPTIMIZATION_HISTOGRAM 0
/* Descriptor-computation strategies
   (passed as 'descriptorOptimizationType' to cvCreateGLCMDescriptors). */
#define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST 10
#define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST 11
#define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM 4
/* Texture descriptor identifiers (the 'descriptor' argument of
   cvGetGLCMDescriptor / cvGetGLCMDescriptorStatistics). */
#define CV_GLCMDESC_ENTROPY 0
#define CV_GLCMDESC_ENERGY 1
#define CV_GLCMDESC_HOMOGENITY 2
#define CV_GLCMDESC_CONTRAST 3
#define CV_GLCMDESC_CLUSTERTENDENCY 4
#define CV_GLCMDESC_CLUSTERSHADE 5
#define CV_GLCMDESC_CORRELATION 6
#define CV_GLCMDESC_CORRELATIONINFO1 7
#define CV_GLCMDESC_CORRELATIONINFO2 8
#define CV_GLCMDESC_MAXIMUMPROBABILITY 9
/* Release flags for cvReleaseGLCM: free everything, only the matrices,
   or only the descriptors.  NOTE(review): inferred from names -- confirm. */
#define CV_GLCM_ALL 0
#define CV_GLCM_GLCM 1
#define CV_GLCM_DESC 2
/* Opaque GLCM handle; layout is private to the implementation. */
typedef struct CvGLCM CvGLCM;
/* Builds a gray-level co-occurrence matrix from 'srcImage'.
   stepDirections/numStepDirections select the pixel-pair offsets
   (defaults apply when 0); optimizationType is one of
   CV_GLCM_OPTIMIZATION_*.  Caller releases with cvReleaseGLCM. */
OPENCVAUXAPI CvGLCM* cvCreateGLCM( const IplImage* srcImage,
int stepMagnitude,
const int* stepDirections CV_DEFAULT(0),
int numStepDirections CV_DEFAULT(0),
int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE));
/* Frees a GLCM (or part of it, per 'flag') and NULLs the handle. */
OPENCVAUXAPI void cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL));
/* Computes texture descriptors inside 'destGLCM' for later retrieval. */
OPENCVAUXAPI void cvCreateGLCMDescriptors( CvGLCM* destGLCM,
int descriptorOptimizationType
CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST));
/* Returns one CV_GLCMDESC_* descriptor value for the given step. */
OPENCVAUXAPI double cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor );
/* Mean and standard deviation of a descriptor across steps. */
OPENCVAUXAPI void cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor,
double* average, double* standardDeviation );
/* Renders the co-occurrence matrix for 'step' as an image (caller frees). */
OPENCVAUXAPI IplImage* cvCreateGLCMImage( CvGLCM* GLCM, int step );
/****************************************************************************************\
* Haar-like object detection *
\****************************************************************************************/
/* Maximum number of weighted rectangles per Haar-like feature. */
#define CV_HAAR_FEATURE_MAX 3
/* One Haar-like feature: up to CV_HAAR_FEATURE_MAX weighted rectangles
   whose weighted pixel sums are combined into the feature response. */
typedef struct CvHaarFeature
{
int tilted;    /* nonzero: rectangles are evaluated on the 45-degree
                  tilted integral image -- TODO confirm exact convention */
struct
{
CvRect r;      /* rectangle in the detection-window coordinate frame */
float weight;  /* weight applied to this rectangle's sum */
} rect[CV_HAAR_FEATURE_MAX];
}
CvHaarFeature;
/* One (possibly tree-structured) weak classifier built from Haar features.
   All five pointers reference parallel arrays of 'count' node entries. */
typedef struct CvHaarClassifier
{
int count;                 /* number of nodes in the arrays below */
CvHaarFeature* haarFeature; /* feature evaluated at each node */
float* threshold;          /* decision threshold per node */
int* left;                 /* per-node left-branch index/leaf code -- TODO confirm encoding */
int* right;                /* per-node right-branch index/leaf code -- TODO confirm encoding */
float* alpha;              /* output values (leaves) */
}
CvHaarClassifier;
/* One cascade stage: a boosted set of weak classifiers whose summed
   responses are compared against 'threshold' to pass/reject a window. */
typedef struct CvHaarStageClassifier
{
int count;                    /* number of weak classifiers in this stage */
float threshold;              /* stage acceptance threshold */
CvHaarClassifier* classifier; /* array of 'count' weak classifiers */
}
CvHaarStageClassifier;
/* A full Haar cascade: an ordered sequence of stage classifiers trained
   at a fixed base window size ('origWindowSize'). */
typedef struct CvHaarClassifierCascade
{
int count;                              /* number of stages */
CvSize origWindowSize;                  /* training window size (e.g. 24x24) */
CvHaarStageClassifier* stageClassifier; /* array of 'count' stages */
}
CvHaarClassifierCascade;
/* Opaque optimized ("hidden") cascade representation used by the detector. */
typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade;
/* Creates the faster internal representation of a Haar classifier cascade.
   The optional integral images and scale may be bound immediately, or later
   via cvSetImagesForHaarClassifierCascade.  Release with
   cvReleaseHidHaarClassifierCascade. */
OPENCVAUXAPI CvHidHaarClassifierCascade*
cvCreateHidHaarClassifierCascade( CvHaarClassifierCascade* cascade,
const CvArr* sumImage CV_DEFAULT(0),        /* integral image */
const CvArr* sqSumImage CV_DEFAULT(0),      /* squared-integral image */
const CvArr* tiltedSumImage CV_DEFAULT(0),  /* tilted integral image */
double scale CV_DEFAULT(1));
/* Current window scale bound to the hidden cascade. */
OPENCVAUXAPI double
cvGetHaarClassifierCascadeScale( CvHidHaarClassifierCascade* cascade );
/* Detection-window size at the currently bound scale. */
OPENCVAUXAPI CvSize
cvGetHaarClassifierCascadeWindowSize( CvHidHaarClassifierCascade* cascade );
/* (Re)binds the integral images and scale used by cvRunHaarClassifierCascade. */
OPENCVAUXAPI void
cvSetImagesForHaarClassifierCascade( CvHidHaarClassifierCascade* cascade,
const CvArr* sumImage,
const CvArr* sqSumImage,
const CvArr* tiltedImage,
double scale );
/* Runs the cascade at window position 'pt', starting from 'startStage'.
   NOTE(review): return encodes accept/reject (and possibly the failing
   stage) -- confirm exact convention in the implementation. */
OPENCVAUXAPI int
cvRunHaarClassifierCascade( CvHidHaarClassifierCascade* cascade,
CvPoint pt, int startStage CV_DEFAULT(0));
/* Frees the hidden cascade and NULLs the handle. */
OPENCVAUXAPI void
cvReleaseHidHaarClassifierCascade( CvHidHaarClassifierCascade** cascade );
/* Averaged detection component returned by cvHaarDetectObjects:
   a merged rectangle plus the number of raw detections it groups. */
typedef struct CvAvgComp
{
CvRect rect;   /* averaged bounding rectangle of the grouped detections */
int neighbors; /* how many raw detections were merged into 'rect' */
}
CvAvgComp;
/* Flag for cvHaarDetectObjects: prune flat regions via a Canny edge pass. */
#define CV_HAAR_DO_CANNY_PRUNING 1
/* Multi-scale object detection: scans 'img' with the cascade at windows
   grown by 'scale_factor' per step, groups raw hits requiring at least
   'min_neighbors' overlaps, and returns a CvSeq of CvAvgComp allocated
   from 'storage'. */
OPENCVAUXAPI CvSeq*
cvHaarDetectObjects( const IplImage* img,
CvHidHaarClassifierCascade* hid_cascade,
CvMemStorage* storage, double scale_factor CV_DEFAULT(1.1),
int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0));
/* Loads a cascade from 'directory' (the sentinel default selects the
   built-in face cascade).  Caller releases with
   cvReleaseHaarClassifierCascade. */
OPENCVAUXAPI CvHaarClassifierCascade*
cvLoadHaarClassifierCascade( const char* directory CV_DEFAULT("<default_face_cascade>"),
CvSize origWindowSize CV_DEFAULT(cvSize(24,24)));
/* Frees a cascade loaded above and NULLs the handle. */
OPENCVAUXAPI void
cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade );
/****************************************************************************************\
* Face eyes&mouth tracking *
\****************************************************************************************/
/* Opaque per-face tracking state. */
typedef struct CvFaceTracker CvFaceTracker;
/* Number of tracked face elements (mouth + two eyes). */
#define CV_NUM_FACE_ELEMENTS 3
/* Indices into the CV_NUM_FACE_ELEMENTS-sized rectangle arrays below. */
enum CV_FACE_ELEMENTS
{
CV_FACE_MOUTH = 0,
CV_FACE_LEFT_EYE = 1,
CV_FACE_RIGHT_EYE = 2
};
/* Initializes (or re-initializes, when pFaceTracking is non-NULL) a tracker
   from a grayscale frame and nRects seed rectangles. */
OPENCVAUXAPI CvFaceTracker* cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray,
CvRect* pRects, int nRects);
/* Advances tracking on a new grayscale frame; updates pRects in place and
   reports head rotation via ptRotate/dbAngleRotate.
   NOTE(review): return presumably signals success/failure -- confirm. */
OPENCVAUXAPI int cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray,
CvRect* pRects, int nRects,
CvPoint* ptRotate, double* dbAngleRotate);
/* Frees the tracker state and NULLs the handle. */
OPENCVAUXAPI void cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker);
/* Rectangles of the detected face elements.
   NOTE(review): struct tag is CvFace but the typedef name is CvFaceData --
   callers elsewhere may use either; keep both spellings in mind. */
typedef struct CvFace
{
CvRect MouthRect;
CvRect LeftEyeRect;
CvRect RightEyeRect;
} CvFaceData;
/* Face detection by facial-element search; results allocated from 'storage'.
   NOTE(review): element type of the returned CvSeq is presumably
   CvFaceData -- confirm in the implementation. */
CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage);
CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage);
/****************************************************************************************\
* 3D Tracker *
\****************************************************************************************/
/* Boolean flag type: 0 = false, nonzero = true. */
typedef unsigned char CvBool;
/* A single 2D observation of a tracked object as seen by one camera. */
typedef struct
{
int id;      /* object identifier (matches the 3D result's id) */
CvPoint p;   /* pixel position of the object in that camera's image */
} Cv3dTracker2dTrackedObject;
/* Convenience constructor: packs an id and an image point into a
   Cv3dTracker2dTrackedObject by value. */
CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint p);
CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint p)
{
    Cv3dTracker2dTrackedObject obj;
    obj.id = id;
    obj.p = p;
    return obj;
}
/* A triangulated 3D result for one tracked object. */
typedef struct
{
int id;            /* object identifier (matches the 2D observations' id) */
CvPoint3D32f p;    /* location of the tracked object in world coordinates --
                      TODO confirm frame matches Cv3dTrackerCameraInfo.mat */
} Cv3dTrackerTrackedObject;
/* Convenience constructor: packs an id and a 3D point into a
   Cv3dTrackerTrackedObject by value.
   BUG FIX: the prototype here was a copy-paste of the 2D variant
   (cv3dTracker2dTrackedObject, taking CvPoint) and did not match the
   definition below; it now declares cv3dTrackerTrackedObject. */
CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p);
CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p)
{
Cv3dTrackerTrackedObject r;
r.id = id;
r.p = p;
return r;
}
/* Extrinsic (calibrated) information for one camera. */
typedef struct
{
CvBool valid;                  /* nonzero once calibration for this camera succeeded */
float mat[4][4];               /* maps camera coordinates to world coordinates */
CvPoint2D32f principal_point;  /* copied from intrinsics so this structure */
                               /* has all the info we need */
} Cv3dTrackerCameraInfo;
/* Intrinsic parameters for one camera (pinhole + radial/tangential model --
   TODO confirm distortion coefficient ordering). */
typedef struct
{
CvPoint2D32f principal_point;
float focal_length[2];         /* fx, fy */
float distortion[4];           /* lens distortion coefficients */
} Cv3dTrackerCameraIntrinsics;
/* Calibrates the extrinsics of all cameras from one sample image each of a
   checkerboard ('etalon') of etalon_size squares of square_size units.
   Fills camera_info[]; returns nonzero on overall success.
   NOTE(review): per-camera partial success is reported via
   camera_info[i].valid -- confirm in the implementation. */
OPENCVAUXAPI CvBool cv3dTrackerCalibrateCameras(int num_cameras,
const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */
CvSize etalon_size,
float square_size,
IplImage *samples[], /* size is num_cameras */
Cv3dTrackerCameraInfo camera_info[]); /* size is num_cameras */
/* Triangulates 3D object positions from the per-camera 2D observations in
   tracking_info; writes results to tracked_objects and returns the number
   of objects actually located. */
OPENCVAUXAPI int cv3dTrackerLocateObjects(int num_cameras, int num_objects,
const Cv3dTrackerCameraInfo camera_info[], /* size is num_cameras */
const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */
Cv3dTrackerTrackedObject tracked_objects[]); /* size is num_objects */
/* End of extracted chunk (1 of 4).  Code-viewer keyboard-shortcut UI text
   removed here -- it was not part of the header. */