// cv3dtracker.cpp (SVN base revision) -- OpenCV cvaux multi-camera 3D tracker.
// Extracted chunk: page 1 of 2 of a ~590-line file.
exit:
cvReleaseImage(&gray_img);
cvReleaseImage(&tmp_img);
cvFree((void **)&object_points);
cvFree((void **)&points);
return cameras_done == num_cameras;
}
// Fill in the real-world coordinates of the checkerboard corners.
// Corner (col, row) of the etalon maps to (col*square_size, row*square_size, 0),
// written row-major into obj_points (which must hold width*height entries).
static void FillObjectPoints(CvPoint3D32f *obj_points, CvSize etalon_size, float square_size)
{
    const int n = etalon_size.width * etalon_size.height;
    for (int i = 0; i < n; i++)
    {
        obj_points[i].x = square_size * (i % etalon_size.width);
        obj_points[i].y = square_size * (i / etalon_size.width);
        obj_points[i].z = 0;
    }
}
// Mark the corner points found on the input image.
// Each corner gets an X mark (half-size r) plus a circle; when draw_ordered
// is nonzero, consecutive corners are chained with line segments and each
// checkerboard row gets its own color (7 colors, repeating) so the detected
// ordering is visible. Handles both 3-channel (RGB palette) and 1-channel
// (gray-level palette) images, and flips y for bottom-left-origin images.
static void DrawEtalon(IplImage *img, CvPoint2D32f *corners,
int corner_count, CvSize etalon_size, int draw_ordered)
{
const int r = 4; // half-size of the X mark, in pixels
int i;
int x, y;
CvPoint prev_pt = { 0, 0 };
// Per-row colors used on 3-channel images.
static const int rgb_colors[] = {
CV_RGB(255,0,0),
CV_RGB(255,128,0),
CV_RGB(200,200,0),
CV_RGB(0,255,0),
CV_RGB(0,200,200),
CV_RGB(0,0,255),
CV_RGB(255,0,255) };
// Per-row intensities used on 1-channel images.
static const int gray_colors[] = {
80, 120, 160, 200, 100, 140, 180
};
// Select the palette matching the image's channel count.
const int (* const colors)[7] = img->nChannels == 3 ? &rgb_colors : &gray_colors;
int color = (*colors)[0];
for (y = 0, i = 0; y < etalon_size.height; y++)
{
if (draw_ordered)
color = (*colors)[y % ARRAY_SIZEOF(*colors)]; // one color per checkerboard row
for (x = 0; x < etalon_size.width && i < corner_count; x++, i++)
{
CvPoint pt;
pt.x = cvRound(corners[i].x);
pt.y = cvRound(corners[i].y);
// Flip y when the image origin is bottom-left so marks land correctly.
if (img->origin == IPL_ORIGIN_BL)
pt.y = img->height - 1 - pt.y;
if (draw_ordered)
{
// Chain this corner to the previous one to show detection order.
if (i != 0)
cvLineAA(img, prev_pt, pt, color);
prev_pt = pt;
}
// Draw the X mark: the two diagonals of a (2r x 2r) box centered on pt.
cvLineAA(img,
cvPoint(pt.x - r, pt.y - r),
cvPoint(pt.x + r, pt.y + r), color);
cvLineAA(img,
cvPoint(pt.x - r, pt.y + r),
cvPoint(pt.x + r, pt.y - r), color);
cvCircleAA(img, pt, r+1, color);
}
}
}
// Midpoint of the segment joining p1 and p2 (component-wise average).
static CvPoint3D32f midpoint(const CvPoint3D32f &p1, const CvPoint3D32f &p2)
{
    CvPoint3D32f m;
    m.x = (p1.x + p2.x) / 2;
    m.y = (p1.y + p2.y) / 2;
    m.z = (p1.z + p2.z) / 2;
    return m;
}
// Accumulate p2 into p1, component by component.
static void operator +=(CvPoint3D32f &p1, const CvPoint3D32f &p2)
{
    p1.x = p1.x + p2.x;
    p1.y = p1.y + p2.y;
    p1.z = p1.z + p2.z;
}
// Divide each component of p by the integer d.
// NOTE(review): no zero check -- the caller must guarantee d != 0.
static CvPoint3D32f operator /(const CvPoint3D32f &p, int d)
{
    CvPoint3D32f q;
    q.x = p.x / d;
    q.y = p.y / d;
    q.z = p.z / d;
    return q;
}
// Linear search of v[0..num_objects-1] for the entry whose id matches;
// returns a pointer to it, or NULL if no entry has that id.
static const Cv3dTracker2dTrackedObject *find(const Cv3dTracker2dTrackedObject v[], int num_objects, int id)
{
    const Cv3dTracker2dTrackedObject *end = v + num_objects;
    for (const Cv3dTracker2dTrackedObject *p = v; p != end; ++p)
    {
        if (p->id == id)
            return p;
    }
    return NULL;
}
// Camera position in world coordinates: the translation row of the camera's 4x4 matrix.
#define CAMERA_POS(c) (cvPoint3D32f((c).mat[3][0], (c).mat[3][1], (c).mat[3][2]))
//////////////////////////////
// cv3dTrackerLocateObjects //
//////////////////////////////
// Estimate the 3D world position of each tracked object from its 2D
// observations in multiple cameras.
//
// num_cameras     - number of cameras
// num_objects     - maximum number of objects per camera
// camera_info[]   - per-camera calibration (size num_cameras); entries with
//                   valid == false are skipped entirely
// tracking_info[] - 2D observations, camera-major (size num_cameras*num_objects);
//                   id == -1 marks an unused slot
// tracked_objects[] - output (size num_objects); filled from index 0
//
// Returns the number of objects written to tracked_objects. An object is
// located only when at least two valid cameras saw it; its position is the
// average of the pairwise closest-approach midpoints of the viewing rays.
CV_IMPL int cv3dTrackerLocateObjects(int num_cameras, int num_objects,
                 const Cv3dTrackerCameraInfo camera_info[],      // size is num_cameras
                 const Cv3dTracker2dTrackedObject tracking_info[], // size is num_objects*num_cameras
                 Cv3dTrackerTrackedObject tracked_objects[])     // size is num_objects
{
    /*CV_FUNCNAME("cv3dTrackerLocateObjects");*/

    int found_objects = 0;

    // Count how many valid cameras saw each object id.
    std::map<int, int> count;
    for (int c = 0; c < num_cameras; c++)
    {
        if (!camera_info[c].valid)
            continue;
        for (int i = 0; i < num_objects; i++)
        {
            const Cv3dTracker2dTrackedObject *o = &tracking_info[c*num_objects+i];
            if (o->id != -1)
                count[o->id]++;
        }
    }

    // Process each object that was seen by at least two cameras.
    for (std::map<int, int>::iterator i = count.begin(); i != count.end(); i++)
    {
        if (i->second < 2)
            continue; // seen by only one camera: cannot triangulate

        int id = i->first;

        // For every pair of cameras that saw this object, intersect the two
        // viewing rays (camera position -> back-projected image point) and
        // average the midpoints of the closest-approach segments.
        CvPoint3D32f total = cvPoint3D32f(0, 0, 0);
        int weight = 0;
        for (int c1 = 0; c1 < num_cameras-1; c1++)
        {
            if (!camera_info[c1].valid)
                continue;
            const Cv3dTracker2dTrackedObject *o1 = find(&tracking_info[c1*num_objects],
                                                        num_objects, id);
            if (o1 == NULL)
                continue; // camera c1 didn't see this object
            CvPoint3D32f p1a = CAMERA_POS(camera_info[c1]);
            CvPoint3D32f p1b = ImageCStoWorldCS(camera_info[c1], o1->p);
            for (int c2 = c1 + 1; c2 < num_cameras; c2++)
            {
                if (!camera_info[c2].valid)
                    continue;
                const Cv3dTracker2dTrackedObject *o2 = find(&tracking_info[c2*num_objects],
                                                            num_objects, id);
                if (o2 == NULL)
                    continue; // camera c2 didn't see this object
                CvPoint3D32f p2a = CAMERA_POS(camera_info[c2]);
                CvPoint3D32f p2b = ImageCStoWorldCS(camera_info[c2], o2->p);
                // Initialized only to silence spurious compiler warnings.
                CvPoint3D32f r1 = cvPoint3D32f(0, 0, 0);
                CvPoint3D32f r2 = cvPoint3D32f(0, 0, 0);
                // Closest approach of the two rays; fails for near-parallel rays.
                if (!intersection(p1a, p1b, p2a, p2b, r1, r2))
                    continue;
                total += midpoint(r1, r2);
                weight++;
            }
        }

        // BUG FIX: every pairwise intersection may fail (near-parallel rays),
        // leaving weight == 0. The original divided unconditionally, causing
        // integer division by zero (undefined behavior) inside operator/.
        if (weight == 0)
            continue;

        CvPoint3D32f center = total/weight;
        tracked_objects[found_objects++] = cv3dTrackerTrackedObject(id, center);
    }

    return found_objects;
}
#define EPS 1e-9   // parallelism threshold used by intersection()
// Overload abs for double (delegates to fabs) so generic code reads naturally.
inline double abs(double value) { return fabs(value); }
// Determinant of the 3x3 matrix whose rows are a, b and c
// (equivalently, the scalar triple product a . (b x c)).
static inline double det(CvPoint3D32f a, CvPoint3D32f b, CvPoint3D32f c)
{
    // Rule of Sarrus; term order kept stable for float reproducibility.
    return a.x*b.y*c.z + a.z*b.x*c.y + a.y*b.z*c.x
         - a.z*b.y*c.x - a.x*b.z*c.y - a.y*b.x*c.z;
}
// Component-wise sum of two points/vectors.
static CvPoint3D32f operator +(CvPoint3D32f a, CvPoint3D32f b)
{
    CvPoint3D32f s;
    s.x = a.x + b.x;
    s.y = a.y + b.y;
    s.z = a.z + b.z;
    return s;
}
// Component-wise difference of two points/vectors (a minus b).
static CvPoint3D32f operator -(CvPoint3D32f a, CvPoint3D32f b)
{
    CvPoint3D32f d;
    d.x = a.x - b.x;
    d.y = a.y - b.y;
    d.z = a.z - b.z;
    return d;
}
// Scale vector v by the scalar f; each product is computed in double
// and narrowed back to float, matching cvPoint3D32f's conversion.
static CvPoint3D32f operator *(CvPoint3D32f v, double f)
{
    CvPoint3D32f s;
    s.x = (float)(f * v.x);
    s.y = (float)(f * v.y);
    s.z = (float)(f * v.z);
    return s;
}
// Find the intersection of two 3D lines, or, if they are skew, the pair of
// mutually closest points. Line 1 passes through o1 and p1; line 2 through
// o2 and p2. On success, r1 lies on line 1 and r2 on line 2 (equal when the
// lines truly intersect). Returns false when the lines are (near-)parallel.
static bool intersection(CvPoint3D32f o1, CvPoint3D32f p1,
                         CvPoint3D32f o2, CvPoint3D32f p2,
                         CvPoint3D32f &r1, CvPoint3D32f &r2)
{
    CvPoint3D32f w  = o2 - o1;   // offset between the two line origins
    CvPoint3D32f d1 = p1 - o1;   // direction of line 1
    CvPoint3D32f d2 = p2 - o2;   // direction of line 2
    // d1 x d2: perpendicular to both direction vectors.
    CvPoint3D32f cross = cvPoint3D32f(d1.y*d2.z - d1.z*d2.y,
                                      d1.z*d2.x - d1.x*d2.z,
                                      d1.x*d2.y - d1.y*d2.x);
    // Squared magnitude of the cross product; ~0 means the lines are parallel.
    double den = cross.x*cross.x + cross.y*cross.y + cross.z*cross.z;
    if (den < EPS)
        return false;
    // Line parameters of the closest-approach points (scalar triple products).
    double t1 = det(w, d2, cross) / den;
    double t2 = det(w, d1, cross) / den;
    r1 = o1 + d1 * t1;
    r2 = o2 + d2 * t2;
    return true;
}
// Map a 2D image point into the world coordinate system: center it on the
// camera's principal point, lift it to a homogeneous 4-vector on the z = 1
// image plane, and transform it by the camera's 4x4 matrix.
static CvPoint3D32f ImageCStoWorldCS(const Cv3dTrackerCameraInfo &camera_info, CvPoint p)
{
    // Homogeneous image-plane vector, centered on the principal point.
    float v[4];
    v[0] = (float)p.x - camera_info.principal_point.x;
    v[1] = (float)p.y - camera_info.principal_point.y;
    v[2] = 1.f;
    v[3] = 1.f;

    float r[4];
    MultVectorMatrix(r, v, camera_info.mat);   // r = v * mat

    // De-homogenize to get the 3D world point.
    return cvPoint3D32f(r[0]/r[3], r[1]/r[3], r[2]/r[3]);
}
// Multiply two 4x4 affine transforms: rm = m1 * m2 (row-major).
// rm must not alias m1 or m2.
static void MultMatrix(float rm[4][4], const float m1[4][4], const float m2[4][4])
{
    for (int row = 0; row < 4; row++)
    {
        for (int col = 0; col < 4; col++)
        {
            float sum = 0.f;
            for (int k = 0; k < 4; k++)
                sum += m1[row][k] * m2[k][col];
            rm[row][col] = sum;
        }
    }
}
// Transform the row vector v by the 4x4 matrix m: rv = v * m.
// rv must not alias v.
void MultVectorMatrix(float rv[4], const float v[4], const float m[4][4])
{
    for (int col = 0; col < 4; col++)
    {
        float sum = 0.f;
        for (int row = 0; row < 4; row++)
            sum += v[row] * m[row][col];
        rv[col] = sum;
    }
}
// (end of extracted chunk -- page 1 of 2)