
📄 opticalflowpredict.cpp

📁 tracciatore di mani con webcam (webcam hand tracker)
💻 CPP
📖 Page 1 of 2
  {
    m_condens_state.x = best_sample_state.x;
    m_condens_state.y = best_sample_state.y;
    m_condens_state.vx = best_sample_state.vx;
    m_condens_state.vy = best_sample_state.vy;
    m_condens_state.angle = best_sample_state.angle;
  }
  ASSERT(!isnan(m_condens_state.x) && !isnan(m_condens_state.y) && !isnan(m_condens_state.angle));
  ASSERT(!isnan(m_condens_state.vx) && !isnan(m_condens_state.vy));

  // now move the current features to where Condensation thinks they should be;
  // the observation is no longer needed
#if 0
  if (false) { // todo
    PredictFeatureLocations(old_lens, old_d_angles, m_condens_state, m_tmp_predicted);
    FollowObservationForSmallDiffs(m_tmp_predicted, m_features[curr_indx]/*observation*/,
                                   m_features[curr_indx]/*output*/, 2.0);
  } else
#endif
  {
    PredictFeatureLocations(old_lens, old_d_angles, m_condens_state, m_features[curr_indx]);
  }

  {
    // initialize bounds for state
    float lower_bound[OF_CONDENS_DIMS];
    float upper_bound[OF_CONDENS_DIMS];
    // velocity bounds highly depend on the frame rate that we will achieve,
    // increase the factor for lower frame rates;
    // it states how much the center can move in either direction in a single
    // frame, measured in terms of the width or height of the initial match size
    double velocity_factor = .25;
    CvPoint2D32f avg;
    GetAverage(m_features[curr_indx]/*_observation*/, avg);
    double cx = avg.x;
    double cy = avg.y;
    double width = (m_condens_init_rect.right-m_condens_init_rect.left)*velocity_factor;
    double height = (m_condens_init_rect.bottom-m_condens_init_rect.top)*velocity_factor;
    lower_bound[0] = (float) (cx-width);
    upper_bound[0] = (float) (cx+width);
    lower_bound[1] = (float) (-width);
    upper_bound[1] = (float) (+width);
    lower_bound[2] = (float) (cy-height);
    upper_bound[2] = (float) (cy+height);
    lower_bound[3] = (float) (-height);
    upper_bound[3] = (float) (+height);
    lower_bound[4] = (float) (-10.0*velocity_factor*M_PI/180.0);
    upper_bound[4] = (float) (+10.0*velocity_factor*M_PI/180.0);
    CvMat lb = cvMat(OF_CONDENS_DIMS, 1, CV_MAT3x1_32F, lower_bound);
    CvMat ub = cvMat(OF_CONDENS_DIMS, 1, CV_MAT3x1_32F, upper_bound);
    cvConDensInitSampleSet(m_pConDens, &lb, &ub);
  }
}

/** if the distance between the predicted ("pred") and the observed feature
 * location is smaller than diff, use the observation as "corrected" feature,
 * otherwise use the "pred" location
 */
void OpticalFlow::FollowObservationForSmallDiffs(const CPointVector& pred,
                                                 const CPointVector& obs,
                                                 CPointVector& corrected,
                                                 double diff)
{
  int num_ft = (int) obs.size();
  ASSERT(num_ft);
  ASSERT((int)pred.size()==num_ft);
  ASSERT((int)corrected.size()==num_ft);
  for (int ft=0; ft<num_ft; ft++) {
    double dx = pred[ft].x-obs[ft].x;
    double dy = pred[ft].y-obs[ft].y;
    double len = sqrt(dx*dx+dy*dy);
    if (len<diff) {
      corrected[ft].x = obs[ft].x;
      corrected[ft].y = obs[ft].y;
    } else {
      corrected[ft].x = pred[ft].x;
      corrected[ft].y = pred[ft].y;
    }
  }
}

/* compute and store the relative location of each feature versus
 * the base_state; save it as distance and angle from base_state
 */
void OpticalFlow::PreparePredictFeatureLocations(const CondensState& base_state,
                                                 const CPointVector& base,
                                                 CDoubleVector& old_lens,
                                                 CDoubleVector& old_d_angles)
{
  int num_ft = (int) base.size();
  ASSERT(num_ft);
  old_lens.reserve(num_ft);
  old_d_angles.reserve(num_ft);
  for (int ft=0; ft<num_ft; ft++) {
    double old_dx = base[ft].x-base_state.x;
    double old_dy = base[ft].y-base_state.y;
    double old_len = sqrt(old_dx*old_dx+old_dy*old_dy);
    double old_angle = atan(old_dy/old_dx);
    if (old_dx<0) old_angle += M_PI;
    double old_d_angle = old_angle-base_state.angle;
    old_lens.push_back(old_len);
    old_d_angles.push_back(old_d_angle);
  }
}

/** given a (predicted) state, where do the features end up?
 * This requires that PreparePredict.. has been called before
 * to obtain an intermediate feature representation that is free
 * of the old state.
 */
void OpticalFlow::PredictFeatureLocations(const CDoubleVector& old_lens,
                                          const CDoubleVector& old_d_angles,
                                          const CondensState& predicted_state,
                                          CPointVector& prediction)
{
  int num_ft = (int)prediction.size();
  ASSERT((int)old_lens.size()==num_ft);
  ASSERT((int)old_d_angles.size()==num_ft);
  for (int ft=0; ft<num_ft; ft++) {
    double old_d_angle = old_d_angles[ft];
    double new_angle = predicted_state.angle+old_d_angle;
    double old_len = old_lens[ft];
    double new_dx = old_len*cos(new_angle);
    double new_dy = old_len*sin(new_angle);
    prediction[ft].x = (float) (predicted_state.x+new_dx);
    prediction[ft].y = (float) (predicted_state.y+new_dy);
//    VERBOSE2(3, "predicted %f, %f",
//      prediction[ft].x, prediction[ft].y);
  }
}

/* estimate the likelihood that the given prediction is correct,
 * given the observation and discarding the furthest-away features
 */
double OpticalFlow::EstimateProbability(const CPointVector& prediction,
                                        const CPointVector& observation,
                                        int discard_num_furthest)
{
  int num_ft = (int) prediction.size();
  ASSERT((int)observation.size()==num_ft);
  ASSERT(num_ft>discard_num_furthest);
  vector<double> furthest; // will be sorted highest to smallest
  furthest.resize(discard_num_furthest);
  double cum_dist = 0.0;
  for (int ft=0; ft<num_ft; ft++) {
    double dx = prediction[ft].x-observation[ft].x;
    double dy = prediction[ft].y-observation[ft].y;
    double dist = sqrt(dx*dx+dy*dy);
    // check if it's a far-away feature, update "furthest" vector if so;
    // only add the distance if it's not too far away
    for (int f=0; f<discard_num_furthest; f++) {
      if (dist>furthest[f]) {
        // add smallest "furthest" dist to the sum before we kick it out
        // of the vector
        cum_dist += furthest[discard_num_furthest-1];
        for (int s=f; s<discard_num_furthest-1; s++) {
          furthest[s+1] = furthest[s];
        }
        furthest[f] = dist;
        break;
      }
    }
    cum_dist += dist;
  }
  double prob = 1.0/(1.0+cum_dist);
  return prob;
}

/** given the locations of features for a predicted state,
 * do they fall on skin-colored pixels?
 */
double OpticalFlow::EstimateProbability(const CPointVector& prediction,
                                        IplImage* rgbImage)
{
  int num_ft = (int) prediction.size();
  ASSERT(num_ft);
  int max_x = rgbImage->width-1;
  int max_y = rgbImage->height-1;
  double prob = 0;
  for (int ft=0; ft<num_ft; ft++) {
    int x = (int) prediction[ft].x;
    int y = (int) prediction[ft].y;
    x = max(0, min(x, max_x));
    y = max(0, min(y, max_y));
    ColorBGR* color;
    GetPixel(rgbImage, x, y, &color);
    double p = m_pProbDistrProvider->LookupProb(*color);
    prob += p;
  }
  prob = prob/(double)num_ft;
  return prob;
}
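PreparePredictFeatureLocations and PredictFeatureLocations together amount to storing each tracked feature in polar coordinates relative to the filter state (a distance plus an angle offset) and re-projecting it under a new predicted state. Below is a minimal, OpenCV-free sketch of that round trip on made-up points; State and Pt are simplified stand-ins for CondensState and the CPointVector entries, and atan2 replaces the original's atan plus quadrant correction (equivalent inside cos/sin).

// Minimal sketch of the polar bookkeeping in
// PreparePredictFeatureLocations / PredictFeatureLocations.
// Assumptions: State and Pt are simplified stand-ins for CondensState and the
// CPointVector entries; atan2 replaces the original atan-plus-quadrant fix.
#include <cmath>
#include <cstdio>
#include <vector>

struct State { double x, y, angle; };  // stand-in for CondensState (position + rotation)
struct Pt    { double x, y; };         // stand-in for one tracked feature location

int main() {
  const double kDegToRad = 3.14159265358979323846 / 180.0;
  State base      = { 100.0, 80.0, 0.0 };              // state the features were observed under
  State predicted = { 104.0, 82.0, 5.0 * kDegToRad };  // a state proposed by the filter
  std::vector<Pt> features = { {110.0, 80.0}, {100.0, 95.0}, {92.0, 73.0} };

  // "Prepare": express each feature as (distance, angle offset) relative to base.
  std::vector<double> lens, d_angles;
  for (const Pt& f : features) {
    double dx = f.x - base.x, dy = f.y - base.y;
    lens.push_back(std::sqrt(dx * dx + dy * dy));
    d_angles.push_back(std::atan2(dy, dx) - base.angle);
  }

  // "Predict": rebuild the features around the predicted state, rotated by its angle.
  for (size_t i = 0; i < features.size(); ++i) {
    double a  = predicted.angle + d_angles[i];
    double nx = predicted.x + lens[i] * std::cos(a);
    double ny = predicted.y + lens[i] * std::sin(a);
    std::printf("feature %zu: (%.1f, %.1f) -> (%.2f, %.2f)\n",
                i, features[i].x, features[i].y, nx, ny);
  }
  return 0;
}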

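EstimateProbability(prediction, observation, discard_num_furthest) turns the residual between predicted and observed feature positions into a score in (0, 1] via 1/(1 + cum_dist), with the intent (per its comments) of keeping the few furthest-away features from dominating the sum. The following stand-alone sketch shows the same scoring idea; it uses std::nth_element to drop the k largest distances instead of the original's running "furthest" list, so it is illustrative rather than a drop-in replacement.

// Stand-alone sketch of the scoring idea behind
// EstimateProbability(prediction, observation, discard_num_furthest):
// sum the prediction/observation distances while ignoring the few largest,
// then map the sum to (0, 1] via 1/(1+sum). Not HandVu's exact bookkeeping.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct Pt { double x, y; };

// assumes pred.size() == obs.size() and pred.size() > discard_num_furthest,
// mirroring the ASSERTs in the original
double ScorePrediction(const std::vector<Pt>& pred, const std::vector<Pt>& obs,
                       int discard_num_furthest) {
  std::vector<double> dists;
  dists.reserve(pred.size());
  for (size_t i = 0; i < pred.size(); ++i) {
    double dx = pred[i].x - obs[i].x, dy = pred[i].y - obs[i].y;
    dists.push_back(std::sqrt(dx * dx + dy * dy));
  }
  // partition so the discard_num_furthest largest distances sit at the end
  std::nth_element(dists.begin(), dists.end() - discard_num_furthest, dists.end());
  double cum_dist = 0.0;
  for (size_t i = 0; i + discard_num_furthest < dists.size(); ++i)
    cum_dist += dists[i];
  return 1.0 / (1.0 + cum_dist);  // smaller residual -> score closer to 1
}

int main() {
  std::vector<Pt> pred = { {10, 10}, {20, 20}, {30, 30}, {40, 40} };
  std::vector<Pt> obs  = { {11, 10}, {20, 21}, {30, 30}, {90, 90} };  // one gross outlier
  std::printf("score = %f\n", ScorePrediction(pred, obs, /*discard_num_furthest=*/1));
  return 0;
}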