activemodel.h
/***************************************************************
 *
 * C - C++ Header
 *
 * File          : ActiveModel.h
 *
 * Module        : ActiveModel
 *
 * Author        : Adam
 *
 * Creation Date : Mon May 16 15:07:55 1994
 *
 * Comments      : module for tracking a Profile (outline shape)
 *
 ***************************************************************/

#ifndef __ACTIVE_MODEL_H__
#define __ACTIVE_MODEL_H__

#ifdef DEBUG
#include "EnvParameter.h"
#endif

#include "tracker_defines_types_and_helpers.h"
#include "NagVector.h"
#include "NagMatrix.h"
#include "PointVector.h"
#include "SplineWeights.h"

namespace ReadingPeopleTracker
{

// forward declarations
class ProfileSet;
class Image;
class PCAclass;
class Profile;
class OcclusionHandler;
class ActiveShapeTracker;
class EdgeDetector;

#ifdef DEBUG
class MovieStore;
class RGB32Image;
#endif

class ActiveModel
{
    friend class ActiveShapeTracker;

protected:

    // apart from shape there's ox, oy, ax, ay = 4 additional parameters
    static const int NO_OF_NON_SHAPE_PARAMS;

    // images: for edge detection and visualisation. set them for each frame!
    Image *video_image;
    Image *difference_image;
    Image *background_image;
    Image *previous_video_image;

    // we are given the video image type (expect GREY8 or RGB32) upon instantiation
    ImageType video_image_type;

    OcclusionHandler *occ_handler;

    NagMatrix P;        // model to control point transformation
    NagMatrix H;        // model to sample point transformation
    NagMatrix N;        // model to sample normal transformation
    NagVector evals;    // copy of shape eigenvalues

    int H_no_rows;

    PointVector mean_points;
    PointVector mean_normals;

    // mean square distance from origin to sample points
    realno mean_sq_distance;

    NagVector measure_error;
    NagVector db;

    // the current tracked profile
    Profile *currx;

private:

    unsigned int no_sample_points;      // in fact max sample points

    // overweighting constant
    realno epsilon;

    // no sample points between spline control points
    unsigned int no_subdivisions;

    // number of shape parameters to use
    unsigned int model_depth;

    // current number of shape parameters used
    unsigned int current_depth;

    realno var_fac;
    realno var_const;

    static unsigned int min_bunch_size; // minimum no of observations

    unsigned int current_bunch_size;

    realno frame_time;                  // notional 'time' between image frames (0 to 1)
    realno frame_step;                  // notional time step

    virtual void constrain_b();

    // pointer to calling ActiveShapeTracker so that we can access its configuration parameters
    ActiveShapeTracker *tracker;

    Profile *mean;

    EdgeDetector *edge_detector;        // mechanism for making observations

    // no of unoccluded edge points
    int no_looks;

    // no of significant edges found
    int no_observed;

    // total no of sample points
    int no_sampled;

    // these arrays are used in adjust_state() only but we'd rather not
    // allocate and de-allocate them several times a second...
    int ids[MAX_BUNCH_SIZE];
    realno inv_var[MAX_BUNCH_SIZE];
    realno mxx[MAX_BUNCH_SIZE];
    realno mxy[MAX_BUNCH_SIZE];
    realno myy[MAX_BUNCH_SIZE];
    Point2 n[MAX_BUNCH_SIZE];
    Point2 p[MAX_BUNCH_SIZE];
    Point2 q[MAX_BUNCH_SIZE];
    Point2 dp[MAX_BUNCH_SIZE];

    void get_estimate(int i, Point2 &p, Point2 &n, Point2 &q);

    realno adjust_state();

    void b_to_x();

    void quick_observe_point(Point2 &n, Point2 &p, Point2 &q, int id,
                             realno &mxx, realno &mxy, realno &myy,
                             realno &inv_var, realno &m_density, Point2 &dp);

    void get_new_depth();

    void get_initial_frame_step();

public:

    ActiveModel(ActiveShapeTracker *the_tracker, ImageType the_video_image_type);

    void initialise();

    inline void set_profile(Profile *initial_guess)
    {
        currx = initial_guess;
    }

    inline void set_edge_detector(EdgeDetector *the_edge_detector)
    {
        edge_detector = the_edge_detector;
    }

    inline void set_occlusion_handler(OcclusionHandler *the_occ_handler)
    {
        occ_handler = the_occ_handler;
    }

    inline int get_model_depth()
    {
        return model_depth;
    }

    inline EdgeDetector *get_edge_detector() const
    {
        return edge_detector;
    }

    // do the iterative fitting for current profile doing local edge search around the
    // Profile, using our EdgeDetector. (needs object_id for occlusion reasoning)
    realno track_profile(object_id_t object_id);

    // reset to mean shape
    void hold_shape();

    virtual void apply_virtual_shape();

    // initialise shape filters
    virtual void setup_shape_filters();

    // get the shape model data from the model_file
    virtual void read_shape_model();

    inline realno get_fitness()
    {
        realno fit;

        if (no_looks > 0)
            fit = no_observed / ((realno) no_looks);
        else
            fit = 0;    // nts: changed! Was 1.0

        return fit;
    }

    realno get_significance()
    {
        realno res = no_looks / ((realno) no_sampled);
        return res;
    }

// accessors and modifiers
public:

    // these will be set by the ActiveShapeTracker
    inline void set_video_image(Image *the_video_image)
    {
        video_image = the_video_image;
    }

    inline void set_difference_image(Image *the_difference_image)
    {
        difference_image = the_difference_image;
    }

    inline void set_background_image(Image *the_background_image)
    {
        background_image = the_background_image;
    }

    inline void set_previous_video_image(Image *the_previous_video_image)
    {
        previous_video_image = the_previous_video_image;
    }

    inline ImageType get_video_image_type() const
    {
        return video_image_type;
    }

    // these will be queried by the EdgeDetector
    inline Image *get_video_image() const
    {
        return video_image;
    }

    inline Image *get_difference_image() const
    {
        return difference_image;
    }

    inline Image *get_background_image() const
    {
        return background_image;
    }

    inline Image *get_previous_video_image() const
    {
        return previous_video_image;
    }

private:

    // private helpers etc
    void setup_pca_model();     // load in the PCA shape model

    realno get_M_distance(FilterOneD **filters, unsigned int depth,
                          NagVector &evals) const;

#ifdef DEBUG
    static MovieStore *demo_movie;
    static EnvStringParameter demo_movie_name;
    static RGB32Image *demo_image;
#endif // ifdef DEBUG

};

} // namespace ReadingPeopleTracker

#endif
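
For orientation, the sketch below shows one plausible call sequence a caller could drive through this interface: construct the model, attach the EdgeDetector and OcclusionHandler, hand over the four images for the current frame, seed an initial Profile, then run the iterative fit. This is a hedged illustration, not code from the tracker itself; in the real system the ActiveShapeTracker (a friend of this class) presumably owns and drives the ActiveModel. All parameter names (tracker, edges, occlusions, frame, diff, bg, prev_frame, guess, id) are hypothetical placeholders, and the RGB32 image type value is assumed only from the "expect GREY8 or RGB32" comment in the header.

// Usage sketch under the assumptions stated above.
#include "ActiveModel.h"

using namespace ReadingPeopleTracker;

void fit_one_frame(ActiveShapeTracker *tracker, EdgeDetector *edges,
                   OcclusionHandler *occlusions,
                   Image *frame, Image *diff, Image *bg, Image *prev_frame,
                   Profile *guess, object_id_t id)
{
    ActiveModel model(tracker, RGB32);          // video image type per header comment

    model.set_edge_detector(edges);             // mechanism for making observations
    model.set_occlusion_handler(occlusions);    // needed for occlusion reasoning
    model.initialise();

    // images must be set for each frame (see comment in the header)
    model.set_video_image(frame);
    model.set_difference_image(diff);
    model.set_background_image(bg);
    model.set_previous_video_image(prev_frame);

    model.set_profile(guess);                   // initial Profile estimate

    realno residual = model.track_profile(id);  // iterative local edge search around the Profile
    realno fitness  = model.get_fitness();      // no_observed / no_looks

    (void) residual;
    (void) fitness;
}

Note that get_fitness() reports the fraction of unoccluded sample points at which a significant edge was observed, so it can serve as a per-frame measure of how well the fitted Profile is supported by the image.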