📄 humanfeaturetracker.cc
字号:
/////////////////////////////////////////////////////////////////////////////////
//
// HumanFeatureTracker.cc (structure copied from ActiveShapeTracker.cc)
//
// This tracker class tracks human features such as head etc
//
// Author    : Nils T Siebel (nts)
// Created   : Fri Apr  6 15:08:24 BST 2001
// Revision  : 1.0 of Wed Apr 18 18:06:11 BST 2001
// Copyright : The University of Reading
//
// Changes:
//   nts: rev 1.0: initial working revision    Wed Apr 18 18:06:11 BST 2001
//   nts: rev 1.1: put in calibration          Thu Nov 15 09:38:47 GMT 2001
//
/////////////////////////////////////////////////////////////////////////////////

#include "HumanFeatureTracker.h"
#include "RGB32Image.h"
#include "limits.h"   // for INT_MAX
#include "Tracking.h"
#include "Calibration.h"
#include "Inputs.h"
#include "text_output.h"
#include "Region.h"
#include "TrackedObjectSet.h"
#include "Results.h"

namespace ReadingPeopleTracker
{

// Embedded SCCS-style ("what") revision identification string.
static const char *HumanFeatureTracker_Revision = "@(#) HumanFeatureTracker.cc, rev 0.1 of Wed Apr 18 18:06:11 BST 2001, Author Nils T Siebel, Copyright (c) 2001 The University of Reading";

// Register this tracker's parameters with the configuration manager so they
// can be read from the parameter file.  Each register_bool call binds the
// named parameter-file entry to the given member variable, with a default.
// BaseTracker::register_base_configuration_variables() registers some for us!
void HumanFeatureTracker::register_configuration_parameters()
{
    draw_head_search_area = configuration.register_bool("DRAW_HEAD_SEARCH_AREA", true, &draw_head_search_area, false, "HumanFeatureTracker", "Draw the head search area?");

    draw_in_colour = configuration.register_bool("DRAW_COLOUR", true, &draw_in_colour, false, "HumanFeatureTracker", "Use colour for drawing the results?");

    draw_head_bbox = configuration.register_bool("DRAW_HEAD_BBOX", true, &draw_head_bbox, false, "HumanFeatureTracker", "Draw the head box?");

    draw_head_centre = configuration.register_bool("DRAW_HEAD_CENTRE", true, &draw_head_centre, false, "HumanFeatureTracker", "Draw the head centre?");

    draw_shoulder_width = configuration.register_bool("DRAW_SHOULDER_WIDTH", true, &draw_shoulder_width, false, "HumanFeatureTracker", "Draw the shoulder width?");
}

// Constructor.  A configuration file is mandatory (the process exits
// otherwise); the camera calibration may be NULL, in which case pixel-based
// defaults are used later on.
HumanFeatureTracker::HumanFeatureTracker(Calibration *the_calibration, char *config_filename)
{
    if (config_filename == NULL)
    {
        cerror << "HFT: HumanFeatureTracker: cannot instantiate without configuration file " << endl;
        exit(1);
    }

    // member function registers appropriate configuration parameters with configuration manager
    register_configuration_parameters();

    // read from configuration file storing values in correct place using configuration manager
    configuration.parse_parameter_file(config_filename);

    // get calibration (or NULL) from parameter
    calibration = the_calibration;
}

// "Prediction" step of the tracking cycle.  This tracker only detects
// features anew in every frame, so the previous feature sets are discarded
// rather than predicted forward.
void HumanFeatureTracker::predict_old_objects(Results *previous_results)
{
    // no predictions since there is no tracking but only detection --
    // only empty sets for now
    ListNode<TrackedObject> *curr_obj;

    // get HumanFeatures sets for each tracked object, destroy
    for (curr_obj = previous_results->get_tracked_objects()->first;
         curr_obj != NULL;
         curr_obj = curr_obj->next)
    {
        curr_obj->dat->features->destroy_all();
    }

    return;
}

// Tracking step: intentionally a no-op, see predict_old_objects() above.
void HumanFeatureTracker::track_old_objects(Inputs *inputs, Results *results)
{
    // no tracking of old objects since there is no tracking but only detection
    return;
}

// Detection step: run feature extraction (process_region) on every measured
// region of every tracked object and attach the resulting HumanFeatures to
// the owning object.
void HumanFeatureTracker::detect_new_objects(Inputs *inputs, Results *results)
{
    ListNode<TrackedObject> *curr_obj;
    ListNode<Region> *curr_reg;

    HumanFeatureSet *new_features = new HumanFeatureSet;  // to store results temporarily

    // get regions from tracked object set, adding HumanFeatures for each
    for (curr_obj = results->get_tracked_objects()->first;
         curr_obj != NULL;
         curr_obj = curr_obj->next)
    {
        // process all regions in current object (should be only one)
        for (curr_reg = curr_obj->dat->regions->first;
             curr_reg != NULL;
             curr_reg = curr_reg->next)
        {
            Region *region = curr_reg->dat;

            // region should be a measurement
            if (region->source != MEASUREMENT)
                continue;

            // ignore regions which are incorporated into the background
            if (region->incorporated_into_background)
                continue;

            if (region->region_img == NULL)
            {
                cdebug << "HFT: Warning: no region image available for this region! " << endl;
                continue;
            }

            // extract features from this region into the temporary set
            process_region(region, new_features);
        }

        curr_obj->dat->features->concat(new_features);  // move results to associated object
    }

    delete new_features;
}


///////////////////////////////////////////////////////////////////////////////
//
// do vertical histogram of region image ("significant" upper line only)
// and scan y values for "peaks"
//
// returns the number of peaks found (currently 0 to 10)
// (FIXME: silently ignores peaks other than the 10 leftmost)
//
///////////////////////////////////////////////////////////////////////////////

unsigned int HumanFeatureTracker::do_vertical_histogram(Image *region_image, VerticalHistogram *result)
{
    // TODO: at some point, this could be done in the difference image looking
    //       at consecutive pixel differences or something.

#ifdef DEBUG
    if (debug_level == 1)
        cdebug << "HFT: vert hist:" << endl;
#endif

//  region_image->display();

    assert (result != NULL);  // we need the position in the image from result

    //
    // step one: calculate vertical histogram-type statistics.
    //
    // not an actual histogram because we would like to ignore noise and
    // "holes" in the lower part of the image.  In a way, we extract the
    // "significant upper contour".
    //

    // variables: geometric
    unsigned int x, y;
    unsigned int w = region_image->get_width();
    unsigned int h = region_image->get_height();

    // statistical:
    // (now statical, see header:)
    // NOTE(review): allocated with new[]; the matching delete[] is not
    // visible in this excerpt -- verify it exists further down.
    unsigned int *stats = new unsigned int[w];

    unsigned int min = INT_MAX;
    unsigned int max = 0;
    unsigned int leftmost = INT_MAX;   // leftmost, rightmost non-empty row
    unsigned int rightmost = 0;
    unsigned int sum = 0;
    unsigned int count;

    for (x=0; x < w; x++)
    {
        stats[x] = 0;

        // go down row until hitting "significant" number of outline pixels
        const unsigned int significant = 3;

        assert(Image::image_addressing_mode == IA_BOTTOM_TO_TOP);

        for (y = h; y > 0; )  // FIXME: assuming BOTTOM_TO_TOP here
        {
            y--;

            if (*(region_image->get_pixel(x,y)) == MARK)  // hit something
            {
                // count consecutive marked pixels downwards; a run of
                // 'significant' pixels means a real contour, not noise
                for (count = 1; count < significant; )  // increment done below
                {
                    if (y == 0)  // reached the image bottom
                    {
                        stats[x] = count;  // make sure we count the hit

                        // in area width guess: ignore isolated pixels
                        if ((x > 0) && (stats[x-1] != 0))
                        {
                            if (leftmost > x)
                            {
                                // ignore isolated pixels left of here
                                leftmost = x - 1;
                            }
                            // NOTE(review): in the scraped original a bare
                            // '//' precedes this statement; rightmost is used
                            // below, so it is treated as live code here --
                            // verify against the original file.
                            if (rightmost < x)
                                rightmost = x;
                        }
                        break;
                    }
                    else
                    {
                        y--;  // check next pixel  // FIXME: BOTTOM_TO_TOP

                        if (*(region_image->get_pixel(x,y)) == MARK)
                            count++;  // count this mark
                        else
                            break;  // no significant contour.
                    }
                }

                if (count >= significant)
                {
                    // record top of the significant run for this column
                    stats[x] = y + count;

                    // overall "significant area" width ranges from leftmost to
                    // rightmost; guessing width here, ignore isolated pixels
                    if ((x > 0) && (stats[x-1] != 0))
                    {
                        if (leftmost > x)
                        {
                            // ignore isolated pixels left of this point
                            leftmost = x - 1;
                        }
                        // NOTE(review): same '//' ambiguity as above --
                        // treated as live code; verify against the original.
                        if (rightmost < x)
                            rightmost = x;
                    }
                    break;
                }
            }
        }
    }

    realno cm;       // this should be a measure (in pixels) for around 1 cm (in world)
    realno five_cm;  // this should be a measure (in pixels) for around 5 cm (in world)

    // if we have calibration we can get a good estimate in pixels
    if (calibration != NULL)
    {
        NagVector nag_origin(3);  // head point of the person in the image, NAG vector format, homogeneous

        nag_origin[0] = (result->xlo + result->xhi) / 2;
        nag_origin[1] = (result->ylo + result->yhi) / 2;
        nag_origin[2] = 1;

        five_cm = calibration->get_image_distance_from_width_in_cm(nag_origin, 5, BaseTracker::TYPICAL_WORLD_HEIGHT);

        if (five_cm < 5)
            five_cm = 5;  // use minimum of 5 pixels
    }
    else
        five_cm = 5;  // default value, ok for LUL camera 38 (centre) at PAL resolution

    cm = five_cm / 5;

    // check whether main area [leftmost;rightmost] is at least, say, 5 cm:
    // (second clause guards the no-contour case, where leftmost is still
    // INT_MAX and the unsigned subtraction would otherwise wrap around)
    if ((rightmost - leftmost < five_cm) || (leftmost > rightmost))
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -