⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 edgedetector.cc

📁 不错的shijuegezong的程序
💻 CC
📖 第 1 页 / 共 3 页
字号:
		theColour.green += gdiff;

		// NOTE(review): the code above/below is the tail of a ColourBlob member
		// function whose beginning lies before this chunk of the file, so only
		// part of it is visible here.  The visible part nudges theColour.blue
		// towards newColour.blue by a clamped step -- at most +/-4 and at least
		// +/-1 whenever the channels differ -- mirroring the green-channel
		// update above.
		// NOTE(review): 'register' is deprecated (removed in C++17); it could
		// safely be dropped.
		register int bdiff = newColour.blue - theColour.blue;
	if (bdiff > 0)
	    if (bdiff > 4)
		bdiff = 4;      // large positive gap: clamp adaptation step to +4
	    else
		bdiff = 1;      // small positive gap: minimum step of +1
	else
	    if (bdiff != 0)
		if (bdiff < -4)
		    bdiff = -4; // large negative gap: clamp adaptation step to -4
		else
		    bdiff = -1; // small negative gap: minimum step of -1
		theColour.blue += bdiff;
    }
}

// Visualise this colour blob's current mean colour.
// Draws a filled circle of radius 10 in the blob's colour inside a shared
// 200x200 GL window (opened lazily on the first call).  The circle's centre
// lies on a circle of radius 50 around the window centre at angle 2*pi*u, so
// different u values in [0,1) place different blobs at different positions
// (find_edge() below passes indx / no_cblobs).  Does nothing while the blob's
// colour is not yet valid; the whole routine is compiled out when NO_DISPLAY
// is defined.
void ColourBlob::visualise(realno u)
{
#ifndef NO_DISPLAY
    if (theColour_valid == false)
	return;

    // Open and initialise the shared display window on first use.
    if (cblob_glwin == NULLWIN)
    {
	prefsize(200,200);
	// nts: this gives an error in Ygl version 4.x
	// foreground();
	cblob_glwin = winopen("Colour Blobs");
	winset(cblob_glwin);
	reshapeviewport();
	prefsize(200,200);
	winconstraints();
#ifdef USE_GL
	RGBmode();
	gconfig();  // nts: have to do this after RGBmode and before cpack
	cpack(0x00ffffff);
	gflush();
#endif
	gconfig();
#ifdef USE_GL
	RGBcolor(0,0,0);
#endif
	clear();
    }

    winset(cblob_glwin);

#ifdef USE_GL
    RGBmode();
    gconfig();  // nts: have to do this after RGBmode and before RGBcolor
    RGBcolor((short) theColour.red, (short) theColour.green, (short)
	     theColour.blue);
#endif
    circf(100+(50*sin(2 * 3.142 * u)), 100+(50*cos(2 * 3.142 * u)), 10.0);
#endif   // #ifndef NO_DISPLAY
}

// Find the strongest colour edge along the search line through 'pos'.
//
//   u             contour position in [0,1); u * no_cblobs selects the colour
//                 blob used for colour matching (aborts via exit(1) if the
//                 resulting index is out of range -- "should not happen").
//   pos           centre of the search window in the image.
//   search_line   direction along which candidate positions are sampled.
//   normal        edge normal, forwarded to edge_response().
//   window_size   total search range: sampled at 2*nsubs+1 positions spaced
//                 edge_scale apart, where nsubs is chosen so the spacing does
//                 not exceed max_scale.
//   occlusion_map consulted per sample; an occluded sample aborts the search.
//   currx         unused in this implementation.
//   p_obs         out: image position of the strongest accepted edge sample.
//   var           out: always set to 0 here -- the weighted-variance
//                 computation is commented out below.
//   active_model  unused in this implementation.
//
// Returns EDGE_OCCLUDED if any sampled position is occluded, EDGE_BAD if no
// sample exceeds significance_threshold, otherwise EDGE_GOOD.  On success the
// winning pixel is also fed into the current blob's colour statistics via
// add_observation().
edge_status_t
ColourEdgeDetector::find_edge(realno u, Point2 &pos,
			      Point2 &search_line,
			      Point2 &normal, realno window_size,
			      OcclusionHandler *occlusion_map,
			      Profile *currx,
			      Point2 &p_obs, realno &var,
			      ActiveModel *active_model)
{
    int indx = (int) (u * no_cblobs);
    if (indx >= no_cblobs)	// should not happen
    {
	cerror << " ColourEdgeDetector::find_edge(): indx out of range " << endl;
	exit(1);
    }
    current_blob = &cblobs[indx];

#ifdef DEBUG
    if (tracker->debug_level == 1)
    {
	current_blob->visualise(indx / (realno) no_cblobs);
    }
#endif

    int nsubs; // sampling size

    if (window_size > max_scale)  // this will generally be the case
	nsubs = (int) (0.5 + (window_size / max_scale));
    else
	nsubs = 1;

    // Spacing between samples, and the (rounded) step handed to edge_response().
    realno edge_scale = window_size / ((realno) nsubs);
    int grid_scale = (int) (edge_scale + 0.5);

    var = 0;
    realno max_e = -significance_threshold;

    // +(0.5, 0.5) so the later (int) truncations round to the nearest pixel
    // (for non-negative coordinates).
    Point2 pos_base = pos + Point2(0.5, 0.5);
    Point2 best_offset(0,0);

    RGB32pixel *best_pix = NULL;

//   realno sum_e = 0;
//   realno sum_d = 0;
//   realno sq_sum = 0;

    // Sample 2*nsubs+1 positions along the search line, keeping the one with
    // the strongest response above significance_threshold.
    for (int i = -nsubs; i <= nsubs; i++)
    {
//	realno d_i = i * edge_scale;
	Point2 offset =  (i * edge_scale) * search_line;;  // NOTE(review): stray second ';' -- harmless empty statement

	realno e_i;
	Point2 pos_i = pos_base + offset;

	if (occlusion_map->is_occlusion(pos_i.x, pos_i.y))
	    return EDGE_OCCLUDED;

	RGB32pixel *inside_pix =
	    edge_response((int) pos_i.x, (int) pos_i.y, grid_scale, normal, e_i);

	//if (inside_pix == NULL) return EDGE_OCCLUDED;

	// new_e_i is only consumed by the commented-out variance code below.
	realno new_e_i = 0;
	if ((inside_pix != NULL) && (e_i > significance_threshold))
	{
	    new_e_i = e_i;
	    if (e_i > max_e)
	    {
		max_e = e_i;
		best_pix = inside_pix;
		best_offset = offset;
	    }
	}

//       sum_e += new_e_i;
//       sum_d += d_i * new_e_i;
//       sq_sum += d_i * d_i * new_e_i;

    }

    if (best_pix == NULL)
	return EDGE_BAD;

    // Update the blob's colour statistics with the winning inside pixel.
    current_blob->add_observation(best_pix);

//   realno mean = sum_d / sum_e;
//   p_obs = pos + mean * search_line;
//   var = 0.25 * ((sq_sum / sum_e) - (mean * mean));

    p_obs = pos + best_offset;

    return EDGE_GOOD;
}

// Prepare for a batch of find_edge() calls on profile 'curr'.
// On the profile's first use this allocates its ColourBlob array and records
// the array size in the profile; on later calls each existing blob's update()
// is invoked instead.  The detector's cblobs pointer is then aimed at the
// profile's array before delegating to the base-class setup_batch().
// NOTE(review): the array is stored in the Profile and never freed here --
// presumably the Profile owns and deletes it; verify against Profile's dtor.
void ColourEdgeDetector::setup_batch(Profile *curr)
{
    if (curr->colour_info == NULL)
    {
	curr->colour_info = new ColourBlob[no_cblobs];
	curr->no_cblobs = no_cblobs;  // remember array size
    }
    else
    {
	for (int i = 0; i < no_cblobs; i++)
	    curr->colour_info[i].update();
    }

    cblobs = curr->colour_info;

    this->EdgeDetector::setup_batch(curr);
}

// Construct a colour foreground edge detector.
// thresh_lower / thresh_upper are relative thresholds, scaled here by the
// maximum response into absolute difference values: thresh_background (from
// thresh_lower) bounds how background-like the outside sample must be, and
// thresh_foreground (from thresh_upper) how foreground-like the inside sample
// must be (see edge_response() below).  The significance threshold is
// disabled (set to 0).
ColourForegroundEdgeDetector::ColourForegroundEdgeDetector(realno thresh_lower, realno thresh_upper,
							   ActiveShapeTracker *the_tracker, int the_no_cblobs)
    : ColourEdgeDetector(the_no_cblobs)
{
    assert (the_tracker != NULL);
    tracker = the_tracker;

    set_significance_threshold(0.0);
    thresh_background = thresh_lower * get_maximum_response();
    thresh_foreground = thresh_upper * get_maximum_response();
}

// Measure the edge response at image position (x,y).
// Samples two pixels at (x,y) -/+ step*n: the "left" pixel (treated as the
// inside-the-person side) and the "right" pixel (the outside).  For each, a
// difference-from-background magnitude is obtained -- read from a
// precalculated difference image when one is available, otherwise computed on
// the fly as the Euclidean RGB distance between the video and background
// images.  'edge' is set to 0 unless the left difference exceeds
// thresh_foreground AND the right difference stays below thresh_background;
// when colour-blob statistics exist, the expected inside colour is blended in
// to make the response more selective.  Returns the inside ("left") video
// pixel, or NULL when either sample falls outside the image.
RGB32pixel*
ColourForegroundEdgeDetector::edge_response(realno x, realno y,
					    int step, Point2 n,
					    realno &edge)
{
    // FIXME: could use other methods than sqrt of sum squared for differencing

    int dx = real_to_int(n.x * step);
    int dy = real_to_int(n.y * step);

    int xr = (int) (x + dx);
    int yr = (int) (y + dy);
    int xl = (int) (x - dx);
    int yl = (int) (y - dy);

    Image *vidimage = tracker->get_active_model()->get_video_image();
    Image *refimage = tracker->get_active_model()->get_background_image();

    // check whether either of the points is off image
    if ((vidimage->check_coords(xr,yr) == false) || (vidimage->check_coords(xl,yl) == false))
	return NULL;

#ifdef DEBUG
    if (tracker->debug_level == 1)
	cdebug << "edge (" << x << "," << y
	     << "): step +/- (" << dx << "," << dy << ")  ";
#endif

    realno leftdiff, rightdiff;
    RGB32pixel *vid_leftpixel, *vid_rightpixel;
    RGB32pixel *ref_leftpixel, *ref_rightpixel;
    RGB32pixel *diff_leftpixel, *diff_rightpixel;

    // this is the position to test whether it is inside the person:
    vid_leftpixel = (RGB32pixel*) vidimage->get_pixel(xl,yl);

    // check whether a pre-calculated difference image is available (else we get NULL)
    Image *diffimage = tracker->get_active_model()->get_difference_image();

    if (diffimage != NULL)   // we have a precalculated difference image!
    {
	// we use these pixels in the difference image...
	diff_leftpixel = (RGB32pixel*) diffimage->get_pixel(xl,yl);
	diff_rightpixel = (RGB32pixel*) diffimage->get_pixel(xr,yr);

	// measure for left pixel...
	leftdiff = sqrt((float)(SQUARE(diff_leftpixel->red) +
				SQUARE(diff_leftpixel->green) +
				SQUARE(diff_leftpixel->blue)));

	// measure for right pixel...
	rightdiff = sqrt((float)(SQUARE((int)diff_rightpixel->red) +
				 SQUARE((int)diff_rightpixel->green) +
				 SQUARE((int)diff_rightpixel->blue)));

#ifdef DEBUG
	if (tracker->debug_level == 1)
	    cdebug << " pre: ";  // tell user where the numbers come from
#endif
    }
    else  // we don't have precalculated difference so calculate on the fly
    {
	// we need these pixels...
	vid_rightpixel = (RGB32pixel*) vidimage->get_pixel(xr,yr);
	ref_leftpixel = (RGB32pixel*) refimage->get_pixel(xl,yl);
	ref_rightpixel = (RGB32pixel*) refimage->get_pixel(xr,yr);

	// measure for left pixel...
	int rdiff = vid_leftpixel->red - ref_leftpixel->red;
	int gdiff = vid_leftpixel->green - ref_leftpixel->green;
	int bdiff = vid_leftpixel->blue - ref_leftpixel->blue;

	leftdiff = sqrt((float)(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff));

	// measure for right pixel...
	rdiff = vid_rightpixel->red - ref_rightpixel->red;
	gdiff = vid_rightpixel->green - ref_rightpixel->green;
	bdiff = vid_rightpixel->blue - ref_rightpixel->blue;

	rightdiff = sqrt((float)(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff));

#ifdef DEBUG
	if (tracker->debug_level == 1)
	    cdebug << " fly: ";  // tell user where the numbers come from
#endif
    }

    // the calculation of the difference is the same...

    // No edge unless "left" looks foreground-like and "right" background-like.
    if ((leftdiff <= thresh_foreground) || (rightdiff >= thresh_background))
    {
	edge = 0;

#ifdef DEBUG
	if (tracker->debug_level == 1)
	    cdebug << "edge = 0  because "
		 << leftdiff << " <= " << thresh_foreground << " or "
		 << rightdiff << " >= " << thresh_background << endl;
#endif
    }
    else
    {
	realno col_dist = current_blob->colour_distance(vid_leftpixel);

	if (col_dist == 0)  // seems we don't have colour blobs yet
	{
	    edge = (leftdiff - rightdiff);   //  so we cannot use that info

#ifdef DEBUG
	    if (tracker->debug_level == 1)
		cdebug << "edge = " << (leftdiff - rightdiff) << endl;
#endif
	}
	else
	{
	    // we know what colour to expect inside the person ("left") so
	    // let's use this to make tracking (hopefully) more robust...
	    edge = (2 * (leftdiff - rightdiff) - col_dist) / 3;

#ifdef DEBUG
	    if (tracker->debug_level == 1)
		cdebug << "edge = (2 * (" << leftdiff << "-" << rightdiff
		     << ") - " << col_dist << ") / 3  == " << edge << endl;
#endif
	}
    }

    return vid_leftpixel;
}

// Scale factor used by the constructor to turn the relative thresholds into
// absolute RGB-difference values.
inline realno ColourForegroundEdgeDetector::get_maximum_response()
{
    return // nts: changed weighting so had to change this...  was " 2.0 * "
	MAX_RGB_DISTANCE;
}

// Construct a normalised-colour foreground edge detector.  Identical in
// structure to the ColourForegroundEdgeDetector constructor above, but the
// thresholds are scaled by this class's own (normalised) maximum response.
NormalisedColourForegroundEdgeDetector::NormalisedColourForegroundEdgeDetector(realno thresh_lower,
									       realno thresh_upper,
									       ActiveShapeTracker *the_tracker,
									       int the_no_cblobs)
    : ColourEdgeDetector(the_no_cblobs)
{
    assert (the_tracker != NULL);
    tracker = the_tracker;

    set_significance_threshold(0.0);
    thresh_background = thresh_lower * get_maximum_response();
    thresh_foreground = thresh_upper * get_maximum_response();
}

// Brightness-normalised variant of edge_response(): each channel is divided
// by (1 + R + G + B) of its own pixel before video and background are
// differenced, making the measure largely independent of overall brightness
// (the +1 guards against a zero denominator for black pixels).  Thresholding
// is as in the non-normalised version; above threshold, the response also
// rewards closeness of the inside pixel to the blob's expected normalised
// colour.  Returns the inside ("left") video pixel, or NULL when either
// sample is off image.
RGB32pixel *
NormalisedColourForegroundEdgeDetector::edge_response(realno x, realno y,
						      int step, Point2 n,
						      realno &edge)
{
    int dx = real_to_int(n.x * step);
    int dy = real_to_int(n.y * step);

    int xr = (int) (x + dx);
    int yr = (int) (y + dy);
    int xl = (int) (x - dx);
    int yl = (int) (y - dy);

    Image *vidimage = tracker->get_active_model()->get_video_image();
    Image *refimage = tracker->get_active_model()->get_background_image();

    if (!((vidimage->check_coords(xr,yr)) && (vidimage->check_coords(xl,yl))))
	return NULL;

    RGB32pixel *vid_rightpixel = (RGB32pixel*) vidimage->get_pixel(xr,yr);
    RGB32pixel *ref_rightpixel = (RGB32pixel*) refimage->get_pixel(xr,yr);
    RGB32pixel *vid_leftpixel = (RGB32pixel*) vidimage->get_pixel(xl,yl);
    RGB32pixel *ref_leftpixel = (RGB32pixel*) refimage->get_pixel(xl,yl);

    // Per-pixel brightness sums used as normalisation denominators.
    realno sum1 = 1 +
	vid_rightpixel->red + vid_rightpixel->green + vid_rightpixel->blue;
    realno sum2 = 1 +
	ref_rightpixel->red + ref_rightpixel->green + ref_rightpixel->blue;
    realno sum3 = 1 +
	vid_leftpixel->red + vid_leftpixel->green + vid_leftpixel->blue;
    realno sum4 = 1 +
	ref_leftpixel->red + ref_leftpixel->green + ref_leftpixel->blue;

    // Normalised video-vs-background difference for the right (outside) pixel...
    realno rdiff = (vid_rightpixel->red / sum1) - (ref_rightpixel->red / sum2);
    realno gdiff = (vid_rightpixel->green / sum1) - (ref_rightpixel->green / sum2);
    realno bdiff = (vid_rightpixel->blue / sum1) - (ref_rightpixel->blue / sum2);

    realno rightdiff = sqrt((float)(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff));

    // ...and for the left (inside) pixel.
    rdiff = (vid_leftpixel->red / sum3) - (ref_leftpixel->red / sum4);
    gdiff = (vid_leftpixel->green / sum3) - (ref_leftpixel->green / sum4);
    bdiff = (vid_leftpixel->blue / sum3) - (ref_leftpixel->blue / sum4);

    realno leftdiff = sqrt((float)(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff));

    if ((leftdiff <= thresh_foreground) || (rightdiff >= thresh_background))
	edge = 0;
    else
    {
	edge = (leftdiff - rightdiff) +
	    MAX_NORM_RGB_DISTANCE -
	    current_blob->norm_colour_distance(vid_leftpixel);
    }

    return vid_leftpixel;
}

// Scale factor used by the constructor to turn the relative thresholds into
// absolute normalised-difference values.
inline realno NormalisedColourForegroundEdgeDetector::get_maximum_response()
{
    return 2.0 * MAX_NORM_RGB_DISTANCE;
}

} // namespace ReadingPeopleTracker

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -