📄 route_timing.c

📁 VPR: FPGA placement and routing software for academic research
💻 C
📖 Page 1 of 2
/* Puts the entire partial routing below and including rt_node onto the heap *
 * (except for those parts marked as not to be expanded) by calling itself   *
 * recursively.                                                              */

    int inode;
    t_rt_node *child_node;
    t_linked_rt_edge *linked_rt_edge;
    float tot_cost, backward_path_cost, R_upstream;

/* Pre-order depth-first traversal */

    if(rt_node->re_expand)
	{
	    inode = rt_node->inode;
	    backward_path_cost = target_criticality * rt_node->Tdel;
	    R_upstream = rt_node->R_upstream;
	    tot_cost = backward_path_cost +
		astar_fac * get_timing_driven_expected_cost(inode,
							    target_node,
							    target_criticality,
							    R_upstream);
	    node_to_heap(inode, tot_cost, NO_PREVIOUS, NO_PREVIOUS,
			 backward_path_cost, R_upstream);
	}

    linked_rt_edge = rt_node->u.child_list;

    while(linked_rt_edge != NULL)
	{
	    child_node = linked_rt_edge->child;
	    add_route_tree_to_heap(child_node, target_node,
				   target_criticality, astar_fac);
	    linked_rt_edge = linked_rt_edge->next;
	}
}

static void
timing_driven_expand_neighbours(struct s_heap *current,
				int inet,
				float bend_cost,
				float criticality_fac,
				int target_node,
				float astar_fac)
{

/* Puts all the rr_nodes adjacent to current on the heap.  rr_nodes outside *
 * the expanded bounding box specified in route_bb are not added to the     *
 * heap.                                                                    */

    int iconn, to_node, num_edges, inode, iswitch, target_x, target_y;
    t_rr_type from_type, to_type;
    float new_tot_cost, old_back_pcost, new_back_pcost, R_upstream;
    float new_R_upstream, Tdel;

    inode = current->index;
    old_back_pcost = current->backward_path_cost;
    R_upstream = current->R_upstream;
    num_edges = rr_node[inode].num_edges;

    target_x = rr_node[target_node].xhigh;
    target_y = rr_node[target_node].yhigh;

    for(iconn = 0; iconn < num_edges; iconn++)
	{
	    to_node = rr_node[inode].edges[iconn];

	    if(rr_node[to_node].xhigh < route_bb[inet].xmin ||
	       rr_node[to_node].xlow > route_bb[inet].xmax ||
	       rr_node[to_node].yhigh < route_bb[inet].ymin ||
	       rr_node[to_node].ylow > route_bb[inet].ymax)
		continue;	/* Node is outside (expanded) bounding box. */

/* Prune away IPINs that lead to blocks other than the target one.  Avoids  *
 * the issue of how to cost them properly so they don't get expanded before *
 * more promising routes, but makes route-throughs (via CLBs) impossible.   *
 * Change this if you want to investigate route-throughs.                   */

	    to_type = rr_node[to_node].type;
	    if(to_type == IPIN && (rr_node[to_node].xhigh != target_x ||
				   rr_node[to_node].yhigh != target_y))
		continue;

/* new_back_pcost stores the "known" part of the cost to this node -- the   *
 * congestion cost of all the routing resources back to the existing route  *
 * plus the known delay of the total path back to the source.  new_tot_cost *
 * is this "known" backward cost + an expected cost to get to the target.   */

	    new_back_pcost = old_back_pcost + (1. - criticality_fac) *
		get_rr_cong_cost(to_node);

	    iswitch = rr_node[inode].switches[iconn];
	    if(switch_inf[iswitch].buffered)
		{
		    new_R_upstream = switch_inf[iswitch].R;
		}
	    else
		{
		    new_R_upstream = R_upstream + switch_inf[iswitch].R;
		}

	    Tdel = rr_node[to_node].C * (new_R_upstream +
					 0.5 * rr_node[to_node].R);
	    Tdel += switch_inf[iswitch].Tdel;
	    new_R_upstream += rr_node[to_node].R;

	    new_back_pcost += criticality_fac * Tdel;

	    if(bend_cost != 0.)
		{
		    from_type = rr_node[inode].type;
		    to_type = rr_node[to_node].type;
		    if((from_type == CHANX && to_type == CHANY) ||
		       (from_type == CHANY && to_type == CHANX))
			new_back_pcost += bend_cost;
		}

	    new_tot_cost = new_back_pcost + astar_fac *
		get_timing_driven_expected_cost(to_node, target_node,
						criticality_fac,
						new_R_upstream);

	    node_to_heap(to_node, new_tot_cost, inode, iconn, new_back_pcost,
			 new_R_upstream);

	}			/* End for all neighbours */
}
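/* Illustrative sketch, not part of the original route_timing.c: a minimal   *
 * helper (hypothetical name) showing how the loop above composes the heap   *
 * key, with the bend_cost term omitted for brevity.  Congestion is weighted *
 * by (1 - criticality), delay by criticality, and an A*-style estimate of   *
 * the remaining cost to the target is scaled by astar_fac.                  */
static float
example_expansion_cost(float old_back_pcost,
		       float cong_cost,
		       float Tdel,
		       float criticality_fac,
		       float astar_fac,
		       float expected_cost)
{
    /* Known ("backward") cost accumulated along the partial route. */
    float new_back_pcost = old_back_pcost +
	(1. - criticality_fac) * cong_cost + criticality_fac * Tdel;

    /* Heap key: known cost plus the directed-search estimate to the target. */
    return (new_back_pcost + astar_fac * expected_cost);
}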
static float
get_timing_driven_expected_cost(int inode,
				int target_node,
				float criticality_fac,
				float R_upstream)
{

/* Determines the expected cost (due to both delay and resource cost) to reach *
 * the target node from inode.  It doesn't include the cost of inode --        *
 * that's already in the "known" path_cost.                                    */

    t_rr_type rr_type;
    int cost_index, ortho_cost_index, num_segs_same_dir, num_segs_ortho_dir;
    float expected_cost, cong_cost, Tdel;

    rr_type = rr_node[inode].type;

    if(rr_type == CHANX || rr_type == CHANY)
	{
	    num_segs_same_dir =
		get_expected_segs_to_target(inode, target_node,
					    &num_segs_ortho_dir);
	    cost_index = rr_node[inode].cost_index;
	    ortho_cost_index = rr_indexed_data[cost_index].ortho_cost_index;

	    cong_cost =
		num_segs_same_dir * rr_indexed_data[cost_index].base_cost +
		num_segs_ortho_dir *
		rr_indexed_data[ortho_cost_index].base_cost;
	    cong_cost +=
		rr_indexed_data[IPIN_COST_INDEX].base_cost +
		rr_indexed_data[SINK_COST_INDEX].base_cost;

	    Tdel = num_segs_same_dir * rr_indexed_data[cost_index].T_linear +
		num_segs_ortho_dir *
		rr_indexed_data[ortho_cost_index].T_linear +
		num_segs_same_dir * num_segs_same_dir *
		rr_indexed_data[cost_index].T_quadratic +
		num_segs_ortho_dir * num_segs_ortho_dir *
		rr_indexed_data[ortho_cost_index].T_quadratic +
		R_upstream * (num_segs_same_dir *
			      rr_indexed_data[cost_index].C_load +
			      num_segs_ortho_dir *
			      rr_indexed_data[ortho_cost_index].C_load);

	    Tdel += rr_indexed_data[IPIN_COST_INDEX].T_linear;

	    expected_cost =
		criticality_fac * Tdel + (1. - criticality_fac) * cong_cost;
	    return (expected_cost);
	}

    else if(rr_type == IPIN)
	{			/* Change if you're allowing route-throughs */
	    return (rr_indexed_data[SINK_COST_INDEX].base_cost);
	}

    else
	{			/* Change this if you want to investigate route-throughs */
	    return (0.);
	}
}

/* Macro used below to ensure that fractions are rounded up, but floating   *
 * point values very close to an integer are rounded to that integer.       */

#define ROUND_UP(x) (ceil (x - 0.001))
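/* Worked example (illustrative, not part of the original source):           *
 * ROUND_UP(2.0005) = ceil(1.9995) = 2, since 2.0005 is within 0.001 of an   *
 * integer, while ROUND_UP(2.25) = ceil(2.249) = 3, so genuine fractions     *
 * still round up.                                                           */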
static int
get_expected_segs_to_target(int inode,
			    int target_node,
			    int *num_segs_ortho_dir_ptr)
{

/* Returns the number of segments the same type as inode that will be needed *
 * to reach target_node (not including inode) in each direction (the same    *
 * direction (horizontal or vertical) as inode and the orthogonal direction).*/

    t_rr_type rr_type;
    int target_x, target_y, num_segs_same_dir, cost_index, ortho_cost_index;
    int no_need_to_pass_by_clb;
    float inv_length, ortho_inv_length, ylow, yhigh, xlow, xhigh;

    target_x = rr_node[target_node].xlow;
    target_y = rr_node[target_node].ylow;
    cost_index = rr_node[inode].cost_index;
    inv_length = rr_indexed_data[cost_index].inv_length;
    ortho_cost_index = rr_indexed_data[cost_index].ortho_cost_index;
    ortho_inv_length = rr_indexed_data[ortho_cost_index].inv_length;
    rr_type = rr_node[inode].type;

    if(rr_type == CHANX)
	{
	    ylow = rr_node[inode].ylow;
	    xhigh = rr_node[inode].xhigh;
	    xlow = rr_node[inode].xlow;

	    /* Count vertical (orthogonal to inode) segs first. */

	    if(ylow > target_y)
		{		/* Coming from a row above target? */
		    *num_segs_ortho_dir_ptr =
			ROUND_UP((ylow - target_y + 1.) * ortho_inv_length);
		    no_need_to_pass_by_clb = 1;
		}
	    else if(ylow < target_y - 1)
		{		/* Below the FB bottom? */
		    *num_segs_ortho_dir_ptr = ROUND_UP((target_y - ylow) *
						       ortho_inv_length);
		    no_need_to_pass_by_clb = 1;
		}
	    else
		{		/* In a row that passes by target FB */
		    *num_segs_ortho_dir_ptr = 0;
		    no_need_to_pass_by_clb = 0;
		}

	    /* Now count horizontal (same dir. as inode) segs. */

	    if(xlow > target_x + no_need_to_pass_by_clb)
		{
		    num_segs_same_dir =
			ROUND_UP((xlow - no_need_to_pass_by_clb -
				  target_x) * inv_length);
		}
	    else if(xhigh < target_x - no_need_to_pass_by_clb)
		{
		    num_segs_same_dir =
			ROUND_UP((target_x - no_need_to_pass_by_clb -
				  xhigh) * inv_length);
		}
	    else
		{
		    num_segs_same_dir = 0;
		}
	}
    else
	{			/* inode is a CHANY */
	    ylow = rr_node[inode].ylow;
	    yhigh = rr_node[inode].yhigh;
	    xlow = rr_node[inode].xlow;

	    /* Count horizontal (orthogonal to inode) segs first. */

	    if(xlow > target_x)
		{		/* Coming from a column right of target? */
		    *num_segs_ortho_dir_ptr =
			ROUND_UP((xlow - target_x + 1.) * ortho_inv_length);
		    no_need_to_pass_by_clb = 1;
		}
	    else if(xlow < target_x - 1)
		{		/* Left of and not adjacent to the FB? */
		    *num_segs_ortho_dir_ptr = ROUND_UP((target_x - xlow) *
						       ortho_inv_length);
		    no_need_to_pass_by_clb = 1;
		}
	    else
		{		/* In a column that passes by target FB */
		    *num_segs_ortho_dir_ptr = 0;
		    no_need_to_pass_by_clb = 0;
		}

	    /* Now count vertical (same dir. as inode) segs. */

	    if(ylow > target_y + no_need_to_pass_by_clb)
		{
		    num_segs_same_dir =
			ROUND_UP((ylow - no_need_to_pass_by_clb -
				  target_y) * inv_length);
		}
	    else if(yhigh < target_y - no_need_to_pass_by_clb)
		{
		    num_segs_same_dir =
			ROUND_UP((target_y - no_need_to_pass_by_clb -
				  yhigh) * inv_length);
		}
	    else
		{
		    num_segs_same_dir = 0;
		}
	}

    return (num_segs_same_dir);
}
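/* Worked example (illustrative, not in the original source): for a CHANX    *
 * wire built from length-4 segments (inv_length = 0.25) whose right end is  *
 * 9 CLBs short of the target column and already in the target row,          *
 * num_segs_same_dir = ROUND_UP(9 * 0.25) = ceil(2.249) = 3 and the          *
 * orthogonal count is 0.                                                    */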
static void
update_rr_base_costs(int inet,
		     float largest_criticality)
{

/* Changes the base costs of different types of rr_nodes according to the  *
 * criticality, fanout, etc. of the current net being routed (inet).       */

    float fanout, factor;
    int index;

    fanout = net[inet].num_sinks;

    /* Other reasonable values for factor include fanout and 1 */
    factor = sqrt(fanout);

    for(index = CHANX_COST_INDEX_START; index < num_rr_indexed_data; index++)
	{
	    if(rr_indexed_data[index].T_quadratic > 0.)
		{		/* pass transistor */
		    rr_indexed_data[index].base_cost =
			rr_indexed_data[index].saved_base_cost * factor;
		}
	    else
		{
		    rr_indexed_data[index].base_cost =
			rr_indexed_data[index].saved_base_cost;
		}
	}
}

#define ERROR_TOL 0.0001

static void
timing_driven_check_net_delays(float **net_delay)
{

/* Checks that the net delays computed incrementally during timing driven    *
 * routing match those computed from scratch by the net_delay.c module.      */

    int inet, ipin;
    float **net_delay_check;
    struct s_linked_vptr *ch_list_head_net_delay_check;

    net_delay_check = alloc_net_delay(&ch_list_head_net_delay_check);
    load_net_delay_from_routing(net_delay_check);

    for(inet = 0; inet < num_nets; inet++)
	{
	    for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
		{
		    if(net_delay_check[inet][ipin] == 0.)
			{	/* Should be only GLOBAL nets */
			    if(net_delay[inet][ipin] != 0.)
				{
				    printf
					("Error in timing_driven_check_net_delays: net %d pin %d."
					 "\tIncremental calc. net_delay is %g, but from scratch "
					 "net delay is %g.\n", inet, ipin,
					 net_delay[inet][ipin],
					 net_delay_check[inet][ipin]);
				    exit(1);
				}
			}
		    else
			{
			    if(fabs
			       (1. -
				net_delay[inet][ipin] /
				net_delay_check[inet][ipin]) > ERROR_TOL)
				{
				    printf
					("Error in timing_driven_check_net_delays: net %d pin %d."
					 "\tIncremental calc. net_delay is %g, but from scratch "
					 "net delay is %g.\n", inet, ipin,
					 net_delay[inet][ipin],
					 net_delay_check[inet][ipin]);
				    exit(1);
				}
			}
		}
	}

    free_net_delay(net_delay_check, &ch_list_head_net_delay_check);
    printf("Completed net delay value cross check successfully.\n");
}
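/* Worked example (illustrative, not in the original source): with an        *
 * incremental net_delay of 1.00005e-9 s and a from-scratch value of         *
 * 1.0e-9 s, the relative error fabs(1. - 1.00005) = 5e-5 is below           *
 * ERROR_TOL (1e-4), so the pin passes the cross check; a 0.1% mismatch      *
 * would print the error message and exit.                                   */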
