
📄 route_timing.c

📁 VPR — FPGA place-and-route software for academic research
💻 C
📖 Page 1 of 2
#include <stdio.h>
#include <math.h>
#include "util.h"
#include "vpr_types.h"
#include "globals.h"
#include "mst.h"
#include "route_export.h"
#include "route_common.h"
#include "route_tree_timing.h"
#include "route_timing.h"
#include "heapsort.h"
#include "path_delay.h"
#include "net_delay.h"

/******************** Subroutines local to route_timing.c ********************/

static int get_max_pins_per_net(void);

static void add_route_tree_to_heap(t_rt_node *rt_node,
                                   int target_node,
                                   float target_criticality,
                                   float astar_fac);

static void timing_driven_expand_neighbours(struct s_heap *current,
                                            int inet,
                                            float bend_cost,
                                            float criticality_fac,
                                            int target_node,
                                            float astar_fac);

static float get_timing_driven_expected_cost(int inode,
                                             int target_node,
                                             float criticality_fac,
                                             float R_upstream);

static int get_expected_segs_to_target(int inode,
                                       int target_node,
                                       int *num_segs_ortho_dir_ptr);

static void update_rr_base_costs(int inet,
                                 float largest_criticality);

static void timing_driven_check_net_delays(float **net_delay);

/************************ Subroutine definitions *****************************/

boolean
try_timing_driven_route(struct s_router_opts router_opts,
                        float **net_slack,
                        float **net_delay,
                        t_ivec **fb_opins_used_locally)
{
    /* Timing-driven routing algorithm.  The timing graph (which includes     *
     * net_slack) and net_delay must both have been allocated already.        *
     * Returns TRUE if the routing succeeds, FALSE otherwise.                 */

    int itry, inet, ipin;
    boolean success, is_routable, rip_up_local_opins;
    float *pin_criticality;          /* [1..max_pins_per_net-1]. */
    int *sink_order;                 /* [1..max_pins_per_net-1]. */
    t_rt_node **rt_node_of_sink;     /* [1..max_pins_per_net-1]. */
    float T_crit, pres_fac;

    alloc_timing_driven_route_structs(&pin_criticality, &sink_order,
                                      &rt_node_of_sink);

    /* First do one routing iteration ignoring congestion and marking all     *
     * sinks on each net as critical to get reasonable net delay estimates.   */

    for(inet = 0; inet < num_nets; inet++)
    {
        if(net[inet].is_global == FALSE)
        {
            for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
                net_slack[inet][ipin] = 0.;
        }
        else
        {   /* Set delay of global signals to zero. */
            for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
                net_delay[inet][ipin] = 0.;
        }
    }

    T_crit = 1.;
    pres_fac = router_opts.first_iter_pres_fac;  /* Typically 0 -> ignore congestion. */

    for(itry = 1; itry <= router_opts.max_router_iterations; itry++)
    {
        for(inet = 0; inet < num_nets; inet++)
        {
            if(net[inet].is_global == FALSE)
            {   /* Skip global nets. */
                is_routable =
                    timing_driven_route_net(inet, pres_fac,
                                            router_opts.max_criticality,
                                            router_opts.criticality_exp,
                                            router_opts.astar_fac,
                                            router_opts.bend_cost,
                                            net_slack[inet],
                                            pin_criticality, sink_order,
                                            rt_node_of_sink, T_crit,
                                            net_delay[inet]);

                /* Impossible to route? (disconnected rr_graph) */
                if(!is_routable)
                {
                    printf("Routing failed.\n");
                    free_timing_driven_route_structs(pin_criticality,
                                                     sink_order,
                                                     rt_node_of_sink);
                    return (FALSE);
                }
            }
        }

        /* Make sure any FB OPINs used up by subblocks being hooked directly  *
         * to them are reserved for that purpose.                             */

        if(itry == 1)
            rip_up_local_opins = FALSE;
        else
            rip_up_local_opins = TRUE;

        reserve_locally_used_opins(pres_fac, rip_up_local_opins,
                                   fb_opins_used_locally);

        /* Pathfinder guys quit after finding a feasible route.  I may want   *
         * to keep going longer, trying to improve timing.  Think about this  *
         * some.                                                              */

        success = feasible_routing();
        if(success)
        {
            printf("Successfully routed after %d routing iterations.\n", itry);
            free_timing_driven_route_structs(pin_criticality, sink_order,
                                             rt_node_of_sink);
#ifdef DEBUG
            timing_driven_check_net_delays(net_delay);
#endif
            return (TRUE);
        }

        if(itry == 1)
        {
            pres_fac = router_opts.initial_pres_fac;
            pathfinder_update_cost(pres_fac, 0.);   /* Acc_fac = 0 for first iter. */
        }
        else
        {
            pres_fac *= router_opts.pres_fac_mult;
            pathfinder_update_cost(pres_fac, router_opts.acc_fac);
        }

        /* Update slack values by doing another timing analysis.              *
         * Timing_driven_route_net updated the net delay values.              */

        load_timing_graph_net_delays(net_delay);
        T_crit = load_net_slack(net_slack, 0);
        printf("T_crit: %g.\n", T_crit);
    }

    printf("Routing failed.\n");
    free_timing_driven_route_structs(pin_criticality, sink_order,
                                     rt_node_of_sink);
    return (FALSE);
}


void
alloc_timing_driven_route_structs(float **pin_criticality_ptr,
                                  int **sink_order_ptr,
                                  t_rt_node ***rt_node_of_sink_ptr)
{
    /* Allocates all the structures needed only by the timing-driven router. */

    int max_pins_per_net;
    float *pin_criticality;
    int *sink_order;
    t_rt_node **rt_node_of_sink;

    max_pins_per_net = get_max_pins_per_net();

    pin_criticality =
        (float *)my_malloc((max_pins_per_net - 1) * sizeof(float));
    *pin_criticality_ptr = pin_criticality - 1;   /* First sink is pin #1. */

    sink_order = (int *)my_malloc((max_pins_per_net - 1) * sizeof(int));
    *sink_order_ptr = sink_order - 1;

    rt_node_of_sink = (t_rt_node **)my_malloc((max_pins_per_net - 1) *
                                              sizeof(t_rt_node *));
    *rt_node_of_sink_ptr = rt_node_of_sink - 1;

    alloc_route_tree_timing_structs();
}


void
free_timing_driven_route_structs(float *pin_criticality,
                                 int *sink_order,
                                 t_rt_node **rt_node_of_sink)
{
    /* Frees all the structures needed only by the timing-driven router. */

    free(pin_criticality + 1);   /* Starts at index 1. */
    free(sink_order + 1);
    free(rt_node_of_sink + 1);
    free_route_tree_timing_structs();
}


static int
get_max_pins_per_net(void)
{
    /* Returns the largest number of pins on any non-global net. */

    int inet, max_pins_per_net;

    max_pins_per_net = 0;
    for(inet = 0; inet < num_nets; inet++)
    {
        if(net[inet].is_global == FALSE)
        {
            max_pins_per_net =
                max(max_pins_per_net, (net[inet].num_sinks + 1));
        }
    }

    return (max_pins_per_net);
}


boolean
timing_driven_route_net(int inet,
                        float pres_fac,
                        float max_criticality,
                        float criticality_exp,
                        float astar_fac,
                        float bend_cost,
                        float *net_slack,
                        float *pin_criticality,
                        int *sink_order,
                        t_rt_node **rt_node_of_sink,
                        float T_crit,
                        float *net_delay)
{
    /* Returns TRUE as long as it found some way to hook up this net, even if *
     * that way resulted in overuse of resources (congestion).  If there is   *
     * no way to route this net, even ignoring congestion, it returns FALSE.  *
     * In this case the rr_graph is disconnected and you can give up.         */

    int ipin, num_sinks, itarget, target_pin, target_node, inode;
    float target_criticality, old_tcost, new_tcost, largest_criticality,
        pin_crit;
    float old_back_cost, new_back_cost;
    t_rt_node *rt_root;
    struct s_heap *current;
    struct s_trace *new_route_start_tptr;

    /* Rip up any old routing. */

    pathfinder_update_one_cost(trace_head[inet], -1, pres_fac);
    free_traceback(inet);

    for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
    {   /* For all sinks */
        pin_crit = max(max_criticality - net_slack[ipin] / T_crit, 0.);
        pin_crit = pow(pin_crit, criticality_exp);
        pin_crit = min(pin_crit, max_criticality);
        pin_criticality[ipin] = pin_crit;
    }

    num_sinks = net[inet].num_sinks;
    heapsort(sink_order, pin_criticality, num_sinks);

    /* Update base costs according to fanout and criticality rules. */

    largest_criticality = pin_criticality[sink_order[1]];
    update_rr_base_costs(inet, largest_criticality);

    mark_ends(inet);   /* Only needed to check for multiply-connected SINKs. */

    rt_root = init_route_tree_to_source(inet);

    for(itarget = 1; itarget <= num_sinks; itarget++)
    {
        target_pin = sink_order[itarget];
        target_node = net_rr_terminals[inet][target_pin];

        target_criticality = pin_criticality[target_pin];

        add_route_tree_to_heap(rt_root, target_node, target_criticality,
                               astar_fac);

        current = get_heap_head();

        if(current == NULL)
        {   /* Infeasible routing.  No possible path for net. */
            reset_path_costs();
            free_route_tree(rt_root);
            return (FALSE);
        }

        inode = current->index;

        while(inode != target_node)
        {
            old_tcost = rr_node_route_inf[inode].path_cost;
            new_tcost = current->cost;

            if(old_tcost > 0.99 * HUGE_FLOAT)   /* First time touched. */
                old_back_cost = HUGE_FLOAT;
            else
                old_back_cost =
                    rr_node_route_inf[inode].backward_path_cost;

            new_back_cost = current->backward_path_cost;

            /* I only re-expand a node if both the "known" backward cost is   *
             * lower in the new expansion (this is necessary to prevent loops *
             * from forming in the routing and causing havoc) *and* the       *
             * expected total cost to the sink is lower than the old value.   *
             * Different R_upstream values could make a path with lower       *
             * back_path_cost less desirable than one with higher cost.       *
             * Test whether or not I should disallow re-expansion based on a  *
             * higher total cost.                                             */

            if(old_tcost > new_tcost && old_back_cost > new_back_cost)
            {
                rr_node_route_inf[inode].prev_node = current->u.prev_node;
                rr_node_route_inf[inode].prev_edge = current->prev_edge;
                rr_node_route_inf[inode].path_cost = new_tcost;
                rr_node_route_inf[inode].backward_path_cost = new_back_cost;

                if(old_tcost > 0.99 * HUGE_FLOAT)   /* First time touched. */
                    add_to_mod_list(&rr_node_route_inf[inode].path_cost);

                timing_driven_expand_neighbours(current, inet, bend_cost,
                                                target_criticality,
                                                target_node, astar_fac);
            }

            free_heap_data(current);
            current = get_heap_head();

            if(current == NULL)
            {   /* Impossible routing.  No path for net. */
                reset_path_costs();
                free_route_tree(rt_root);
                return (FALSE);
            }

            inode = current->index;
        }

        /* NB:  In the code below I keep two records of the partial routing:  *
         * the traceback and the route_tree.  The route_tree enables fast     *
         * recomputation of the Elmore delay to each node in the partial      *
         * routing.  The traceback lets me reuse all the routines written for *
         * breadth-first routing, which all take a traceback structure as     *
         * input.  Before this routine exits the route_tree structure is      *
         * destroyed; only the traceback is needed at that point.             */

        rr_node_route_inf[inode].target_flag--;   /* Connected to this SINK. */

        new_route_start_tptr = update_traceback(current, inet);
        rt_node_of_sink[target_pin] = update_route_tree(current);
        free_heap_data(current);
        pathfinder_update_one_cost(new_route_start_tptr, 1, pres_fac);

        empty_heap();
        reset_path_costs();
    }

    /* For later timing analysis. */

    update_net_delays_from_route_tree(net_delay, rt_node_of_sink, inet);
    free_route_tree(rt_root);
    return (TRUE);
}


static void
add_route_tree_to_heap(t_rt_node *rt_node,
                       int target_node,
                       float target_criticality,
                       float astar_fac)
{
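(The listing above ends here on page 1; add_route_tree_to_heap continues on page 2.)

For reference, below is a minimal standalone sketch of the per-sink criticality rule that timing_driven_route_net applies before sorting the sinks: crit = min(max(max_criticality - slack / T_crit, 0)^criticality_exp, max_criticality). The helper name sink_criticality and the use of a separate function are illustrative assumptions, not part of route_timing.c; the snippet only restates the arithmetic visible in the listing.

#include <math.h>

/* Hypothetical helper (not in route_timing.c): restates the per-sink       *
 * criticality computation performed inline by timing_driven_route_net().   */
float
sink_criticality(float slack, float T_crit,
                 float max_criticality, float criticality_exp)
{
    float crit = max_criticality - slack / T_crit;   /* Less slack -> more critical. */

    if(crit < 0.)
        crit = 0.;                          /* Clamp at zero. */
    crit = pow(crit, criticality_exp);      /* Sharpen the criticality distribution. */
    if(crit > max_criticality)
        crit = max_criticality;             /* Never exceed the configured cap. */
    return crit;
}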
