
📄 rr_graph.c

📁 VPR place-and-route source code
💻 C
📖 Page 1 of 5
    assert(Fs % 3 == 0);

    for(i = 0; i <= nx; i++)
    {
        for(j = 0; j <= ny; j++)
        {
            if(i > 0)
            {
                build_rr_xchan(i, j, track_to_ipin_lookup, switch_block_conn,
                               CHANX_COST_INDEX_START, nodes_per_chan,
                               opin_mux_size, sblock_pattern, Fs / 3,
                               seg_details, rr_node_indices, rr_edge_done,
                               rr_node, wire_to_ipin_switch, directionality);
            }
            if(j > 0)
            {
                build_rr_ychan(i, j, track_to_ipin_lookup, switch_block_conn,
                               CHANX_COST_INDEX_START + num_seg_types,
                               nodes_per_chan, opin_mux_size, sblock_pattern,
                               Fs / 3, seg_details, rr_node_indices,
                               rr_edge_done, rr_node, wire_to_ipin_switch,
                               directionality);
            }
        }
    }

    free(opin_mux_size);
}

/* Builds the edges from each output pin (OPIN) of the block at (i, j) to the
 * bidirectional routing tracks it can reach, using opin_to_track_map and the
 * block type's Fc_out.  Only the zero-offset tile of a multi-height block
 * does the work. */
static void
build_bidir_rr_opins(IN int i,
                     IN int j,
                     INOUT t_rr_node * rr_node,
                     IN t_ivec *** rr_node_indices,
                     IN int *****opin_to_track_map,
                     IN int *Fc_out,
                     IN boolean * rr_edge_done,
                     IN t_seg_details * seg_details,
                     IN struct s_grid_tile **grid)
{
    int ipin, inode, num_edges, Fc, ofs;
    t_type_ptr type;
    struct s_linked_edge *edge_list, *next;

    /* OPIN edges need to be done at once so let the offset 0
     * block do the work. */
    if(grid[i][j].offset > 0)
    {
        return;
    }

    type = grid[i][j].type;
    Fc = Fc_out[type->index];

    for(ipin = 0; ipin < type->num_pins; ++ipin)
    {
        /* We are only working with opins, so skip non-drivers. */
        if(type->class_inf[type->pin_class[ipin]].type != DRIVER)
        {
            continue;
        }

        num_edges = 0;
        edge_list = NULL;
        for(ofs = 0; ofs < type->height; ++ofs)
        {
            num_edges += get_bidir_opin_connections(i, j + ofs, ipin,
                                                    &edge_list,
                                                    opin_to_track_map, Fc,
                                                    rr_edge_done,
                                                    rr_node_indices,
                                                    seg_details);
        }

        inode = get_rr_node_index(i, j, OPIN, ipin, rr_node_indices);
        alloc_and_load_edges_and_switches(rr_node, inode, num_edges,
                                          rr_edge_done, edge_list);

        while(edge_list != NULL)
        {
            next = edge_list->next;
            free(edge_list);
            edge_list = next;
        }
    }
}

void
free_rr_graph(void)
{
    int i;

    /* Frees all the routing graph data structures, if they have been       *
     * allocated.  I use rr_mem_chunk_list_head as a flag to indicate       *
     * whether or not the graph has been allocated -- if it is not NULL,    *
     * a routing graph exists and can be freed.  Hence, you can call this   *
     * routine even if you're not sure of whether a rr_graph exists or not. */

    if(rr_mem_chunk_list_head == NULL)          /* Nothing to free. */
        return;

    free_chunk_memory(rr_mem_chunk_list_head);  /* Frees ALL "chunked" data.  */
    rr_mem_chunk_list_head = NULL;              /* No chunks allocated now.   */
    chunk_bytes_avail = 0;                      /* 0 bytes left in current "chunk". */
    chunk_next_avail_mem = NULL;                /* No current chunk.          */

    /* Before adding any more free calls here, be sure the data is NOT chunk *
     * allocated, as ALL the chunk allocated data is already free!           */

    free(net_rr_terminals);

    for(i = 0; i < num_rr_nodes; i++)
    {
        if(rr_node[i].edges != NULL)
        {
            free(rr_node[i].edges);
        }
        if(rr_node[i].switches != NULL)
        {
            free(rr_node[i].switches);
        }
    }

    assert(rr_node_indices);
    free_rr_node_indices(rr_node_indices);
    free(rr_node);
    free(rr_indexed_data);

    for(i = 0; i < num_blocks; i++)
    {
        free(rr_blk_source[i]);
    }
    free(rr_blk_source);

    rr_blk_source = NULL;
    net_rr_terminals = NULL;
    rr_node = NULL;
    rr_node_indices = NULL;
    rr_indexed_data = NULL;
}
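/* A minimal usage sketch (hypothetical helper, not part of this file, and
 * never called): because rr_mem_chunk_list_head doubles as the "graph
 * exists" flag, callers may free unconditionally -- even repeatedly --
 * before rebuilding the routing graph.                                      */
#if 0
static void
reset_rr_graph_sketch(void)
{
    free_rr_graph();    /* Frees the graph if one was built.                 */
    free_rr_graph();    /* Harmless: returns early since the flag is NULL.   */
}
#endif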
/* Allocates net_rr_terminals[0..num_nets-1]: one row per net, with
 * (num_sinks + 1) entries per row.  The rows are chunk allocated, so
 * free_rr_graph() releases them with the rest of the chunked data rather
 * than freeing them one by one. */
static void
alloc_net_rr_terminals(void)
{
    int inet;

    net_rr_terminals = (int **)my_malloc(num_nets * sizeof(int *));

    for(inet = 0; inet < num_nets; inet++)
    {
        net_rr_terminals[inet] =
            (int *)my_chunk_malloc((net[inet].num_sinks + 1) * sizeof(int),
                                   &rr_mem_chunk_list_head,
                                   &chunk_bytes_avail,
                                   &chunk_next_avail_mem);
    }
}

void
load_net_rr_terminals(t_ivec *** rr_node_indices)
{
    /* Allocates and loads the net_rr_terminals data structure.  For each net  *
     * it stores the rr_node index of the SOURCE of the net and all the SINKs  *
     * of the net.  [0..num_nets-1][0..num_pins-1].  Entry [inet][pnum] stores *
     * the rr index corresponding to the SOURCE (opin) or SINK (ipin) of pnum. */

    int inet, ipin, inode, iblk, i, j, k, node_block_pin, iclass;
    t_type_ptr type;

    for(inet = 0; inet < num_nets; inet++)
    {
        for(ipin = 0; ipin <= net[inet].num_sinks; ipin++)
        {
            iblk = net[inet].node_block[ipin];
            i = block[iblk].x;
            j = block[iblk].y;
            k = block[iblk].z;
            type = block[iblk].type;

            /* In the routing graph, each (x, y) location has unique pins on
             * it, so when there is capacity, blocks are packed and their pin
             * numbers are offset to get their actual rr_node. */
            node_block_pin = net[inet].node_block_pin[ipin];

            iclass = type->pin_class[node_block_pin];

            inode = get_rr_node_index(i, j,
                                      (ipin == 0 ? SOURCE : SINK),  /* First pin is the driver. */
                                      iclass, rr_node_indices);
            net_rr_terminals[inet][ipin] = inode;
        }
    }
}

static void
alloc_and_load_rr_clb_source(t_ivec *** rr_node_indices)
{
    /* Saves the rr_node corresponding to each SOURCE and SINK in each FB       *
     * in the FPGA.  Currently only the SOURCE rr_node values are used, and     *
     * they are used only to reserve pins for locally used OPINs in the router. *
     * [0..num_blocks-1][0..num_class-1].  The values for blocks that are pads  *
     * are NOT valid.                                                           */

    int iblk, i, j, iclass, inode;
    int class_low, class_high;
    t_rr_type rr_type;
    t_type_ptr type;

    rr_blk_source = (int **)my_malloc(num_blocks * sizeof(int *));

    for(iblk = 0; iblk < num_blocks; iblk++)
    {
        type = block[iblk].type;
        get_class_range_for_block(iblk, &class_low, &class_high);
        rr_blk_source[iblk] = (int *)my_malloc(type->num_class * sizeof(int));

        for(iclass = 0; iclass < type->num_class; iclass++)
        {
            if(iclass >= class_low && iclass <= class_high)
            {
                i = block[iblk].x;
                j = block[iblk].y;

                if(type->class_inf[iclass].type == DRIVER)
                    rr_type = SOURCE;
                else
                    rr_type = SINK;

                inode = get_rr_node_index(i, j, rr_type, iclass,
                                          rr_node_indices);
                rr_blk_source[iblk][iclass] = inode;
            }
            else
            {
                rr_blk_source[iblk][iclass] = OPEN;
            }
        }
    }
}
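/* A minimal read-side sketch of the table filled by load_net_rr_terminals
 * above (hypothetical helper, not part of this file): entry [inet][0] is the
 * net's SOURCE rr_node and entries [inet][1..net[inet].num_sinks] are its
 * SINK rr_nodes.  Assumes <stdio.h> is pulled in by the headers included
 * earlier in the file (not shown on this page).                             */
#if 0
static void
print_net_rr_terminals_sketch(void)
{
    int inet, ipin;

    for(inet = 0; inet < num_nets; inet++)
    {
        printf("net %d: SOURCE rr_node %d\n", inet, net_rr_terminals[inet][0]);
        for(ipin = 1; ipin <= net[inet].num_sinks; ipin++)
        {
            printf("  SINK rr_node %d\n", net_rr_terminals[inet][ipin]);
        }
    }
}
#endif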
static void
build_rr_sinks_sources(IN int i,
                       IN int j,
                       IN t_rr_node * rr_node,
                       IN t_ivec *** rr_node_indices,
                       IN int delayless_switch,
                       IN struct s_grid_tile **grid)
{
    /* Loads IPIN, SINK, SOURCE, and OPIN.
     * Loads IPIN to SINK edges, and SOURCE to OPIN edges. */

    int ipin, iclass, inode, pin_num, to_node, num_edges;
    int num_class, num_pins;
    t_type_ptr type;
    struct s_class *class_inf;
    int *pin_class;

    /* Since we share nodes within a large block, only the
     * start tile can initialize sinks, sources, and pins. */
    if(grid[i][j].offset > 0)
        return;

    type = grid[i][j].type;
    num_class = type->num_class;
    class_inf = type->class_inf;
    num_pins = type->num_pins;
    pin_class = type->pin_class;

    /* SINKs, and SOURCE to OPIN edges. */
    for(iclass = 0; iclass < num_class; iclass++)
    {
        if(class_inf[iclass].type == DRIVER)
        {                       /* SOURCE */
            inode = get_rr_node_index(i, j, SOURCE, iclass, rr_node_indices);

            num_edges = class_inf[iclass].num_pins;
            rr_node[inode].num_edges = num_edges;
            rr_node[inode].edges = (int *)my_malloc(num_edges * sizeof(int));
            rr_node[inode].switches =
                (short *)my_malloc(num_edges * sizeof(short));

            for(ipin = 0; ipin < class_inf[iclass].num_pins; ipin++)
            {
                pin_num = class_inf[iclass].pinlist[ipin];
                to_node = get_rr_node_index(i, j, OPIN, pin_num,
                                            rr_node_indices);
                rr_node[inode].edges[ipin] = to_node;
                rr_node[inode].switches[ipin] = delayless_switch;

                ++rr_node[to_node].fan_in;
            }

            rr_node[inode].cost_index = SOURCE_COST_INDEX;
            rr_node[inode].type = SOURCE;
        }
        else
        {                       /* SINK */
            assert(class_inf[iclass].type == RECEIVER);
            inode = get_rr_node_index(i, j, SINK, iclass, rr_node_indices);

            /* NOTE:  To allow route throughs through clbs, change the lines  *
             * below to make an edge from the input SINK to the output        *
             * SOURCE.  Do for just the special case of INPUTS = class 0 and  *
             * OUTPUTS = class 1 and see what it leads to.  If route throughs *
             * are allowed, you may want to increase the base cost of OPINs   *
             * and/or SOURCES so they aren't used excessively.                */

            /* Initialize to unconnected to fix values. */
            rr_node[inode].num_edges = 0;
            rr_node[inode].edges = NULL;
            rr_node[inode].switches = NULL;

            rr_node[inode].cost_index = SINK_COST_INDEX;
            rr_node[inode].type = SINK;
        }

        /* Things common to both SOURCEs and SINKs. */
        rr_node[inode].capacity = class_inf[iclass].num_pins;
        rr_node[inode].occ = 0;
        rr_node[inode].xlow = i;
        rr_node[inode].xhigh = i;
        rr_node[inode].ylow = j;
        rr_node[inode].yhigh = j + type->height - 1;
        rr_node[inode].R = 0;
        rr_node[inode].C = 0;
        rr_node[inode].ptc_num = iclass;
        rr_node[inode].direction = OPEN;
        rr_node[inode].drivers = OPEN;
    }

    /* Connect IPINS to SINKS and dummy for OPINS. */
    for(ipin = 0; ipin < num_pins; ipin++)
    {
        iclass = pin_class[ipin];

        if(class_inf[iclass].type == RECEIVER)
        {
            inode = get_rr_node_index(i, j, IPIN, ipin, rr_node_indices);
            to_node = get_rr_node_index(i, j, SINK, iclass, rr_node_indices);

            rr_node[inode].num_edges = 1;
            rr_node[inode].edges = (int *)my_malloc(sizeof(int));
            rr_node[inode].switches = (short *)my_malloc(sizeof(short));

            rr_node[inode].edges[0] = to_node;
            rr_node[inode].switches[0] = delayless_switch;

            ++rr_node[to_node].fan_in;

            rr_node[inode].cost_index = IPIN_COST_INDEX;
            rr_node[inode].type = IPIN;
        }
        else
        {
            assert(class_inf[iclass].type == DRIVER);
            inode = get_rr_node_index(i, j, OPIN, ipin, rr_node_indices);

            rr_node[inode].num_edges = 0;
            rr_node[inode].edges = NULL;
            rr_node[inode].switches = NULL;

            rr_node[inode].cost_index = OPIN_COST_INDEX;
            rr_node[inode].type = OPIN;
        }

        /* Common to both DRIVERs and RECEIVERs. */
        rr_node[inode].capacity = 1;
        rr_node[inode].occ = 0;
        rr_node[inode].xlow = i;
        rr_node[inode].xhigh = i;
        rr_node[inode].ylow = j;
        rr_node[inode].yhigh = j + type->height - 1;
        rr_node[inode].C = 0;
        rr_node[inode].R = 0;
        rr_node[inode].ptc_num = ipin;
        rr_node[inode].direction = OPEN;
        rr_node[inode].drivers = OPEN;
    }
}
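/* A sketch of the route-through experiment described in the NOTE inside
 * build_rr_sinks_sources above (hypothetical helper, not part of this file).
 * It assumes, as the NOTE does, that INPUTS are class 0 and OUTPUTS are
 * class 1, and it mirrors the edges/switches allocation pattern used above:
 * instead of leaving the SINK unconnected, give it one delayless edge to the
 * block's SOURCE.                                                           */
#if 0
static void
add_route_through_edge_sketch(INOUT t_rr_node * rr_node,
                              IN t_ivec *** rr_node_indices,
                              IN int i,
                              IN int j,
                              IN int delayless_switch)
{
    int sink_inode, source_inode;

    sink_inode = get_rr_node_index(i, j, SINK, 0, rr_node_indices);
    source_inode = get_rr_node_index(i, j, SOURCE, 1, rr_node_indices);

    rr_node[sink_inode].num_edges = 1;
    rr_node[sink_inode].edges = (int *)my_malloc(sizeof(int));
    rr_node[sink_inode].switches = (short *)my_malloc(sizeof(short));
    rr_node[sink_inode].edges[0] = source_inode;
    rr_node[sink_inode].switches[0] = delayless_switch;
    ++rr_node[source_inode].fan_in;
}
#endif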
static void
build_rr_xchan(IN int i,
               IN int j,
               IN struct s_ivec ****track_to_ipin_lookup,
               IN struct s_ivec ***switch_block_conn,
               IN int cost_index_offset,
               IN int nodes_per_chan,
               IN int *opin_mux_size,
               IN short *****sblock_pattern,
               IN int Fs_per_side,
               IN t_seg_details * seg_details,
               IN t_ivec *** rr_node_indices,
               INOUT boolean * rr_edge_done,
               INOUT t_rr_node * rr_node,
               IN int wire_to_ipin_switch,
               IN enum e_directionality directionality)
{
    /* Loads up all the routing resource nodes in the x-directed channel      *
     * segments starting at (i, j).                                           */

    int itrack, istart, iend, num_edges, inode, length;
    struct s_linked_edge *edge_list, *next;

    for(itrack = 0; itrack < nodes_per_chan; itrack++)
    {
        istart = get_seg_start(seg_details, itrack, j, i);
        iend = get_seg_end(seg_details, itrack, istart, j, nx);

        if(i > istart)
            continue;           /* Not the start of this segment. */

        edge_list = NULL;

        /* First count number of edges and put the edges in a linked list. */
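        /* Added note: the edges are gathered in two passes, as in
         * build_bidir_rr_opins above -- the get_track_to_*() helpers append
         * candidate edges to edge_list and return how many they added, with
         * rr_edge_done used to avoid creating the same edge twice.  The
         * counted list can then be handed to alloc_and_load_edges_and_switches
         * (as build_bidir_rr_opins does) and freed.                          */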
        num_edges = 0;
        num_edges += get_track_to_ipins(istart, j, itrack,
                                        &edge_list,
                                        rr_node_indices,
                                        track_to_ipin_lookup,
                                        seg_details,
                                        CHANX, nx,
                                        wire_to_ipin_switch,
                                        directionality);

        if(j > 0)
        {
            num_edges += get_track_to_tracks(j, istart, itrack, CHANX,
                                             j, CHANY, nx,
                                             nodes_per_chan,
                                             opin_mux_size,
                                             Fs_per_side,
                                             sblock_pattern,
                                             &edge_list,
                                             seg_details,
                                             directionality,
                                             rr_node_indices,
                                             rr_edge_done,
                                             switch_block_conn);
        }

        if(j < ny)
        {
            num_edges += get_track_to_tracks(j, istart, itrack, CHANX,
                                             j + 1, CHANY, nx,
                                             nodes_per_chan,
                                             opin_mux_size,
