📄 rr_graph.c
字号:
/* NOTE(review): this chunk begins mid-way through build_rr_xchan(); the
 * function's opening and its earlier switch-block calls lie outside this
 * view.  The fragment below finishes one x-directed track's edge list and
 * then fills in that track's rr_node entry. */
                                        Fs_per_side, sblock_pattern,
                                        &edge_list, seg_details,
                                        directionality, rr_node_indices,
                                        rr_edge_done, switch_block_conn);
        }
        /* Wires through the switch block just left of the segment start. */
        if(istart > 1)
        {
            num_edges += get_track_to_tracks(j, istart, itrack, CHANX,
                                             istart - 1, CHANX, nx,
                                             nodes_per_chan, opin_mux_size,
                                             Fs_per_side, sblock_pattern,
                                             &edge_list, seg_details,
                                             directionality, rr_node_indices,
                                             rr_edge_done, switch_block_conn);
        }
        /* Wires through the switch block just right of the segment end. */
        if(iend < nx)
        {
            num_edges += get_track_to_tracks(j, istart, itrack, CHANX,
                                             iend + 1, CHANX, nx,
                                             nodes_per_chan, opin_mux_size,
                                             Fs_per_side, sblock_pattern,
                                             &edge_list, seg_details,
                                             directionality, rr_node_indices,
                                             rr_edge_done, switch_block_conn);
        }

        inode = get_rr_node_index(i, j, CHANX, itrack, rr_node_indices);
        alloc_and_load_edges_and_switches(rr_node, inode, num_edges,
                                          rr_edge_done, edge_list);
        /* The edge list was copied into the node's arrays above; release it
         * here (the callee does not free it -- see note on that routine). */
        while(edge_list != NULL)
        {
            next = edge_list->next;
            free(edge_list);
            edge_list = next;
        }

        /* Edge arrays have now been built up.  Do everything else. */
        rr_node[inode].cost_index =
            cost_index_offset + seg_details[itrack].index;
        rr_node[inode].occ = 0;
        rr_node[inode].capacity = 1;    /* GLOBAL routing handled elsewhere */
        rr_node[inode].xlow = istart;
        rr_node[inode].xhigh = iend;
        rr_node[inode].ylow = j;
        rr_node[inode].yhigh = j;
        length = iend - istart + 1;
        /* Lumped RC scales linearly with segment length (in CLB spans). */
        rr_node[inode].R = length * seg_details[itrack].Rmetal;
        rr_node[inode].C = length * seg_details[itrack].Cmetal;
        rr_node[inode].ptc_num = itrack;
        rr_node[inode].type = CHANX;
        rr_node[inode].direction = seg_details[itrack].direction;
        rr_node[inode].drivers = seg_details[itrack].drivers;
    }
}

static void
build_rr_ychan(IN int i,
               IN int j,
               IN struct s_ivec ****track_to_ipin_lookup,
               IN struct s_ivec ***switch_block_conn,
               IN int cost_index_offset,
               IN int nodes_per_chan,
               IN int *opin_mux_size,
               IN short *****sblock_pattern,
               IN int Fs_per_side,
               IN t_seg_details * seg_details,
               IN t_ivec *** rr_node_indices,
               IN boolean * rr_edge_done,
               INOUT t_rr_node * rr_node,
               IN int wire_to_ipin_switch,
               IN enum e_directionality directionality)
{
    /* Loads up all the routing resource nodes in the y-directed channel *
     * segments starting at (i,j).  Mirror image of build_rr_xchan above *
     * with the roles of the x and y coordinates exchanged.              */

    int itrack, istart, iend, num_edges, inode, length;
    struct s_linked_edge *edge_list, *next;

    for(itrack = 0; itrack < nodes_per_chan; itrack++)
    {
        istart = get_seg_start(seg_details, itrack, i, j);
        iend = get_seg_end(seg_details, itrack, istart, i, ny);

        if(j > istart)
            continue;           /* Not the start of this segment. */

        edge_list = NULL;

        /* First count number of edges and put the edges in a linked list. */
        num_edges = 0;
        /* Fan-out to input pins along the whole segment. */
        num_edges += get_track_to_ipins(istart, i, itrack, &edge_list,
                                        rr_node_indices,
                                        track_to_ipin_lookup, seg_details,
                                        CHANY, ny, wire_to_ipin_switch,
                                        directionality);
        /* Track-to-track edges through the four neighbouring switch
         * blocks (skipped where the segment abuts the device edge). */
        if(i > 0)
        {
            num_edges += get_track_to_tracks(i, istart, itrack, CHANY, i,
                                             CHANX, ny, nodes_per_chan,
                                             opin_mux_size, Fs_per_side,
                                             sblock_pattern, &edge_list,
                                             seg_details, directionality,
                                             rr_node_indices, rr_edge_done,
                                             switch_block_conn);
        }
        if(i < nx)
        {
            num_edges += get_track_to_tracks(i, istart, itrack, CHANY, i + 1,
                                             CHANX, ny, nodes_per_chan,
                                             opin_mux_size, Fs_per_side,
                                             sblock_pattern, &edge_list,
                                             seg_details, directionality,
                                             rr_node_indices, rr_edge_done,
                                             switch_block_conn);
        }
        if(istart > 1)
        {
            num_edges += get_track_to_tracks(i, istart, itrack, CHANY,
                                             istart - 1, CHANY, ny,
                                             nodes_per_chan, opin_mux_size,
                                             Fs_per_side, sblock_pattern,
                                             &edge_list, seg_details,
                                             directionality, rr_node_indices,
                                             rr_edge_done, switch_block_conn);
        }
        if(iend < ny)
        {
            num_edges += get_track_to_tracks(i, istart, itrack, CHANY,
                                             iend + 1, CHANY, ny,
                                             nodes_per_chan, opin_mux_size,
                                             Fs_per_side, sblock_pattern,
                                             &edge_list, seg_details,
                                             directionality, rr_node_indices,
                                             rr_edge_done, switch_block_conn);
        }

        inode = get_rr_node_index(i, j, CHANY, itrack, rr_node_indices);
        alloc_and_load_edges_and_switches(rr_node, inode, num_edges,
                                          rr_edge_done, edge_list);
        /* Edge list contents are now owned by rr_node[inode]; free list. */
        while(edge_list != NULL)
        {
            next = edge_list->next;
            free(edge_list);
            edge_list = next;
        }

        /* Edge arrays have now been built up.  Do everything else. */
        rr_node[inode].cost_index =
            cost_index_offset + seg_details[itrack].index;
        rr_node[inode].occ = 0;
        rr_node[inode].capacity = 1;    /* GLOBAL routing handled elsewhere */
        rr_node[inode].xlow = i;
        rr_node[inode].xhigh = i;
        rr_node[inode].ylow = istart;
        rr_node[inode].yhigh = iend;
        length = iend - istart + 1;
        rr_node[inode].R = length * seg_details[itrack].Rmetal;
        rr_node[inode].C = length * seg_details[itrack].Cmetal;
        rr_node[inode].ptc_num = itrack;
        rr_node[inode].type = CHANY;
        rr_node[inode].direction = seg_details[itrack].direction;
        rr_node[inode].drivers = seg_details[itrack].drivers;
    }
}

/* Debugging aid: dumps rr_node[inode] and every node reachable through *
 * edge_list_head to stdout.  Uses the file-scope rr_node array.        */
void
watch_edges(int inode,
            t_linked_edge * edge_list_head)
{
    t_linked_edge *list_ptr;
    int i, to_node;     /* NOTE(review): i is counted but never printed/used */

    list_ptr = edge_list_head;
    i = 0;
    printf("!!! Watching Node %d !!!!\n", inode);
    print_rr_node(stdout, rr_node, inode);
    printf("Currently connects to: \n");
    while(list_ptr != NULL)
    {
        to_node = list_ptr->edge;
        print_rr_node(stdout, rr_node, to_node);
        list_ptr = list_ptr->next;
        i++;
    }
}

void
alloc_and_load_edges_and_switches(IN t_rr_node * rr_node,
                                  IN int inode,
                                  IN int num_edges,
                                  INOUT boolean * rr_edge_done,
                                  IN t_linked_edge * edge_list_head)
{
    /* Sets up all the edge related information for rr_node inode (num_edges, *
     * the edges array and the switches array).  The edge_list_head points to *
     * a list of the num_edges edges and switches to put in the arrays.  This *
     * linked list is freed by this routine.  This routine also resets the    *
     * rr_edge_done array for the next rr_node (i.e. set it so that no edges  *
     * are marked as having been seen before).                                *
     *                                                                        *
     * NOTE(review): contrary to the sentence above, this routine does NOT    *
     * free the list -- both channel-building callers in this file free it    *
     * themselves after this call returns.  The stale comment should be       *
     * corrected upstream.                                                    */

    t_linked_edge *list_ptr, *prev_ptr;  /* NOTE(review): prev_ptr is written
                                          * but never read (dead store).     */
    int i;

    /* Check we aren't overwriting edges */
    assert(rr_node[inode].num_edges < 1);
    assert(NULL == rr_node[inode].edges);
    assert(NULL == rr_node[inode].switches);

    rr_node[inode].num_edges = num_edges;
    rr_node[inode].edges = (int *)my_malloc(num_edges * sizeof(int));
    rr_node[inode].switches = (short *)my_malloc(num_edges * sizeof(short));

    i = 0;
    list_ptr = edge_list_head;
    while(list_ptr && (i < num_edges))
    {
        rr_node[inode].edges[i] = list_ptr->edge;
        rr_node[inode].switches[i] = list_ptr->iswitch;

        /* Track fan-in of the destination node as edges are committed. */
        ++rr_node[list_ptr->edge].fan_in;

        /* Unmark the edge since we are done considering fanout from node. */
        rr_edge_done[list_ptr->edge] = FALSE;

        prev_ptr = list_ptr;
        list_ptr = list_ptr->next;
        ++i;
    }
    /* Both must exhaust together: list length must equal num_edges. */
    assert(list_ptr == NULL);
    assert(i == num_edges);
}

static int ****
alloc_and_load_pin_to_track_map(IN enum e_pin_type pin_type,
                                IN int nodes_per_chan,
                                IN int Fc,
                                IN t_type_ptr Type,
                                IN boolean perturb_switch_pattern,
                                IN enum e_directionality directionality)
{
    int **num_dir;              /* [0..height][0..3] Number of *physical* pins on each side. */
    int ***dir_list;            /* [0..height][0..3][0..num_pins-1] list of pins of correct type *
                                 * on each side.  Max possible space alloced for simplicity      */
    int i, j, k, iside, ipin, iclass, num_phys_pins, pindex, ioff;
    int *pin_num_ordering, *side_ordering, *offset_ordering;
    int **num_done_per_dir;     /* [0..height][0..3] */
    int ****tracks_connected_to_pin;    /* [0..num_pins-1][0..height][0..3][0..Fc-1] */

    /* NB:  This wastes some space.  Could set tracks_..._pin[ipin][ioff][iside] = *
     * NULL if there is no pin on that side, or that pin is of the wrong type.     *
     * Probably not enough memory to worry about, esp. as it's temporary.          *
     * If pin ipin on side iside does not exist or is of the wrong type,           *
     * tracks_connected_to_pin[ipin][iside][0] = OPEN.                             */

    if(Type->num_pins < 1)
    {
        return NULL;
    }

    tracks_connected_to_pin = (int ****)
        alloc_matrix4(0, Type->num_pins - 1, 0, Type->height - 1, 0, 3,
                      0, Fc - 1, sizeof(int));

    /* Start with every (pin, offset, side, connection) slot unconnected. */
    for(ipin = 0; ipin < Type->num_pins; ipin++)
    {
        for(ioff = 0; ioff < Type->height; ioff++)
        {
            for(iside = 0; iside < 4; iside++)
            {
                for(i = 0; i < Fc; ++i)
                {
                    tracks_connected_to_pin[ipin][ioff][iside][i] = OPEN;   /* Unconnected. */
                }
            }
        }
    }

    num_dir = (int **)alloc_matrix(0, Type->height - 1, 0, 3, sizeof(int));
    dir_list = (int ***)alloc_matrix3(0, Type->height - 1, 0, 3,
                                      0, Type->num_pins - 1, sizeof(int));

    /* Defensive coding.  Try to crash hard if I use an unset entry.  */
    for(i = 0; i < Type->height; i++)
        for(j = 0; j < 4; j++)
            for(k = 0; k < Type->num_pins; k++)
                dir_list[i][j][k] = (-1);

    for(i = 0; i < Type->height; i++)
        for(j = 0; j < 4; j++)
            num_dir[i][j] = 0;

    /* Bucket each routable pin of the requested type by (offset, side). */
    for(ipin = 0; ipin < Type->num_pins; ipin++)
    {
        iclass = Type->pin_class[ipin];
        if(Type->class_inf[iclass].type != pin_type)    /* Doing either ipins OR opins */
            continue;

        /* Pins connecting only to global resources get no switches -> keeps the *
         * area model accurate.                                                  */
        if(Type->is_global_pin[ipin])
            continue;
        for(ioff = 0; ioff < Type->height; ioff++)
        {
            for(iside = 0; iside < 4; iside++)
            {
                if(Type->pinloc[ioff][iside][ipin] == 1)
                {
                    dir_list[ioff][iside][num_dir[ioff][iside]] = ipin;
                    num_dir[ioff][iside]++;
                }
            }
        }
    }

    num_phys_pins = 0;
    for(ioff = 0; ioff < Type->height; ioff++)
    {
        for(iside = 0; iside < 4; iside++)
            num_phys_pins += num_dir[ioff][iside];      /* Num. physical pins per type */
    }
    num_done_per_dir =
        (int **)alloc_matrix(0, Type->height - 1, 0, 3, sizeof(int));
    for(ioff = 0; ioff < Type->height; ioff++)
    {
        for(iside = 0; iside < 4; iside++)
        {
            num_done_per_dir[ioff][iside] = 0;
        }
    }
    pin_num_ordering = (int *)my_malloc(num_phys_pins * sizeof(int));
    side_ordering = (int *)my_malloc(num_phys_pins * sizeof(int));
    offset_ordering = (int *)my_malloc(num_phys_pins * sizeof(int));

    /* Connection block I use distributes pins evenly across the tracks     *
     * of ALL sides of the clb at once.  Ensures that each pin connects     *
     * to spaced out tracks in its connection block, and that the other     *
     * pins (potentially in other C blocks) connect to the remaining tracks *
     * first.  Doesn't matter for large Fc, but should make a fairly        *
     * good low Fc block that leverages the fact that usually lots of pins  *
     * are logically equivalent.                                            */

    iside = LEFT;
    ioff = Type->height - 1;
    ipin = 0;
    pindex = -1;

    /* Walk the perimeter TOP -> RIGHT -> BOTTOM -> LEFT (advancing the
     * height offset on the vertical sides), bumping pindex once per lap,
     * to interleave pins from all sides into one flat ordering. */
    while(ipin < num_phys_pins)
    {
        if(iside == TOP)
        {
            iside = RIGHT;
        }
        else if(iside == RIGHT)
        {
            if(ioff <= 0)
            {
                iside = BOTTOM;
            }
            else
            {
                ioff--;
            }
        }
        else if(iside == BOTTOM)
        {
            iside = LEFT;
        }
        else
        {
            assert(iside == LEFT);
            if(ioff >= Type->height - 1)
            {
                pindex++;
                iside = TOP;
            }
            else
            {
                ioff++;
            }
        }

        assert(pindex < num_phys_pins);
        /* Number of physical pins bounds number of logical pins */

        /* This (offset, side) bucket is exhausted; keep walking. */
        if(num_done_per_dir[ioff][iside] >= num_dir[ioff][iside])
            continue;
        pin_num_ordering[ipin] = dir_list[ioff][iside][pindex];
        side_ordering[ipin] = iside;
        offset_ordering[ipin] = ioff;
        assert(Type->pinloc[ioff][iside][dir_list[ioff][iside][pindex]]);
        num_done_per_dir[ioff][iside]++;
        ipin++;
    }

    if(perturb_switch_pattern)
    {
        load_perturbed_switch_pattern(Type,
                                      tracks_connected_to_pin,
                                      num_phys_pins,
                                      pin_num_ordering,
                                      side_ordering,
                                      offset_ordering, /* NOTE(review): source
                                      chunk is truncated here, mid-call; the
                                      remaining arguments, the else branch and
                                      the cleanup/return lie outside this view. */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -