rr_graph.c
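/*
 * Excerpt from rr_graph.c, the routing-resource graph builder.  The first
 * block below is the tail of the top-level graph-construction routine: it
 * builds the switch-block connectivity lookup and the IPIN/OPIN pin-to-track
 * maps, calls alloc_and_load_rr_graph(), checks the result with
 * check_rr_graph(), and frees the temporary structures.  Several static
 * helpers from the same file follow.
 */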
    if(is_global_graph) {
        assert(nodes_per_chan == 1);
        switch_block_conn = alloc_and_load_switch_block_conn(1, SUBSET, 3);
    } else if(BI_DIRECTIONAL == directionality) {
        switch_block_conn =
            alloc_and_load_switch_block_conn(nodes_per_chan, sb_type, Fs);
    } else {
        assert(UNI_DIRECTIONAL == directionality);
        unidir_sb_pattern =
            alloc_sblock_pattern_lookup(nx, ny, nodes_per_chan);
        for(i = 0; i <= nx; i++) {
            for(j = 0; j <= ny; j++) {
                load_sblock_pattern_lookup(i, j, nodes_per_chan, seg_details,
                                           Fs, sb_type, unidir_sb_pattern);
            }
        }
    }
    /* END SB LOOKUP */

    /* START IPIN MAP */
    /* Create ipin map lookups */
    ipin_to_track_map = (int *****)my_malloc(sizeof(int ****) * num_types);
    track_to_ipin_lookup =
        (struct s_ivec ****)my_malloc(sizeof(struct s_ivec ***) * num_types);
    for(i = 0; i < num_types; ++i) {
        ipin_to_track_map[i] =
            alloc_and_load_pin_to_track_map(RECEIVER, nodes_per_chan,
                                            Fc_in[i], &types[i],
                                            perturb_ipins[i], directionality);
        track_to_ipin_lookup[i] =
            alloc_and_load_track_to_pin_lookup(ipin_to_track_map[i], Fc_in[i],
                                               types[i].height,
                                               types[i].num_pins,
                                               nodes_per_chan);
    }
    /* END IPIN MAP */

    /* START OPIN MAP */
    /* Create opin map lookups */
    if(BI_DIRECTIONAL == directionality) {
        opin_to_track_map =
            (int *****)my_malloc(sizeof(int ****) * num_types);
        for(i = 0; i < num_types; ++i) {
            opin_to_track_map[i] =
                alloc_and_load_pin_to_track_map(DRIVER, nodes_per_chan,
                                                Fc_out[i], &types[i], FALSE,
                                                directionality);
        }
    }
    /* END OPIN MAP */
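    /*
     * Index layout of the lookups built above, as implied by the free_*()
     * helpers later in this file:
     *   ipin_to_track_map[itype][ipin][height_offset][side][iconn]  -> track
     *   track_to_ipin_lookup[itype][itrack][height_offset][side]    -> s_ivec of pins
     * where side runs over the four block edges (0..3) and iconn presumably
     * runs over the Fc connections of that pin.
     */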
    /* UDSD Modifications by WMF begin */
    /* I'm adding 2 new fields to t_rr_node, and I want them initialized to 0. */
    for(i = 0; i < num_rr_nodes; i++) {
        rr_node[i].num_wire_drivers = 0;
        rr_node[i].num_opin_drivers = 0;
    }

    alloc_and_load_rr_graph(num_rr_nodes, rr_node, num_seg_types, seg_details,
                            rr_edge_done, track_to_ipin_lookup,
                            opin_to_track_map, switch_block_conn, grid, nx, ny,
                            Fs, unidir_sb_pattern, Fc_out, Fc_xofs, Fc_yofs,
                            rr_node_indices, nodes_per_chan, sb_type,
                            delayless_switch, directionality,
                            wire_to_ipin_switch, &Fc_clipped);

#if 0
#ifdef MUX_SIZE_DIST_DISPLAY
    if(UNI_DIRECTIONAL == directionality) {
        view_mux_size_distribution(rr_node_indices, nodes_per_chan,
                                   seg_details, seg_details);
    }
#endif
#endif

    /* Update rr_nodes capacities if global routing */
    if(graph_type == GLOBAL) {
        for(i = 0; i < num_rr_nodes; i++) {
            if(rr_node[i].type == CHANX || rr_node[i].type == CHANY) {
                rr_node[i].capacity = chan_width;
            }
        }
    }

    rr_graph_externals(timing_inf, segment_inf, num_seg_types, nodes_per_chan,
                       wire_to_ipin_switch, base_cost_type);

#ifdef CREATE_ECHO_FILES
    dump_rr_graph("rr_graph.echo");
#endif /* CREATE_ECHO_FILES */

    check_rr_graph(graph_type, num_types, types, nx, ny, grid, nodes_per_chan,
                   Fs, num_seg_types, num_switches, segment_inf,
                   global_route_switch, delayless_switch, wire_to_ipin_switch,
                   seg_details, Fc_in, Fc_out, rr_node_indices,
                   opin_to_track_map, ipin_to_track_map, track_to_ipin_lookup,
                   switch_block_conn, perturb_ipins);

    /* Free all temp structs */
    if(seg_details) {
        free_seg_details(seg_details, nodes_per_chan);
        seg_details = NULL;
    }
    if(Fc_in) {
        free(Fc_in);
        Fc_in = NULL;
    }
    if(Fc_out) {
        free(Fc_out);
        Fc_out = NULL;
    }
    if(perturb_ipins) {
        free(perturb_ipins);
        perturb_ipins = NULL;
    }
    if(switch_block_conn) {
        free_switch_block_conn(switch_block_conn, nodes_per_chan);
        switch_block_conn = NULL;
    }
    if(rr_edge_done) {
        free(rr_edge_done);
        rr_edge_done = NULL;
    }
    if(Fc_xofs) {
        free_matrix(Fc_xofs, 0, ny, 0, sizeof(int));
        Fc_xofs = NULL;
    }
    if(Fc_yofs) {
        free_matrix(Fc_yofs, 0, nx, 0, sizeof(int));
        Fc_yofs = NULL;
    }
    if(unidir_sb_pattern) {
        free_sblock_pattern_lookup(unidir_sb_pattern);
        unidir_sb_pattern = NULL;
    }
    if(opin_to_track_map) {
        for(i = 0; i < num_types; ++i) {
            free_matrix4(opin_to_track_map[i], 0, types[i].num_pins - 1,
                         0, types[i].height - 1, 0, 3, 0, sizeof(int));
        }
        free(opin_to_track_map);
    }

    free_type_pin_to_track_map(ipin_to_track_map, types, Fc_in);
    free_type_track_to_ipin_map(track_to_ipin_lookup, types, nodes_per_chan);
}

static void
rr_graph_externals(t_timing_inf timing_inf,
                   t_segment_inf * segment_inf,
                   int num_seg_types,
                   int nodes_per_chan,
                   int wire_to_ipin_switch,
                   enum e_base_cost_type base_cost_type)
{
    add_rr_graph_C_from_switches(timing_inf.C_ipin_cblock);
    alloc_and_load_rr_indexed_data(segment_inf, num_seg_types,
                                   rr_node_indices, nodes_per_chan,
                                   wire_to_ipin_switch, base_cost_type);

    alloc_net_rr_terminals();
    load_net_rr_terminals(rr_node_indices);
    alloc_and_load_rr_clb_source(rr_node_indices);
}

static boolean *
alloc_and_load_perturb_ipins(IN int nodes_per_chan,
                             IN int num_types,
                             IN int *Fc_in,
                             IN int *Fc_out,
                             IN enum e_directionality directionality)
{
    int i;
    float Fc_ratio;
    boolean *result = NULL;

    result = (boolean *) my_malloc(num_types * sizeof(boolean));

    if(BI_DIRECTIONAL == directionality) {
        for(i = 0; i < num_types; ++i) {
            result[i] = FALSE;

            if(Fc_in[i] > Fc_out[i]) {
                Fc_ratio = (float)Fc_in[i] / (float)Fc_out[i];
            } else {
                Fc_ratio = (float)Fc_out[i] / (float)Fc_in[i];
            }

            if((Fc_in[i] <= nodes_per_chan - 2) &&
               (fabs(Fc_ratio - nint(Fc_ratio)) <
                (0.5 / (float)nodes_per_chan))) {
                result[i] = TRUE;
            }
        }
    } else {
        /* Unidirectional routing uses mux balancing patterns and
         * thus shouldn't need perturbation. */
        assert(UNI_DIRECTIONAL == directionality);
        for(i = 0; i < num_types; ++i) {
            result[i] = FALSE;
        }
    }

    return result;
}

static t_seg_details *
alloc_and_load_global_route_seg_details(IN int nodes_per_chan,
                                        IN int global_route_switch)
{
    t_seg_details *result = NULL;

    assert(nodes_per_chan == 1);
    result = (t_seg_details *) my_malloc(sizeof(t_seg_details));

    result->index = 0;
    result->length = 1;
    result->wire_switch = global_route_switch;
    result->opin_switch = global_route_switch;
    result->longline = FALSE;
    result->direction = BI_DIRECTION;
    result->Cmetal = 0.0;
    result->Rmetal = 0.0;
    result->start = 1;
    result->drivers = MULTI_BUFFERED;
    result->cb = (boolean *) my_malloc(sizeof(boolean) * 1);
    result->cb[0] = TRUE;
    result->sb = (boolean *) my_malloc(sizeof(boolean) * 2);
    result->sb[0] = TRUE;
    result->sb[1] = TRUE;
    result->group_size = 1;
    result->group_start = 0;

    return result;
}
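/*
 * Worked example of the Fc rounding performed below (hypothetical numbers):
 * with unidirectional routing and nodes_per_chan = 12, fac = 2 and
 * num_sets = 6, so a fractional Fc of 0.5 gives Result = 2 * nint(6 * 0.5)
 * = 6 tracks, i.e. Fc is quantized to whole track pairs before being clamped
 * to the [fac, nodes_per_chan] range.
 */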
/* Calculates the actual Fc values for the given nodes_per_chan value */
static int *
alloc_and_load_actual_fc(IN int num_types,
                         IN t_type_ptr types,
                         IN int nodes_per_chan,
                         IN boolean is_Fc_out,
                         IN enum e_directionality directionality,
                         OUT boolean * Fc_clipped)
{
    int i;
    int *Result = NULL;
    int fac, num_sets;
    float Fc;

    *Fc_clipped = FALSE;

    /* Unidir tracks are formed in pairs, otherwise no effect. */
    fac = 1;
    if(UNI_DIRECTIONAL == directionality) {
        fac = 2;
    }

    assert((nodes_per_chan % fac) == 0);
    num_sets = nodes_per_chan / fac;

    Result = (int *)my_malloc(sizeof(int) * num_types);

    for(i = 0; i < num_types; ++i) {
        Fc = (is_Fc_out ? type_descriptors[i].Fc_out :
              type_descriptors[i].Fc_in);

        if(type_descriptors[i].is_Fc_frac) {
            Result[i] = fac * nint(num_sets * Fc);
        } else {
            Result[i] = Fc;
        }

        if(is_Fc_out && type_descriptors[i].is_Fc_out_full_flex) {
            Result[i] = nodes_per_chan;
        }

        Result[i] = max(Result[i], fac);
        if(Result[i] > nodes_per_chan) {
            *Fc_clipped = TRUE;
            Result[i] = nodes_per_chan;
        }

        assert(Result[i] % fac == 0);
    }

    return Result;
}

/* frees the track to ipin mapping for each physical grid type */
static void
free_type_track_to_ipin_map(struct s_ivec ****track_to_pin_map,
                            t_type_ptr types,
                            int nodes_per_chan)
{
    int i, itrack, ioff, iside;

    for(i = 0; i < num_types; i++) {
        if(track_to_pin_map[i] != NULL) {
            for(itrack = 0; itrack < nodes_per_chan; itrack++) {
                for(ioff = 0; ioff < types[i].height; ioff++) {
                    for(iside = 0; iside < 4; iside++) {
                        if(track_to_pin_map[i][itrack][ioff][iside].list != NULL) {
                            free(track_to_pin_map[i][itrack][ioff][iside].list);
                        }
                    }
                }
            }
            free_matrix3(track_to_pin_map[i], 0, nodes_per_chan - 1,
                         0, types[i].height - 1, 0, sizeof(struct s_ivec));
        }
    }
    free(track_to_pin_map);
}

/* frees the ipin to track mapping for each physical grid type */
static void
free_type_pin_to_track_map(int *****ipin_to_track_map,
                           t_type_ptr types,
                           int *fc_in)
{
    int i;

    for(i = 0; i < num_types; i++) {
        free_matrix4(ipin_to_track_map[i], 0, types[i].num_pins - 1,
                     0, types[i].height - 1, 0, 3, 0, sizeof(int));
    }
    free(ipin_to_track_map);
}

/* Does the actual work of allocating the rr_graph and filling all the   *
 * appropriate values.  Everything up to this was just a prelude!        */
static void
alloc_and_load_rr_graph(IN int num_nodes,
                        IN t_rr_node * rr_node,
                        IN int num_seg_types,
                        IN t_seg_details * seg_details,
                        IN boolean * rr_edge_done,
                        IN struct s_ivec ****track_to_ipin_lookup,
                        IN int *****opin_to_track_map,
                        IN struct s_ivec ***switch_block_conn,
                        IN struct s_grid_tile **grid,
                        IN int nx,
                        IN int ny,
                        IN int Fs,
                        IN short *****sblock_pattern,
                        IN int *Fc_out,
                        IN int **Fc_xofs,
                        IN int **Fc_yofs,
                        IN t_ivec ***rr_node_indices,
                        IN int nodes_per_chan,
                        IN enum e_switch_block_type sb_type,
                        IN int delayless_switch,
                        IN enum e_directionality directionality,
                        IN int wire_to_ipin_switch,
                        OUT boolean * Fc_clipped)
{
    int i, j;
    boolean clipped;
    int *opin_mux_size = NULL;

    /* If Fc gets clipped, this will be flagged to true */
    *Fc_clipped = FALSE;

    /* Connect the SINKS and SOURCES to their pins. */
    for(i = 0; i <= (nx + 1); i++) {
        for(j = 0; j <= (ny + 1); j++) {
            build_rr_sinks_sources(i, j, rr_node, rr_node_indices,
                                   delayless_switch, grid);
        }
    }

    /* Build opins */
    for(i = 0; i <= (nx + 1); ++i) {
        for(j = 0; j <= (ny + 1); ++j) {
            if(BI_DIRECTIONAL == directionality) {
                build_bidir_rr_opins(i, j, rr_node, rr_node_indices,
                                     opin_to_track_map, Fc_out, rr_edge_done,
                                     seg_details, grid);
            } else {
                assert(UNI_DIRECTIONAL == directionality);
                build_unidir_rr_opins(i, j, grid, Fc_out, nodes_per_chan,
                                      seg_details, Fc_xofs, Fc_yofs, rr_node,
                                      rr_edge_done, &clipped,
                                      rr_node_indices);
                if(clipped) {
                    *Fc_clipped = TRUE;
                }
            }
        }
    }

    /* We make a copy of the current fanin values for the nodes to
     * know the number of OPINs driving each mux presently */
    opin_mux_size = (int *)my_malloc(sizeof(int) * num_nodes);
    for(i = 0; i < num_nodes; ++i) {
        opin_mux_size[i] = rr_node[i].fan_in;
    }

    /* Build channels */