rr_graph2.c
#include <stdio.h>
#include <assert.h>
#include "util.h"
#include "vpr_types.h"
#include "globals.h"
#include "rr_graph_util.h"
#include "rr_graph2.h"
#include "rr_graph_sbox.h"

#define ALLOW_SWITCH_OFF

/* WMF: May 07 I put this feature in, but on May 09 in my testing phase I
 * found that for Wilton, this feature is bad, since Wilton is already doing
 * a reverse. */
#define ENABLE_REVERSE 0

#define SAME_TRACK -5
#define UN_SET -1

/* Variables below are the global variables shared only amongst the rr_graph
 * routines. */

/* [0..num_rr_nodes-1]. TRUE if a node is already listed in the edges array
 * that's being constructed. This allows me to ensure that there are never
 * duplicate edges (two edges between the same thing). */
boolean *rr_edge_done;

/* Used to keep my own list of free linked integers, for speed reasons. */
t_linked_edge *free_edge_list_head = NULL;

/*************************** Variables local to this module *****************/

/* Two arrays below give the rr_node_index of the channel segment at
 * (i,j,track) for fast index lookup. */

/* UDSD Modifications by WMF Begin */
/* The sblock_pattern_init_mux_lookup contains the assignment of incoming
 * wires to muxes. More specifically, it only contains the assignment of
 * M-to-N cases. WMF_BETTER_COMMENTS */
/* UDSD Modifications by WMF End */

/************************** Subroutines local to this module ****************/

static void get_switch_type(boolean is_from_sbox, boolean is_to_sbox,
                            short from_node_switch, short to_node_switch,
                            short switch_types[2]);

static void load_chan_rr_indices(IN int nodes_per_chan, IN int chan_len,
                                 IN int num_chans, IN t_rr_type type,
                                 IN t_seg_details * seg_details,
                                 INOUT int *index, INOUT t_ivec *** indices);

static int get_bidir_track_to_chan_seg(IN struct s_ivec conn_tracks,
                                       IN t_ivec *** rr_node_indices,
                                       IN int to_chan, IN int to_seg,
                                       IN int to_sb, IN t_rr_type to_type,
                                       IN t_seg_details * seg_details,
                                       IN boolean from_is_sbox,
                                       IN int from_switch,
                                       INOUT boolean * rr_edge_done,
                                       IN enum e_directionality directionality,
                                       INOUT struct s_linked_edge **edge_list);

static int get_unidir_track_to_chan_seg(IN boolean is_end_sb,
                                        IN int from_track, IN int to_chan,
                                        IN int to_seg, IN int to_sb,
                                        IN t_rr_type to_type,
                                        IN int nodes_per_chan,
                                        IN int nx, IN int ny,
                                        IN enum e_side from_side,
                                        IN enum e_side to_side,
                                        IN int Fs_per_side,
                                        IN int *opin_mux_size,
                                        IN short *****sblock_pattern,
                                        IN t_ivec *** rr_node_indices,
                                        IN t_seg_details * seg_details,
                                        INOUT boolean * rr_edge_done,
                                        OUT boolean * Fs_clipped,
                                        INOUT struct s_linked_edge **edge_list);

static int vpr_to_phy_track(IN int itrack, IN int chan_num, IN int seg_num,
                            IN t_seg_details * seg_details,
                            IN enum e_directionality directionality);

static int *get_seg_track_counts(IN int num_sets, IN int num_seg_types,
                                 IN t_segment_inf * segment_inf,
                                 IN boolean use_full_seg_groups);

static int *label_wire_muxes(IN int chan_num, IN int seg_num,
                             IN t_seg_details * seg_details, IN int max_len,
                             IN enum e_direction dir, IN int nodes_per_chan,
                             OUT int *num_wire_muxes);

static int *label_wire_muxes_for_balance(IN int chan_num, IN int seg_num,
                                         IN t_seg_details * seg_details,
                                         IN int max_len,
                                         IN enum e_direction direction,
                                         IN int nodes_per_chan,
                                         IN int *num_wire_muxes,
                                         IN t_rr_type chan_type,
                                         IN int *opin_mux_size,
                                         IN t_ivec *** rr_node_indices);

static int *label_incoming_wires(IN int chan_num, IN int seg_num,
                                 IN int sb_seg,
                                 IN t_seg_details * seg_details,
                                 IN int max_len, IN enum e_direction dir,
                                 IN int nodes_per_chan,
                                 OUT int *num_incoming_wires,
                                 OUT int *num_ending_wires);

static int find_label_of_track(int *wire_mux_on_track, int num_wire_muxes,
                               int from_track);

/******************** Subroutine definitions *******************************/

/* This assigns tracks (individually or in pairs) to segment types. It tries
 * to match the requested ratio. If use_full_seg_groups is TRUE, then
 * segments are assigned only in multiples of their length. This is primarily
 * used for making a tileable unidir layout. The effect is that the number of
 * tracks requested will not always be met, and the result will sometimes be
 * over and sometimes under the target.
 * The pattern when using use_full_seg_groups is to keep adding one group of
 * the track type that wants the largest number of groups of tracks. Each
 * time a group is assigned, that type's demand is reduced by one unit. The
 * process stops when we are no longer below the requested number of tracks.
 * As a final step, if we were closer to the target before the last move,
 * undo it and end up with a result that uses fewer tracks than requested. */
static int *
get_seg_track_counts(IN int num_sets, IN int num_seg_types,
                     IN t_segment_inf * segment_inf,
                     IN boolean use_full_seg_groups)
{
    int *result, *demand;
    int i, scale, max, imax, freq_sum, assigned, reduce, size;

    result = (int *)my_malloc(sizeof(int) * num_seg_types);
    demand = (int *)my_malloc(sizeof(int) * num_seg_types);

    /* Scale factor so we can divide by any length and still use integers */
    scale = 1;
    freq_sum = 0;
    for(i = 0; i < num_seg_types; ++i) {
        scale *= segment_inf[i].length;
        freq_sum += segment_inf[i].frequency;
    }
    reduce = scale * freq_sum;

    /* Init assignments to 0 and set the demand values */
    for(i = 0; i < num_seg_types; ++i) {
        result[i] = 0;
        demand[i] = scale * num_sets * segment_inf[i].frequency;
        if(use_full_seg_groups) {
            demand[i] /= segment_inf[i].length;
        }
    }

    /* Keep assigning tracks until we use them up */
    assigned = 0;
    size = 0;
    imax = 0;
    while(assigned < num_sets) {
        /* Find current maximum demand */
        max = 0;
        for(i = 0; i < num_seg_types; ++i) {
            if(demand[i] > max) {
                imax = i;
                max = demand[i];
            }
        }

        /* Assign tracks to the type and reduce that type's demand */
        size = (use_full_seg_groups ? segment_inf[imax].length : 1);
        demand[imax] -= reduce;
        result[imax] += size;
        assigned += size;
    }

    /* Undo the last assignment if we were closer to the goal without it */
    if((assigned - num_sets) > (size / 2)) {
        result[imax] -= size;
    }

    /* Free temps */
    if(demand) {
        free(demand);
        demand = NULL;
    }

    /* This must be freed by the caller */
    return result;
}
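/* Illustrative example (not part of the original source): the arithmetic
 * get_seg_track_counts() performs for a hypothetical architecture with two
 * segment types, L1 (length 1, frequency 1) and L4 (length 4, frequency 1),
 * when asked for num_sets = 10 with full segment groups:
 *
 *     int *counts = get_seg_track_counts(10, 2, segment_inf, TRUE);
 *
 *     scale     = 1 * 4            = 4
 *     freq_sum  = 1 + 1            = 2
 *     reduce    = scale * freq_sum = 8
 *     demand    = { 4*10*1/1, 4*10*1/4 } = { 40, 10 }
 *
 * Each iteration picks the type with the largest remaining demand, adds one
 * group (1 set for L1, 4 sets for L4) and subtracts reduce from that demand.
 * The loop overshoots to 13 sets; the final check then undoes the last L4
 * group, returning counts = {5, 4}, i.e. 9 sets, one under the requested 10. */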
t_seg_details *
alloc_and_load_seg_details(INOUT int *nodes_per_chan, IN int max_len,
                           IN int num_seg_types,
                           IN t_segment_inf * segment_inf,
                           IN boolean use_full_seg_groups,
                           IN boolean is_global_graph,
                           IN enum e_directionality directionality)
{
    /* Allocates and loads the seg_details data structure. Max_len gives the
     * maximum length of a segment (dimension of array). The code below tries
     * to:
     * (1) stagger the start points of segments of the same type evenly;
     * (2) spread out the limited number of connection boxes or switch boxes
     *     evenly along the length of a segment, starting at the segment ends;
     * (3) stagger the connection and switch boxes on different long lines,
     *     as they will not be staggered by different segment start points. */

    int i, cur_track, ntracks, itrack, length, j, index;
    int wire_switch, opin_switch, fac, num_sets, tmp;
    int group_start, first_track;
    int *sets_per_seg_type = NULL;
    t_seg_details *seg_details = NULL;
    boolean longline;

    /* Unidir tracks are assigned in pairs, and bidir tracks individually */
    if(directionality == BI_DIRECTIONAL) {
        fac = 1;
    } else {
        assert(directionality == UNI_DIRECTIONAL);
        fac = 2;
    }
    assert(*nodes_per_chan % fac == 0);

    /* Map segment type fractions and groupings to counts of tracks */
    sets_per_seg_type = get_seg_track_counts((*nodes_per_chan / fac),
                                             num_seg_types, segment_inf,
                                             use_full_seg_groups);

    /* Count the number of tracks actually assigned. */
    tmp = 0;
    for(i = 0; i < num_seg_types; ++i) {
        tmp += sets_per_seg_type[i] * fac;
    }
    assert(use_full_seg_groups || (tmp == *nodes_per_chan));
    *nodes_per_chan = tmp;

    seg_details =
        (t_seg_details *) my_malloc(*nodes_per_chan * sizeof(t_seg_details));

    /* Setup the seg_details data */
    cur_track = 0;
    for(i = 0; i < num_seg_types; ++i) {
        first_track = cur_track;

        num_sets = sets_per_seg_type[i];
        ntracks = fac * num_sets;
        if(ntracks < 1) {
            continue;           /* Avoid a divide by 0 below if ntracks is 0 */
        }
        longline = segment_inf[i].longline;
        length = segment_inf[i].length;
        if(longline) {
            length = max_len;
        }

        wire_switch = segment_inf[i].wire_switch;
        opin_switch = segment_inf[i].opin_switch;
        assert((wire_switch == opin_switch) ||
               (directionality != UNI_DIRECTIONAL));

        /* Set up the tracks of the same type */
        group_start = 0;
        for(itrack = 0; itrack < ntracks; itrack++) {

            /* Remember the start track of the current wire group */
            if((itrack / fac) % length == 0 && (itrack % fac) == 0) {
                group_start = cur_track;
            }

            seg_details[cur_track].length = length;
            seg_details[cur_track].longline = longline;

            /* Stagger the start points for each track set. The pin mappings
             * should be aware of this when choosing an intelligent way of
             * connecting pins and tracks. cur_track is used as an offset so
             * that extra tracks from different segment types are hopefully
             * better balanced. */
            seg_details[cur_track].start = (cur_track / fac) % length + 1;

            /* These properties are used by vpr_to_phy_track to determine
             * twisting of wires. */
            seg_details[cur_track].group_start = group_start;
            seg_details[cur_track].group_size =
                min(ntracks + first_track - group_start, length * fac);
            assert(0 == seg_details[cur_track].group_size % fac);
            if(0 == seg_details[cur_track].group_size) {
                seg_details[cur_track].group_size = length * fac;
            }

            /* Setup the cb and sb patterns. Global route graphs can't
             * depopulate cb and sb since this is a property of a detailed
             * route. */
            seg_details[cur_track].cb =
                (boolean *) my_malloc(length * sizeof(boolean));
            seg_details[cur_track].sb =
                (boolean *) my_malloc((length + 1) * sizeof(boolean));
            for(j = 0; j < length; ++j) {
                if(is_global_graph) {
                    seg_details[cur_track].cb[j] = TRUE;
                } else {
                    index = j;

                    /* Rotate longlines so they vary across the FPGA */
                    if(longline) {
                        index = (index + itrack) % length;
                    }

                    /* Reverse the order for tracks going in DEC_DIRECTION */
                    if(itrack % fac == 1) {
                        index = (length - 1) - j;
                    }

                    /* Use the segment's pattern. */
                    index = j % segment_inf[i].cb_len;
                    seg_details[cur_track].cb[j] = segment_inf[i].cb[index];
                }
            }
            for(j = 0; j < (length + 1); ++j) {
                if(is_global_graph) {
                    seg_details[cur_track].sb[j] = TRUE;
                } else {
                    index = j;

                    /* Rotate longlines so they vary across the FPGA */
                    if(longline) {
                        index = (index + itrack) % (length + 1);
                    }

                    /* Reverse the order for tracks going in DEC_DIRECTION */
                    if(itrack % fac == 1) {
                        index = ((length + 1) - 1) - j;
                    }

                    /* Use the segment's pattern. */
                    index = j % segment_inf[i].sb_len;
                    seg_details[cur_track].sb[j] = segment_inf[i].sb[index];
                }
            }

            seg_details[cur_track].Rmetal = segment_inf[i].Rmetal;
            seg_details[cur_track].Cmetal = segment_inf[i].Cmetal;

            seg_details[cur_track].wire_switch = wire_switch;
            seg_details[cur_track].opin_switch = opin_switch;

            if(BI_DIRECTIONAL == directionality) {
                seg_details[cur_track].direction = BI_DIRECTION;
            } else {
                assert(UNI_DIRECTIONAL == directionality);
                seg_details[cur_track].direction =
                    (itrack % 2) ? DEC_DIRECTION : INC_DIRECTION;
            }

            switch (segment_inf[i].directionality) {
            case UNI_DIRECTIONAL:
                seg_details[cur_track].drivers = SINGLE;
                break;
            case BI_DIRECTIONAL:
                seg_details[cur_track].drivers = MULTI_BUFFERED;
                break;
            }

            seg_details[cur_track].index = i;

            ++cur_track;
        }
    }                           /* End for each segment type. */

    /* free variables */
    free(sets_per_seg_type);

    return seg_details;
}
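/* Illustrative example (not part of the original source): for a
 * unidirectional channel (fac == 2) with a single length-2 segment type and
 * *nodes_per_chan == 8, the loop above fills seg_details[] as follows:
 *
 *     track 0: start 1, INC_DIRECTION    track 1: start 1, DEC_DIRECTION
 *     track 2: start 2, INC_DIRECTION    track 3: start 2, DEC_DIRECTION
 *     track 4: start 1, INC_DIRECTION    track 5: start 1, DEC_DIRECTION
 *     track 6: start 2, INC_DIRECTION    track 7: start 2, DEC_DIRECTION
 *
 * Tracks 0-3 form one wire group (group_start 0, group_size 4, i.e.
 * length * fac) and tracks 4-7 form a second group (group_start 4,
 * group_size 4). */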
void
free_seg_details(t_seg_details * seg_details, int nodes_per_chan)
{
    /* Frees all the memory allocated to an array of seg_details structures. */
    int i;

    /* Release the per-track cb and sb patterns, then the array itself. */
    for(i = 0; i < nodes_per_chan; i++) {
        free(seg_details[i].cb);
        free(seg_details[i].sb);
    }
    free(seg_details);
}
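/* Illustrative sketch (not part of the original source) of how a caller
 * would typically pair the two routines above when building a routing
 * channel; the surrounding variable names are hypothetical:
 *
 *     int nodes_per_chan = requested_width;    (may be adjusted on return)
 *     t_seg_details *details =
 *         alloc_and_load_seg_details(&nodes_per_chan, max(nx, ny),
 *                                    num_seg_types, segment_inf,
 *                                    use_full_seg_groups, is_global_graph,
 *                                    UNI_DIRECTIONAL);
 *     ...build the rr_graph from details and the updated nodes_per_chan...
 *     free_seg_details(details, nodes_per_chan);
 */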