Fix bugs when building routing channels in build_rr_graph()

tangxifan 2020-02-04 11:37:59 -07:00
parent b6a2013565
commit 15167c9bfb
3 changed files with 44 additions and 25 deletions


@@ -68,19 +68,21 @@ short RRGraph::node_xhigh(const RRNodeId& node) const {
/* Special for SOURCE and SINK node, we always return the xlow
* This is due to the convention in creating RRGraph
 * so that we can guarantee a unique search result for SOURCE/SINK nodes
*/
if ( (SOURCE == node_type(node))
|| (SINK == node_type(node)) ) {
return node_bounding_box(node).xmin();
}
*/
return node_bounding_box(node).xmax();
}
short RRGraph::node_yhigh(const RRNodeId& node) const {
/*
if ( (SOURCE == node_type(node))
|| (SINK == node_type(node)) ) {
return node_bounding_box(node).ymin();
}
*/
return node_bounding_box(node).ymax();
}
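
With the SOURCE/SINK special case commented out above, node_xhigh() and node_yhigh() now simply return the top-right corner of the node's bounding box. A minimal standalone sketch of that behaviour (not the real RRGraph API; a plain struct stands in for the bounding box):

#include <cassert>

// Stand-in for a node's bounding box: (xmin, ymin) is the low corner,
// (xmax, ymax) the high corner.
struct BoundingBox {
    short xmin, ymin, xmax, ymax;
};

// After the change, the high-coordinate accessors always use the box corner,
// with no special casing of SOURCE/SINK nodes.
short node_xhigh(const BoundingBox& bb) { return bb.xmax; }
short node_yhigh(const BoundingBox& bb) { return bb.ymax; }

int main() {
    BoundingBox sink{3, 5, 3, 5};   // a SINK occupying a single tile
    BoundingBox chanx{2, 4, 6, 4};  // a CHANX wire spanning x = 2..6 at y = 4
    assert(node_xhigh(sink) == 3 && node_yhigh(sink) == 5);
    assert(node_xhigh(chanx) == 6 && node_yhigh(chanx) == 4);
    return 0;
}
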
@@ -1229,6 +1231,7 @@ void RRGraph::build_fast_node_lookup() const {
std::vector<size_t> xlows;
std::vector<size_t> ylows;
/*
if ( (SOURCE == node_type(node))
|| (SINK == node_type(node))
|| (CHANX == node_type(node))
@@ -1237,13 +1240,15 @@ void RRGraph::build_fast_node_lookup() const {
ylows.resize(node_bounding_boxes_[node].ymax() - node_bounding_boxes_[node].ymin() + 1);
std::iota(xlows.begin(), xlows.end(), node_xlow(node));
std::iota(ylows.begin(), ylows.end(), node_ylow(node));
/* Sanity check */
VTR_ASSERT(size_t(node_bounding_boxes_[node].xmax()) == xlows.back());
VTR_ASSERT(size_t(node_bounding_boxes_[node].ymax()) == ylows.back());
} else {
} else {
*/
xlows.push_back(node_xlow(node));
ylows.push_back(node_ylow(node));
/*
}
*/
for (size_t x : xlows) {
for (size_t y : ylows) {
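
The commented-out block above used to expand some node types across every (x, y) their bounding box covers; after this change each node is registered in the fast lookup only at its (xlow, ylow) corner. A minimal sketch of that idea, assuming a map keyed on the low corner (the real lookup is a dense multi-dimensional structure that also keys on type, ptc and side):

#include <cstddef>
#include <map>
#include <utility>
#include <vector>

using NodeId = std::size_t;

// Spatial lookup sketch: each node id is stored only under its low corner,
// so there is no bounding-box expansion at registration time.
std::map<std::pair<short, short>, std::vector<NodeId>> lookup;

void register_node(NodeId id, short xlow, short ylow) {
    lookup[{xlow, ylow}].push_back(id);
}

int main() {
    register_node(42, 2, 4);                  // e.g. a CHANX wire starting at (2, 4)
    return lookup.count({2, 4}) == 1 ? 0 : 1; // queries must use the node's low corner
}

One consequence is that lookups such as find_node() have to be given the node's low corner, which is what the coordinate fixes in the other files of this commit arrange.
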


@@ -718,7 +718,7 @@ static void build_rr_graph(const t_graph_type graph_type,
*/
device_ctx.rr_graph.reserve_switches(device_ctx.num_arch_switches);
// Create the switches
for (size_t iswitch = 0; iswitch < device_ctx.num_arch_switches; ++iswitch) {
for (int iswitch = 0; iswitch < device_ctx.num_arch_switches; ++iswitch) {
const t_rr_switch_inf& temp_rr_switch = create_rr_switch_from_arch_switch(iswitch, R_minW_nmos, R_minW_pmos);
device_ctx.rr_graph.create_switch(temp_rr_switch);
}
@@ -1864,26 +1864,17 @@ void alloc_and_load_edges(RRGraph& rr_graph,
size_t edge_count = std::distance(edge_range.first, edge_range.second);
if (rr_graph.node_out_edges(inode).size() == 0) {
//Create initial edges
//
//Note that we do this in bulk instead of via add_edge() to reduce
//memory fragmentation
//Create initial edges
//
//Note that we do this in bulk instead of via add_edge() to reduce
//memory fragmentation
rr_graph.reserve_edges(edge_count + rr_graph.edges().size());
rr_graph.reserve_edges(edge_count + rr_graph.edges().size());
for (auto itr = edge_range.first; itr != edge_range.second; ++itr) {
VTR_ASSERT(itr->from_node == inode);
for (auto itr = edge_range.first; itr != edge_range.second; ++itr) {
VTR_ASSERT(itr->from_node == inode);
rr_graph.create_edge(inode, itr->to_node, RRSwitchId(itr->switch_type));
}
} else {
//Add new edge incrementally
//
//This should occur relatively rarely (e.g. a backward bidir edge) so memory fragmentation shouldn't be a big problem
for (auto itr = edge_range.first; itr != edge_range.second; ++itr) {
rr_graph.create_edge(inode, itr->to_node, RRSwitchId(itr->switch_type));
}
rr_graph.create_edge(inode, itr->to_node, RRSwitchId(itr->switch_type));
}
}
}
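
The comments above give the rationale for the two paths: when a node has no outgoing edges yet, the whole batch is created after a single reserve_edges() call to avoid memory fragmentation; otherwise edges are appended individually, which the comment notes should be rare (e.g. a backward bidir edge). A minimal sketch of the reserve-then-create pattern, assuming a plain std::vector stands in for the graph's edge storage:

#include <cstddef>
#include <utility>
#include <vector>

struct Edge {
    std::size_t from, to, switch_id;
};

// Bulk path: reserve once so the storage grows with a single allocation
// instead of reallocating repeatedly inside the loop.
void create_edges_bulk(std::vector<Edge>& edges,
                       std::size_t from,
                       const std::vector<std::pair<std::size_t, std::size_t>>& to_and_switch) {
    edges.reserve(edges.size() + to_and_switch.size());
    for (const auto& ts : to_and_switch) {
        edges.push_back({from, ts.first, ts.second});
    }
}

int main() {
    std::vector<Edge> edges;
    create_edges_bulk(edges, 0, {{1, 3}, {2, 3}}); // two fan-out edges through switch 3
    return edges.size() == 2 ? 0 : 1;
}
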
@@ -2672,7 +2663,23 @@ std::string describe_rr_node(const RRNodeId& inode) {
return msg;
}
static void build_unidir_rr_opins(const int i, const int j, const e_side side, const DeviceGrid& grid, const std::vector<vtr::Matrix<int>>& Fc_out, const int max_chan_width, const t_chan_details& chan_details_x, const t_chan_details& chan_details_y, vtr::NdMatrix<int, 3>& Fc_xofs, vtr::NdMatrix<int, 3>& Fc_yofs, t_rr_edge_info_set& rr_edges_to_create, bool* Fc_clipped, const t_rr_node_indices& L_rr_node_indices, const RRGraph& rr_graph, const t_direct_inf* directs, const int num_directs, const t_clb_to_clb_directs* clb_to_clb_directs, const int num_seg_types) {
static void build_unidir_rr_opins(const int i, const int j,
const e_side side,
const DeviceGrid& grid,
const std::vector<vtr::Matrix<int>>& Fc_out,
const int max_chan_width,
const t_chan_details& chan_details_x,
const t_chan_details& chan_details_y,
vtr::NdMatrix<int, 3>& Fc_xofs,
vtr::NdMatrix<int, 3>& Fc_yofs,
t_rr_edge_info_set& rr_edges_to_create,
bool* Fc_clipped,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const t_direct_inf* directs,
const int num_directs,
const t_clb_to_clb_directs* clb_to_clb_directs,
const int num_seg_types) {
/*
* This routine adds the edges from opins to channels at the specified
* grid location (i,j) and grid tile side
@@ -2696,7 +2703,11 @@ static void build_unidir_rr_opins(const int i, const int j, const e_side side, c
}
RRNodeId opin_node_index = rr_graph.find_node(i, j, OPIN, pin_index, side);
if (false == rr_graph.valid_node_id(opin_node_index)) continue; //No valid from node
//if (false == rr_graph.valid_node_id(opin_node_index)) continue; //No valid from node
if (1 == type->pinloc[width_offset][height_offset][side][pin_index]) {
VTR_ASSERT(true == rr_graph.valid_node_id(opin_node_index));
}
for (int iseg = 0; iseg < num_seg_types; iseg++) {
/* get Fc for this segment type */
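
The early continue above is replaced by an assertion: if the architecture declares the pin on this side of the tile (pinloc == 1), a matching OPIN node must exist, so a failed lookup is treated as a graph-construction bug rather than silently skipped. A minimal sketch of that invariant, using hypothetical names (pin_on_this_side, found_opin) in place of the real pinloc lookup and find_node() result:

#include <cassert>

// Invariant sketch: a pin that the architecture places on this tile side must
// have an OPIN node in the routing-resource graph; a pin on another side may
// legitimately have no node here.
void check_opin_exists(bool pin_on_this_side, bool found_opin) {
    if (pin_on_this_side) {
        assert(found_opin); // failing here means the RR graph was built inconsistently
    }
}

int main() {
    check_opin_exists(true, true);   // pin present and node found: fine
    check_opin_exists(false, false); // pin on another side, no node: also fine
    return 0;
}
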


@@ -809,8 +809,8 @@ int get_unidir_opin_connections(const int chan,
dec_track = dec_muxes[dec_mux];
/* Figure the inodes of those muxes */
inc_inode_index = rr_graph.find_node(chan, seg, chan_type, inc_track);
dec_inode_index = rr_graph.find_node(chan, seg, chan_type, dec_track);
inc_inode_index = rr_graph.find_node(x, y, chan_type, inc_track);
dec_inode_index = rr_graph.find_node(x, y, chan_type, dec_track);
if (inc_inode_index == RRNodeId::INVALID() || dec_inode_index == RRNodeId::INVALID()) {
continue;
@@ -1078,6 +1078,9 @@ static void load_chan_rr_indices(const int max_chan_width,
/* We give a fake coordinate here to ease the downstream builder */
short xlow = chan;
short ylow = start;
if (CHANX == type) {
std::swap(xlow, ylow);
}
RRNodeId node = rr_graph.find_node(xlow, ylow, type, track);
if (false == rr_graph.valid_node_id(node)) {
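
Both fixes in this file follow the same coordinate convention: a CHANX channel indexed by (chan, seg) lives at (x = seg, y = chan), while a CHANY channel lives at (x = chan, y = seg), and find_node() is now passed grid (x, y) coordinates, hence the swap above. A minimal sketch of that mapping, assuming a simple local enum rather than the real t_rr_type:

#include <cassert>
#include <utility>

enum class ChanType { CHANX, CHANY };

// For CHANX, 'chan' selects the row (y) and 'seg' runs along x; for CHANY the
// roles are reversed, so only CHANX needs its coordinates exchanged.
std::pair<short, short> chan_coord_to_xy(ChanType type, short chan, short seg) {
    if (type == ChanType::CHANX) {
        return {seg, chan};
    }
    return {chan, seg};
}

int main() {
    auto xy = chan_coord_to_xy(ChanType::CHANX, /*chan=*/3, /*seg=*/7);
    assert(xy.first == 7 && xy.second == 3); // CHANX: x comes from seg, y from chan
    xy = chan_coord_to_xy(ChanType::CHANY, /*chan=*/3, /*seg=*/7);
    assert(xy.first == 3 && xy.second == 7); // CHANY: x comes from chan, y from seg
    return 0;
}
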