Fixed a critical bug when annotating the routing results.

Add a previous-node check. When routing congestion is high, loops between
SBs/CBs may exist, which causes the same net to appear on multiple inputs of a
routing multiplexer. In fact, one of those inputs is driven by the other as a
downstream node. The previous-node check identifies which input to pick.
This commit is contained in:
tangxifan 2020-06-17 11:17:57 -06:00
parent 4f7e8020a8
commit 5d79a3f69f
6 changed files with 179 additions and 4 deletions

View File

@ -50,5 +50,134 @@ void annotate_rr_node_nets(const DeviceContext& device_ctx,
VTR_LOG("Done with %d nodes mapping\n", counter);
}
/********************************************************************
 * Find the previous (driving) node of a given rr_node from the
 * routing traces
 *
 * The caller supplies a candidate previous node. The candidate is
 * accepted when it appears among the driving nodes of cur_rr_node.
 * Otherwise, walk the routing traces from the head and return the
 * first trace node that drives cur_rr_node.
 *******************************************************************/
static 
RRNodeId find_previous_node_from_routing_traces(const RRGraph& rr_graph,
                                                t_trace* routing_trace_head,
                                                const RRNodeId& prev_node_candidate,
                                                const RRNodeId& cur_rr_node) {
  /* An invalid candidate cannot be validated: return it as-is */
  if (!prev_node_candidate) {
    return prev_node_candidate;
  }

  /* Accept the candidate when it is one of the driving nodes of cur_rr_node */
  for (const RREdgeId& in_edge : rr_graph.node_in_edges(cur_rr_node)) {
    if (rr_graph.edge_src_node(in_edge) == prev_node_candidate) {
      return prev_node_candidate;
    }
  }

  /* The candidate does not drive cur_rr_node. It is possible that this
   * rr_node branches from an earlier point in the routing tree:
   *
   *              +----- ... --->prev_node
   *              |
   *   src_node->+
   *              |
   *              +-----+ rr_node
   *
   * Start from the head of the traces and return the first node that
   * drives cur_rr_node.
   *
   * First-fit is reasonable here: a second fit would imply a longer
   * path, which routing optimization should already have considered.
   */
  for (t_trace* tptr = routing_trace_head; tptr != nullptr; tptr = tptr->next) {
    const RRNodeId cand_prev_node = tptr->index;
    for (const RREdgeId& in_edge : rr_graph.node_in_edges(cur_rr_node)) {
      if (rr_graph.edge_src_node(in_edge) == cand_prev_node) {
        return cand_prev_node;
      }
    }
  }

  /* No driver found in the traces; return the (unvalidated) candidate,
   * matching the original fall-through behavior */
  return prev_node_candidate;
}
/********************************************************************
 * Create a mapping between each rr_node and its previous node
 * based on VPR routing results
 * - Unmapped rr_node will have an invalid id of previous rr_node
 *******************************************************************/
void annotate_rr_node_previous_nodes(const DeviceContext& device_ctx,
                                     const ClusteringContext& clustering_ctx,
                                     const RoutingContext& routing_ctx,
                                     VprRoutingAnnotation& vpr_routing_annotation,
                                     const bool& verbose) {
  size_t counter = 0;
  VTR_LOG("Annotating previous nodes for rr_node...");
  VTR_LOGV(verbose, "\n");

  for (auto net_id : clustering_ctx.clb_nlist.nets()) {
    /* Ignore nets that are not routed */
    if (true == clustering_ctx.clb_nlist.net_is_ignored(net_id)) {
      continue;
    }
    /* Ignore nets used in local cluster only; they reserve one CLB pin */
    if (0 == clustering_ctx.clb_nlist.net_sinks(net_id).size()) {
      continue;
    }

    /* Cache the previous node while walking the trace list */
    RRNodeId prev_node = RRNodeId::INVALID();
    t_trace* tptr = routing_ctx.trace[net_id].head;
    while (tptr != nullptr) {
      RRNodeId rr_node = tptr->index;
      /* Find the right previous node: the cached one may not actually
       * drive rr_node when the trace branches from an earlier point
       * in the routing tree
       */
      prev_node = find_previous_node_from_routing_traces(device_ctx.rr_graph,
                                                         routing_ctx.trace[net_id].head,
                                                         prev_node,
                                                         rr_node);
      /* Only update mapped nodes */
      if (prev_node) {
        vpr_routing_annotation.set_rr_node_prev_node(rr_node, prev_node);
        counter++;
      }

      /* Update prev_node */
      prev_node = rr_node;
      /* Move on to the next */
      tptr = tptr->next;
    }
  }

  /* Bugfix: counter is a size_t; %d expects int and is undefined
   * behavior for size_t in printf-style logging. Use %zu instead. */
  VTR_LOG("Done with %zu nodes mapping\n", counter);
}
} /* end namespace openfpga */

View File

@ -21,6 +21,12 @@ void annotate_rr_node_nets(const DeviceContext& device_ctx,
VprRoutingAnnotation& vpr_routing_annotation,
const bool& verbose);
void annotate_rr_node_previous_nodes(const DeviceContext& device_ctx,
const ClusteringContext& clustering_ctx,
const RoutingContext& routing_ctx,
VprRoutingAnnotation& vpr_routing_annotation,
const bool& verbose);
} /* end namespace openfpga */
#endif

View File

@ -24,11 +24,18 @@ ClusterNetId VprRoutingAnnotation::rr_node_net(const RRNodeId& rr_node) const {
return rr_node_nets_[rr_node];
}
/* Return the previous (driving) rr_node recorded for a rr_node;
 * INVALID when the node has not been mapped by the routing results */
RRNodeId VprRoutingAnnotation::rr_node_prev_node(const RRNodeId& rr_node) const {
  /* Ensure that the node_id is in the list.
   * Bugfix: bound-check against rr_node_prev_nodes_, the vector that is
   * actually indexed below, instead of rr_node_nets_ */
  VTR_ASSERT(size_t(rr_node) < rr_node_prev_nodes_.size());
  return rr_node_prev_nodes_[rr_node];
}
/************************************************************************
* Public mutators
***********************************************************************/
void VprRoutingAnnotation::init(const RRGraph& rr_graph) {
rr_node_nets_.resize(rr_graph.nodes().size(), ClusterNetId::INVALID());
rr_node_prev_nodes_.resize(rr_graph.nodes().size(), RRNodeId::INVALID());
}
void VprRoutingAnnotation::set_rr_node_net(const RRNodeId& rr_node,
@ -45,4 +52,18 @@ void VprRoutingAnnotation::set_rr_node_net(const RRNodeId& rr_node,
rr_node_nets_[rr_node] = net_id;
}
/* Record prev_node as the driving node of rr_node; warn when an
 * existing, different mapping is being overridden */
void VprRoutingAnnotation::set_rr_node_prev_node(const RRNodeId& rr_node,
                                                 const RRNodeId& prev_node) {
  /* Ensure that the node_id is in the list.
   * Bugfix: bound-check against rr_node_prev_nodes_, the vector that is
   * actually indexed below, instead of rr_node_nets_ */
  VTR_ASSERT(size_t(rr_node) < rr_node_prev_nodes_.size());
  /* Warn any override attempt */
  if ( (RRNodeId::INVALID() != rr_node_prev_nodes_[rr_node])
    && (prev_node != rr_node_prev_nodes_[rr_node])) {
    /* Bugfix: the arguments are size_t; %ld expects long and is not
     * portable for size_t. Use %zu. Also fixed 'with in' typo. */
    VTR_LOG_WARN("Override the previous node '%zu' by previous node '%zu' for node '%zu' in routing context annotation!\n",
                 size_t(rr_node_prev_nodes_[rr_node]), size_t(prev_node), size_t(rr_node));
  }

  rr_node_prev_nodes_[rr_node] = prev_node;
}
} /* End namespace openfpga*/

View File

@ -26,13 +26,19 @@ class VprRoutingAnnotation {
VprRoutingAnnotation();
public: /* Public accessors */
/* Net mapped to a rr_node by the routing results; INVALID if unmapped */
ClusterNetId rr_node_net(const RRNodeId& rr_node) const;
/* Previous (driving) rr_node of a rr_node; INVALID if unmapped */
RRNodeId rr_node_prev_node(const RRNodeId& rr_node) const;
public: /* Public mutators */
/* Size the annotation vectors to the rr_graph; all entries start invalid */
void init(const RRGraph& rr_graph);
void set_rr_node_net(const RRNodeId& rr_node,
const ClusterNetId& net_id);
void set_rr_node_prev_node(const RRNodeId& rr_node,
const RRNodeId& prev_node);
private: /* Internal data */
/* Clustered net ids mapped to each rr_node */
vtr::vector<RRNodeId, ClusterNetId> rr_node_nets_;
/* Previous rr_node driving each rr_node */
vtr::vector<RRNodeId, RRNodeId> rr_node_prev_nodes_;
};
} /* End namespace openfpga*/

View File

@ -95,7 +95,9 @@ int link_arch(OpenfpgaContext& openfpga_ctx,
openfpga_ctx.mutable_vpr_device_annotation(),
cmd_context.option_enable(cmd, opt_verbose));
/* Annotate net mapping to each rr_node
/* Annotate routing results:
* - net mapping to each rr_node
* - previous nodes driving each rr_node
*/
openfpga_ctx.mutable_vpr_routing_annotation().init(g_vpr_ctx.device().rr_graph);
@ -103,6 +105,11 @@ int link_arch(OpenfpgaContext& openfpga_ctx,
openfpga_ctx.mutable_vpr_routing_annotation(),
cmd_context.option_enable(cmd, opt_verbose));
annotate_rr_node_previous_nodes(g_vpr_ctx.device(), g_vpr_ctx.clustering(), g_vpr_ctx.routing(),
openfpga_ctx.mutable_vpr_routing_annotation(),
cmd_context.option_enable(cmd, opt_verbose));
/* Build the routing graph annotation
* - RRGSB
* - DeviceRRGSB

View File

@ -66,8 +66,11 @@ void build_switch_block_mux_bitstream(BitstreamManager& bitstream_manager,
*/
int path_id = DEFAULT_PATH_ID;
if (ClusterNetId::INVALID() != output_net) {
/* We must have a valid previous node that is supposed to drive the source node! */
VTR_ASSERT(routing_annotation.rr_node_prev_node(cur_rr_node));
for (size_t inode = 0; inode < drive_rr_nodes.size(); ++inode) {
if (input_nets[inode] == output_net) {
if ( (input_nets[inode] == output_net)
&& (drive_rr_nodes[inode] == routing_annotation.rr_node_prev_node(cur_rr_node)) ) {
path_id = (int)inode;
break;
}
@ -246,7 +249,10 @@ void build_connection_block_mux_bitstream(BitstreamManager& bitstream_manager,
if (ClusterNetId::INVALID() != output_net) {
for (const RREdgeId& edge : rr_graph.node_in_edges(src_rr_node)) {
RRNodeId driver_node = rr_graph.edge_src_node(edge);
if (routing_annotation.rr_node_net(driver_node) == output_net) {
/* We must have a valid previous node that is supposed to drive the source node! */
VTR_ASSERT(routing_annotation.rr_node_prev_node(src_rr_node));
if ( (routing_annotation.rr_node_net(driver_node) == output_net)
&& (driver_node == routing_annotation.rr_node_prev_node(src_rr_node)) ) {
path_id = edge_index;
break;
}