route_tree_timing adopt RRGraph object
parent 5139b5d194
commit 400bd76bdc
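The diff below replaces raw int RR-node indices with the strongly typed RRNodeId throughout route_tree_timing: lookup containers are re-keyed by the ID type (vtr::vector<RRNodeId, t_rt_node*>, std::set<RRNodeId>, std::map<RRNodeId, t_rt_node*>), per-node data is read through RRGraph accessors (node_type, node_R, node_C, node_capacity, edge_switch, edge_sink_node), and IDs are printed through an explicit size_t() cast. As a reading aid, here is a minimal, self-contained C++ sketch of that indexing pattern; StrongId and IdVector are illustrative stand-ins for VTR's vtr::StrongId and vtr::vector, not the library's actual implementation.

#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative strong ID: a size_t wrapper tagged by an empty type so that
// IDs of different kinds cannot be mixed up. Default-constructs to INVALID.
template<typename Tag>
class StrongId {
  public:
    StrongId() = default;
    explicit StrongId(std::size_t v) : value_(v) {}
    static StrongId INVALID() { return StrongId(); }
    explicit operator std::size_t() const { return value_; }
    bool operator==(StrongId other) const { return value_ == other.value_; }
    bool operator!=(StrongId other) const { return value_ != other.value_; }

  private:
    std::size_t value_ = static_cast<std::size_t>(-1); // sentinel for INVALID
};

struct rr_node_id_tag; // tag only, never defined
using RRNodeId = StrongId<rr_node_id_tag>;

// Illustrative container that can only be indexed by its matching ID type,
// analogous in spirit to vtr::vector<RRNodeId, T> used in the diff.
template<typename Id, typename T>
class IdVector {
  public:
    IdVector(std::size_t n, const T& init) : data_(n, init) {}
    T& operator[](Id id) { return data_[std::size_t(id)]; }
    const T& operator[](Id id) const { return data_[std::size_t(id)]; }
    std::size_t size() const { return data_.size(); }

  private:
    std::vector<T> data_;
};

int main() {
    // Old style: a plain int indexes any vector, so node/edge/switch indices
    // can be swapped without any compile-time diagnostic.
    std::vector<float> node_C_old(4, 1.0f);
    int inode_old = 2;
    std::printf("old C = %f\n", node_C_old[inode_old]);

    // New style: only an RRNodeId can index the node-keyed container, and
    // printing the ID requires an explicit size_t() cast, as in the diff.
    IdVector<RRNodeId, float> node_C_new(4, 1.0f);
    RRNodeId inode(2);
    std::printf("new C = %f (node %zu)\n", node_C_new[inode], std::size_t(inode));
    return 0;
}

The benefit of the pattern is that a node ID can no longer be silently used where an edge or switch index is expected; such mix-ups become compile errors instead of silent lookups into the wrong table.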
route_tree_timing.cpp

@@ -27,7 +27,7 @@
 /* Array below allows mapping from any rr_node to any rt_node currently in
  * the rt_tree. */

-static std::vector<t_rt_node*> rr_node_to_rt_node; /* [0..device_ctx.rr_graph.nodes().size()-1] */
+static vtr::vector<RRNodeId, t_rt_node*> rr_node_to_rt_node; /* [0..device_ctx.rr_graph.nodes().size()-1] */

 /* Frees lists for fast addition and deletion of nodes and edges. */

@@ -47,19 +47,19 @@ static void free_linked_rt_edge(t_linked_rt_edge* rt_edge);
 static t_rt_node* add_subtree_to_route_tree(t_heap* hptr,
                                             t_rt_node** sink_rt_node_ptr);

-static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<int>& visited);
+static t_rt_node* add_non_configurable_to_route_tree(const RRNodeId& rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<RRNodeId>& visited);

 static t_rt_node* update_unbuffered_ancestors_C_downstream(t_rt_node* start_of_new_subtree_rt_node);

-bool verify_route_tree_recurr(t_rt_node* node, std::set<int>& seen_nodes);
+bool verify_route_tree_recurr(t_rt_node* node, std::set<RRNodeId>& seen_nodes);

 static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf, bool congested, std::vector<int>* non_config_node_set_usage);

-static t_trace* traceback_to_route_tree_branch(t_trace* trace, std::map<int, t_rt_node*>& rr_node_to_rt, std::vector<int>* non_config_node_set_usage);
+static t_trace* traceback_to_route_tree_branch(t_trace* trace, std::map<RRNodeId, t_rt_node*>& rr_node_to_rt, std::vector<int>* non_config_node_set_usage);

 static std::pair<t_trace*, t_trace*> traceback_from_route_tree_recurr(t_trace* head, t_trace* tail, const t_rt_node* node);

-void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<int, int, int>>& connections);
+void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<RRNodeId, int, RRNodeId>>& connections);

 /************************** Subroutine definitions ***************************/

@@ -84,7 +84,7 @@ bool alloc_route_tree_timing_structs(bool exists_ok) {
         }
     }

-    rr_node_to_rt_node = std::vector<t_rt_node*>(device_ctx.rr_graph.nodes().size(), nullptr);
+    rr_node_to_rt_node = vtr::vector<RRNodeId, t_rt_node*>(device_ctx.rr_graph.nodes().size(), nullptr);

     return true;
 }
@@ -173,7 +173,7 @@ static void free_linked_rt_edge(t_linked_rt_edge* rt_edge) {
  * node of the rt_tree (which is just the net source). */
 t_rt_node* init_route_tree_to_source(ClusterNetId inet) {
     t_rt_node* rt_root;
-    int inode;
+    RRNodeId inode;

     auto& route_ctx = g_vpr_ctx.routing();
     auto& device_ctx = g_vpr_ctx.device();
@@ -187,9 +187,9 @@ t_rt_node* init_route_tree_to_source(ClusterNetId inet) {
     inode = route_ctx.net_rr_terminals[inet][0]; /* Net source */

     rt_root->inode = inode;
-    rt_root->C_downstream = device_ctx.rr_nodes[inode].C();
-    rt_root->R_upstream = device_ctx.rr_nodes[inode].R();
-    rt_root->Tdel = 0.5 * device_ctx.rr_nodes[inode].R() * device_ctx.rr_nodes[inode].C();
+    rt_root->C_downstream = device_ctx.rr_graph.node_C(inode);
+    rt_root->R_upstream = device_ctx.rr_graph.node_R(inode);
+    rt_root->Tdel = 0.5 * device_ctx.rr_graph.node_R(inode) * device_ctx.rr_graph.node_C(inode);
     rr_node_to_rt_node[inode] = rt_root;

     return (rt_root);
@@ -263,7 +263,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
     auto& device_ctx = g_vpr_ctx.device();
     auto& route_ctx = g_vpr_ctx.routing();

-    int inode = hptr->index;
+    RRNodeId inode = hptr->index;

     //if (device_ctx.rr_nodes[inode].type() != SINK) {
     //VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
@@ -286,11 +286,11 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {

     downstream_rt_node = sink_rt_node;

-    std::unordered_set<int> main_branch_visited;
-    std::unordered_set<int> all_visited;
+    std::unordered_set<RRNodeId> main_branch_visited;
+    std::unordered_set<RRNodeId> all_visited;
     inode = hptr->u.prev.node;
-    t_edge_size iedge = hptr->u.prev.edge;
-    short iswitch = device_ctx.rr_nodes[inode].edge_switch(iedge);
+    RREdgeId iedge = hptr->u.prev.edge;
+    short iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));

     /* For all "new" nodes in the main path */
     // inode is node index of previous node
@@ -319,7 +319,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {

         rr_node_to_rt_node[inode] = rt_node;

-        if (device_ctx.rr_nodes[inode].type() == IPIN) {
+        if (device_ctx.rr_graph.node_type(inode) == IPIN) {
             rt_node->re_expand = false;
         } else {
             rt_node->re_expand = true;
@@ -328,7 +328,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
         downstream_rt_node = rt_node;
         iedge = route_ctx.rr_node_route_inf[inode].prev_edge;
         inode = route_ctx.rr_node_route_inf[inode].prev_node;
-        iswitch = device_ctx.rr_nodes[inode].edge_switch(iedge);
+        iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));
     }

     //Inode is now the branch point to the old routing; do not need
@@ -347,7 +347,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {

     //Expand (recursively) each of the main-branch nodes adding any
     //non-configurably connected nodes
-    for (int rr_node : main_branch_visited) {
+    for (const RRNodeId& rr_node : main_branch_visited) {
         add_non_configurable_to_route_tree(rr_node, false, all_visited);
     }

@@ -355,7 +355,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
     return (downstream_rt_node);
 }

-static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<int>& visited) {
+static t_rt_node* add_non_configurable_to_route_tree(const RRNodeId& rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<RRNodeId>& visited) {
     t_rt_node* rt_node = nullptr;

     if (!visited.count(rr_node) || !reached_by_non_configurable_edge) {
@@ -375,7 +375,7 @@ static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bo
         rt_node->u.child_list = nullptr;
         rt_node->inode = rr_node;

-        if (device_ctx.rr_nodes[rr_node].type() == IPIN) {
+        if (device_ctx.rr_graph.node_type(rr_node) == IPIN) {
             rt_node->re_expand = false;
         } else {
             rt_node->re_expand = true;
@@ -385,18 +385,18 @@ static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bo
         }
     }

-    for (int iedge : device_ctx.rr_nodes[rr_node].non_configurable_edges()) {
+    for (const RREdgeId& iedge : device_ctx.rr_graph.node_non_configurable_out_edges(rr_node)) {
         //Recursive case: expand children
-        VTR_ASSERT(!device_ctx.rr_nodes[rr_node].edge_is_configurable(iedge));
+        VTR_ASSERT(!device_ctx.rr_graph.edge_is_configurable(iedge));

-        int to_rr_node = device_ctx.rr_nodes[rr_node].edge_sink_node(iedge);
+        RRNodeId to_rr_node = device_ctx.rr_graph.edge_sink_node(iedge);

         //Recurse
         t_rt_node* child_rt_node = add_non_configurable_to_route_tree(to_rr_node, true, visited);

         if (!child_rt_node) continue;

-        int iswitch = device_ctx.rr_nodes[rr_node].edge_switch(iedge);
+        int iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));

         //Create the edge
         t_linked_rt_edge* linked_rt_edge = alloc_linked_rt_edge();
@@ -428,7 +428,7 @@ void load_new_subtree_R_upstream(t_rt_node* rt_node) {
     auto& device_ctx = g_vpr_ctx.device();

     t_rt_node* parent_rt_node = rt_node->parent_node;
-    int inode = rt_node->inode;
+    RRNodeId inode = rt_node->inode;

     //Calculate upstream resistance
     float R_upstream = 0.;
@@ -441,7 +441,7 @@ void load_new_subtree_R_upstream(t_rt_node* rt_node) {
         }
         R_upstream += device_ctx.rr_switch_inf[iswitch].R; //Parent switch R
     }
-    R_upstream += device_ctx.rr_nodes[inode].R(); //Current node R
+    R_upstream += device_ctx.rr_graph.node_R(inode); //Current node R

     rt_node->R_upstream = R_upstream;

@@ -457,7 +457,7 @@ float load_new_subtree_C_downstream(t_rt_node* rt_node) {
     if (rt_node) {
         auto& device_ctx = g_vpr_ctx.device();

-        C_downstream += device_ctx.rr_nodes[rt_node->inode].C();
+        C_downstream += device_ctx.rr_graph.node_C(rt_node->inode);
         for (t_linked_rt_edge* edge = rt_node->u.child_list; edge != nullptr; edge = edge->next) {
             /*Similar to net_delay.cpp, this for loop traverses a rc subtree, whose edges represent enabled switches.
              * When switches such as multiplexers and tristate buffers are enabled, their fanout
@@ -581,9 +581,9 @@ void load_route_tree_rr_route_inf(t_rt_node* root) {
     t_linked_rt_edge* edge{root->u.child_list};

     for (;;) {
-        int inode = root->inode;
-        route_ctx.rr_node_route_inf[inode].prev_node = NO_PREVIOUS;
-        route_ctx.rr_node_route_inf[inode].prev_edge = NO_PREVIOUS;
+        RRNodeId inode = root->inode;
+        route_ctx.rr_node_route_inf[inode].prev_node = RRNodeId::INVALID();
+        route_ctx.rr_node_route_inf[inode].prev_edge = RREdgeId::INVALID();
         // path cost should be unset
         VTR_ASSERT(std::isinf(route_ctx.rr_node_route_inf[inode].path_cost));
         VTR_ASSERT(std::isinf(route_ctx.rr_node_route_inf[inode].backward_path_cost));
@@ -606,13 +606,13 @@ void load_route_tree_rr_route_inf(t_rt_node* root) {
 }

 bool verify_route_tree(t_rt_node* root) {
-    std::set<int> seen_nodes;
+    std::set<RRNodeId> seen_nodes;
     return verify_route_tree_recurr(root, seen_nodes);
 }

-bool verify_route_tree_recurr(t_rt_node* node, std::set<int>& seen_nodes) {
+bool verify_route_tree_recurr(t_rt_node* node, std::set<RRNodeId>& seen_nodes) {
     if (seen_nodes.count(node->inode)) {
-        VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Duplicate route tree nodes found for node %d", node->inode);
+        VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Duplicate route tree nodes found for node %ld", size_t(node->inode));
     }

     seen_nodes.insert(node->inode);
@@ -657,7 +657,7 @@ void print_route_tree(const t_rt_node* rt_node, int depth) {
     }

     auto& device_ctx = g_vpr_ctx.device();
-    VTR_LOG("%srt_node: %d (%s)", indent.c_str(), rt_node->inode, device_ctx.rr_nodes[rt_node->inode].type_string());
+    VTR_LOG("%srt_node: %ld (%s)", indent.c_str(), size_t(rt_node->inode), rr_node_typename[device_ctx.rr_graph.node_type(rt_node->inode)]);

     if (rt_node->parent_switch != OPEN) {
         bool parent_edge_configurable = device_ctx.rr_switch_inf[rt_node->parent_switch].configurable();
@@ -668,7 +668,7 @@ void print_route_tree(const t_rt_node* rt_node, int depth) {

     auto& route_ctx = g_vpr_ctx.routing();

-    if (route_ctx.rr_node_route_inf[rt_node->inode].occ() > device_ctx.rr_nodes[rt_node->inode].capacity()) {
+    if (route_ctx.rr_node_route_inf[rt_node->inode].occ() > device_ctx.rr_graph.node_capacity(rt_node->inode)) {
         VTR_LOG(" x");
     }

@@ -727,7 +727,7 @@ t_rt_node* traceback_to_route_tree(t_trace* head, std::vector<int>* non_config_n

     VTR_ASSERT_DEBUG(validate_traceback(head));

-    std::map<int, t_rt_node*> rr_node_to_rt;
+    std::map<RRNodeId, t_rt_node*> rr_node_to_rt;

     t_trace* trace = head;
     while (trace) { //Each branch
@@ -749,14 +749,14 @@ t_rt_node* traceback_to_route_tree(t_trace* head, std::vector<int>* non_config_n
 //
 //Returns the t_trace defining the start of the next branch
 static t_trace* traceback_to_route_tree_branch(t_trace* trace,
-                                               std::map<int, t_rt_node*>& rr_node_to_rt,
+                                               std::map<RRNodeId, t_rt_node*>& rr_node_to_rt,
                                                std::vector<int>* non_config_node_set_usage) {
     t_trace* next = nullptr;

     if (trace) {
         t_rt_node* node = nullptr;

-        int inode = trace->index;
+        RRNodeId inode = trace->index;
         int iswitch = trace->iswitch;

         auto itr = rr_node_to_rt.find(trace->index);
@@ -773,7 +773,7 @@ static t_trace* traceback_to_route_tree_branch(t_trace* trace,
             node->Tdel = std::numeric_limits<float>::quiet_NaN();

             auto& device_ctx = g_vpr_ctx.device();
-            auto node_type = device_ctx.rr_nodes[inode].type();
+            auto node_type = device_ctx.rr_graph.node_type(inode);
             if (node_type == IPIN || node_type == SINK)
                 node->re_expand = false;
             else
@@ -899,7 +899,7 @@ t_trace* traceback_from_route_tree(ClusterNetId inet, const t_rt_node* root, int

     t_trace* head;
     t_trace* tail;
-    std::unordered_set<int> nodes;
+    std::unordered_set<RRNodeId> nodes;

     std::tie(head, tail) = traceback_from_route_tree_recurr(nullptr, nullptr, root);

@@ -912,7 +912,7 @@ t_trace* traceback_from_route_tree(ClusterNetId inet, const t_rt_node* root, int
         nodes.insert(trace->index);

         //Sanity check that number of sinks match expected
-        if (device_ctx.rr_nodes[trace->index].type() == SINK) {
+        if (device_ctx.rr_graph.node_type(trace->index) == SINK) {
             num_trace_sinks += 1;
         }
     }
@@ -937,7 +937,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
     auto& device_ctx = g_vpr_ctx.device();
     auto& route_ctx = g_vpr_ctx.routing();

-    bool congested = (route_ctx.rr_node_route_inf[node->inode].occ() > device_ctx.rr_nodes[node->inode].capacity());
+    bool congested = (route_ctx.rr_node_route_inf[node->inode].occ() > device_ctx.rr_graph.node_capacity(node->inode));
     int node_set = -1;
     auto itr = device_ctx.rr_node_to_non_config_node_set.find(node->inode);
     if (itr != device_ctx.rr_node_to_non_config_node_set.end()) {
@@ -949,7 +949,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
         force_prune = true;
     }

-    if (connections_inf.should_force_reroute_connection(node->inode)) {
+    if (connections_inf.should_force_reroute_connection(size_t(node->inode))) {
         //Forcibly re-route (e.g. to improve delay)
         force_prune = true;
     }
@@ -995,7 +995,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
         }
     }

-    if (device_ctx.rr_nodes[node->inode].type() == SINK) {
+    if (device_ctx.rr_graph.node_type(node->inode) == SINK) {
         if (!force_prune) {
             //Valid path to sink

@@ -1007,7 +1007,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
             VTR_ASSERT(force_prune);

             //Record as not reached
-            connections_inf.toreach_rr_sink(node->inode);
+            connections_inf.toreach_rr_sink(size_t(node->inode));

             free_rt_node(node);
             return nullptr; //Pruned
@@ -1131,9 +1131,9 @@ t_rt_node* prune_route_tree(t_rt_node* rt_root, CBRR& connections_inf, std::vect
     auto& device_ctx = g_vpr_ctx.device();
     auto& route_ctx = g_vpr_ctx.routing();

-    VTR_ASSERT_MSG(device_ctx.rr_nodes[rt_root->inode].type() == SOURCE, "Root of route tree must be SOURCE");
+    VTR_ASSERT_MSG(device_ctx.rr_graph.node_type(rt_root->inode) == SOURCE, "Root of route tree must be SOURCE");

-    VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_nodes[rt_root->inode].capacity(),
+    VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_graph.node_capacity(rt_root->inode),
                    "Route tree root/SOURCE should never be congested");

     return prune_route_tree_recurr(rt_root, connections_inf, false, non_config_node_set_usage);
@@ -1206,7 +1206,7 @@ void print_edge(const t_linked_rt_edge* edge) {
         return;
     }
     while (edge) {
-        VTR_LOG("%d(%d) ", edge->child->inode, edge->iswitch);
+        VTR_LOG("%ld(%d) ", size_t(edge->child->inode), edge->iswitch);
         edge = edge->next;
     }
     VTR_LOG("\n");
@@ -1215,31 +1215,30 @@ void print_edge(const t_linked_rt_edge* edge) {
 static void print_node(const t_rt_node* rt_node) {
     auto& device_ctx = g_vpr_ctx.device();

-    int inode = rt_node->inode;
-    t_rr_type node_type = device_ctx.rr_nodes[inode].type();
+    RRNodeId inode = rt_node->inode;
+    t_rr_type node_type = device_ctx.rr_graph.node_type(inode);
     VTR_LOG("%5.1e %5.1e %2d%6s|%-6d-> ", rt_node->C_downstream, rt_node->R_upstream,
-            rt_node->re_expand, rr_node_typename[node_type], inode);
+            rt_node->re_expand, rr_node_typename[node_type], size_t(inode));
 }

 static void print_node_inf(const t_rt_node* rt_node) {
     auto& route_ctx = g_vpr_ctx.routing();

-    int inode = rt_node->inode;
+    RRNodeId inode = rt_node->inode;
     const auto& node_inf = route_ctx.rr_node_route_inf[inode];
     VTR_LOG("%5.1e %5.1e%6d%3d|%-6d-> ", node_inf.path_cost, node_inf.backward_path_cost,
-            node_inf.prev_node, node_inf.prev_edge, inode);
+            node_inf.prev_node, node_inf.prev_edge, size_t(inode));
 }

 static void print_node_congestion(const t_rt_node* rt_node) {
     auto& device_ctx = g_vpr_ctx.device();
     auto& route_ctx = g_vpr_ctx.routing();

-    int inode = rt_node->inode;
+    RRNodeId inode = rt_node->inode;
     const auto& node_inf = route_ctx.rr_node_route_inf[inode];
-    const auto& node = device_ctx.rr_nodes[inode];
     const auto& node_state = route_ctx.rr_node_route_inf[inode];
     VTR_LOG("%2d %2d|%-6d-> ", node_inf.pres_cost, rt_node->Tdel,
-            node_state.occ(), node.capacity(), inode);
+            node_state.occ(), device_ctx.rr_graph.node_capacity(inode), size_t(inode));
 }

 void print_route_tree_inf(const t_rt_node* rt_root) {
@@ -1263,8 +1262,8 @@ bool is_equivalent_route_tree(const t_rt_node* root, const t_rt_node* root_clone
     if (!root && !root_clone) return true;
     if (!root || !root_clone) return false; // one of them is null
     if ((root->inode != root_clone->inode) || (!equal_approx(root->R_upstream, root_clone->R_upstream)) || (!equal_approx(root->C_downstream, root_clone->C_downstream)) || (!equal_approx(root->Tdel, root_clone->Tdel))) {
-        VTR_LOG("mismatch i %d|%d R %e|%e C %e|%e T %e %e\n",
-                root->inode, root_clone->inode,
+        VTR_LOG("mismatch i %ld|%ld R %e|%e C %e|%e T %e %e\n",
+                size_t(root->inode), size_t(root_clone->inode),
                 root->R_upstream, root_clone->R_upstream,
                 root->C_downstream, root_clone->C_downstream,
                 root->Tdel, root_clone->Tdel);
@@ -1274,8 +1273,8 @@ bool is_equivalent_route_tree(const t_rt_node* root, const t_rt_node* root_clone
     t_linked_rt_edge* clone_edge{root_clone->u.child_list};
     while (orig_edge && clone_edge) {
         if (orig_edge->iswitch != clone_edge->iswitch)
-            VTR_LOG("mismatch i %d|%d edge switch %d|%d\n",
-                    root->inode, root_clone->inode,
+            VTR_LOG("mismatch i %ld|%ld edge switch %d|%d\n",
+                    size_t(root->inode), size_t(root_clone->inode),
                     orig_edge->iswitch, clone_edge->iswitch);
         if (!is_equivalent_route_tree(orig_edge->child, clone_edge->child)) return false; // child trees not equivalent
         orig_edge = orig_edge->next;
@@ -1290,23 +1289,23 @@ bool is_equivalent_route_tree(const t_rt_node* root, const t_rt_node* root_clone

 // check only the connections are correct, ignore R and C
 bool is_valid_skeleton_tree(const t_rt_node* root) {
-    int inode = root->inode;
+    RRNodeId inode = root->inode;
     t_linked_rt_edge* edge = root->u.child_list;
     while (edge) {
         if (edge->child->parent_node != root) {
-            VTR_LOG("parent-child relationship not mutually acknowledged by parent %d->%d child %d<-%d\n",
-                    inode, edge->child->inode,
-                    edge->child->inode, edge->child->parent_node->inode);
+            VTR_LOG("parent-child relationship not mutually acknowledged by parent %ld->%ld child %ld<-%ld\n",
+                    size_t(inode), size_t(edge->child->inode),
+                    size_t(edge->child->inode), size_t(edge->child->parent_node->inode));
             return false;
         }
         if (edge->iswitch != edge->child->parent_switch) {
-            VTR_LOG("parent(%d)-child(%d) connected switch not equivalent parent %d child %d\n",
-                    inode, edge->child->inode, edge->iswitch, edge->child->parent_switch);
+            VTR_LOG("parent(%ld)-child(%ld) connected switch not equivalent parent %d child %d\n",
+                    size_t(inode), size_t(edge->child->inode), edge->iswitch, edge->child->parent_switch);
             return false;
         }

         if (!is_valid_skeleton_tree(edge->child)) {
-            VTR_LOG("subtree %d invalid, propagating up\n", edge->child->inode);
+            VTR_LOG("subtree %ld invalid, propagating up\n", size_t(edge->child->inode));
             return false;
         }
         edge = edge->next;
@@ -1324,24 +1323,24 @@ bool is_valid_route_tree(const t_rt_node* root) {
     constexpr float RES_REL_TOL = 1e-6;
     constexpr float RES_ABS_TOL = vtr::DEFAULT_ABS_TOL;

-    int inode = root->inode;
+    RRNodeId inode = root->inode;
     short iswitch = root->parent_switch;
     if (root->parent_node) {
         if (device_ctx.rr_switch_inf[iswitch].buffered()) {
-            float R_upstream_check = device_ctx.rr_nodes[inode].R() + device_ctx.rr_switch_inf[iswitch].R;
+            float R_upstream_check = device_ctx.rr_graph.node_R(inode) + device_ctx.rr_switch_inf[iswitch].R;
             if (!vtr::isclose(root->R_upstream, R_upstream_check, RES_REL_TOL, RES_ABS_TOL)) {
-                VTR_LOG("%d mismatch R upstream %e supposed %e\n", inode, root->R_upstream, R_upstream_check);
+                VTR_LOG("%ld mismatch R upstream %e supposed %e\n", size_t(inode), root->R_upstream, R_upstream_check);
                 return false;
             }
         } else {
-            float R_upstream_check = device_ctx.rr_nodes[inode].R() + root->parent_node->R_upstream + device_ctx.rr_switch_inf[iswitch].R;
+            float R_upstream_check = device_ctx.rr_graph.node_R(inode) + root->parent_node->R_upstream + device_ctx.rr_switch_inf[iswitch].R;
             if (!vtr::isclose(root->R_upstream, R_upstream_check, RES_REL_TOL, RES_ABS_TOL)) {
-                VTR_LOG("%d mismatch R upstream %e supposed %e\n", inode, root->R_upstream, R_upstream_check);
+                VTR_LOG("%ld mismatch R upstream %e supposed %e\n", size_t(inode), root->R_upstream, R_upstream_check);
                 return false;
             }
         }
-    } else if (root->R_upstream != device_ctx.rr_nodes[inode].R()) {
-        VTR_LOG("%d mismatch R upstream %e supposed %e\n", inode, root->R_upstream, device_ctx.rr_nodes[inode].R());
+    } else if (root->R_upstream != device_ctx.rr_graph.node_R(inode)) {
+        VTR_LOG("%ld mismatch R upstream %e supposed %e\n", size_t(inode), root->R_upstream, device_ctx.rr_graph.node_R(inode));
         return false;
     }

@@ -1351,22 +1350,22 @@ bool is_valid_route_tree(const t_rt_node* root) {
     // sink, must not be congested
     if (!edge) {
         int occ = route_ctx.rr_node_route_inf[inode].occ();
-        int capacity = device_ctx.rr_nodes[inode].capacity();
+        int capacity = device_ctx.rr_graph.node_capacity(inode);
         if (occ > capacity) {
-            VTR_LOG("SINK %d occ %d > cap %d\n", inode, occ, capacity);
+            VTR_LOG("SINK %ld occ %d > cap %d\n", size_t(inode), occ, capacity);
             return false;
         }
     }
     while (edge) {
         if (edge->child->parent_node != root) {
-            VTR_LOG("parent-child relationship not mutually acknowledged by parent %d->%d child %d<-%d\n",
-                    inode, edge->child->inode,
-                    edge->child->inode, edge->child->parent_node->inode);
+            VTR_LOG("parent-child relationship not mutually acknowledged by parent %ld->%ld child %ld<-%ld\n",
+                    size_t(inode), size_t(edge->child->inode),
+                    size_t(edge->child->inode), size_t(edge->child->parent_node->inode));
             return false;
         }
         if (edge->iswitch != edge->child->parent_switch) {
-            VTR_LOG("parent(%d)-child(%d) connected switch not equivalent parent %d child %d\n",
-                    inode, edge->child->inode, edge->iswitch, edge->child->parent_switch);
+            VTR_LOG("parent(%ld)-child(%ld) connected switch not equivalent parent %d child %d\n",
+                    size_t(inode), size_t(edge->child->inode), edge->iswitch, edge->child->parent_switch);
             return false;
         }

@@ -1383,9 +1382,9 @@ bool is_valid_route_tree(const t_rt_node* root) {
         edge = edge->next;
     }

-    float C_downstream_check = C_downstream_children + device_ctx.rr_nodes[inode].C();
+    float C_downstream_check = C_downstream_children + device_ctx.rr_graph.node_C(inode);
     if (!vtr::isclose(root->C_downstream, C_downstream_check, CAP_REL_TOL, CAP_ABS_TOL)) {
-        VTR_LOG("%d mismatch C downstream %e supposed %e\n", inode, root->C_downstream, C_downstream_check);
+        VTR_LOG("%ld mismatch C downstream %e supposed %e\n", size_t(inode), root->C_downstream, C_downstream_check);
         return false;
     }

@@ -1397,8 +1396,8 @@ bool is_uncongested_route_tree(const t_rt_node* root) {
     auto& route_ctx = g_vpr_ctx.routing();
     auto& device_ctx = g_vpr_ctx.device();

-    int inode = root->inode;
-    if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_nodes[inode].capacity()) {
+    RRNodeId inode = root->inode;
+    if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_graph.node_capacity(inode)) {
         //This node is congested
         return false;
     }
@@ -1428,30 +1427,28 @@ init_route_tree_to_source_no_net(const RRNodeId& inode) {
     rt_root->parent_node = nullptr;
     rt_root->parent_switch = OPEN;
     rt_root->re_expand = true;
-    rt_root->inode = size_t(inode);
-
-    rt_root->inode_id = inode;
+    rt_root->inode = inode;

     rt_root->C_downstream = device_ctx.rr_graph.node_C(inode);
     rt_root->R_upstream = device_ctx.rr_graph.node_R(inode);
     rt_root->Tdel = 0.5 * device_ctx.rr_graph.node_R(inode) * device_ctx.rr_graph.node_C(inode);
-    rr_node_to_rt_node[size_t(inode)] = rt_root;
+    rr_node_to_rt_node[inode] = rt_root;

     return (rt_root);
 }

 bool verify_traceback_route_tree_equivalent(const t_trace* head, const t_rt_node* rt_root) {
     //Walk the route tree saving all the used connections
-    std::set<std::tuple<int, int, int>> route_tree_connections;
+    std::set<std::tuple<RRNodeId, int, RRNodeId>> route_tree_connections;
     collect_route_tree_connections(rt_root, route_tree_connections);

     //Remove the extra parent connection to root (not included in traceback)
-    route_tree_connections.erase(std::make_tuple(OPEN, OPEN, rt_root->inode));
+    route_tree_connections.erase(std::make_tuple(RRNodeId::INVALID(), OPEN, rt_root->inode));

     //Walk the traceback and verify that every connection exists in the route tree set
-    int prev_node = OPEN;
+    RRNodeId prev_node = RRNodeId::INVALID();
     int prev_switch = OPEN;
-    int to_node = OPEN;
+    RRNodeId to_node = RRNodeId::INVALID();
     for (const t_trace* trace = head; trace != nullptr; trace = trace->next) {
         to_node = trace->index;

@@ -1474,7 +1471,7 @@ bool verify_traceback_route_tree_equivalent(const t_trace* head, const t_rt_node
         std::string msg = "Found route tree connection(s) not in traceback:\n";
         for (auto conn : route_tree_connections) {
             std::tie(prev_node, prev_switch, to_node) = conn;
-            msg += vtr::string_fmt("\tnode %d -> %d (switch %d)\n", prev_node, to_node, prev_switch);
+            msg += vtr::string_fmt("\tnode %ld -> %ld (switch %d)\n", size_t(prev_node), size_t(to_node), prev_switch);
         }

         VPR_FATAL_ERROR(VPR_ERROR_ROUTE, msg.c_str());
@@ -1483,12 +1480,12 @@ bool verify_traceback_route_tree_equivalent(const t_trace* head, const t_rt_node
     return true;
 }

-void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<int, int, int>>& connections) {
+void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<RRNodeId, int, RRNodeId>>& connections) {
     if (node) {
         //Record reaching connection
-        int prev_node = OPEN;
+        RRNodeId prev_node = RRNodeId::INVALID();
         int prev_switch = OPEN;
-        int to_node = node->inode;
+        RRNodeId to_node = node->inode;
         if (node->parent_node) {
             prev_node = node->parent_node->inode;
             prev_switch = node->parent_switch;
@@ -1517,13 +1514,13 @@ t_rt_node* find_sink_rt_node(t_rt_node* rt_root, ClusterNetId net_id, ClusterPin
     auto& route_ctx = g_vpr_ctx.routing();

     int ipin = cluster_ctx.clb_nlist.pin_net_index(sink_pin);
-    int sink_rr_inode = route_ctx.net_rr_terminals[net_id][ipin]; //obtain the value of the routing resource sink
+    RRNodeId sink_rr_inode = route_ctx.net_rr_terminals[net_id][ipin]; //obtain the value of the routing resource sink

     t_rt_node* sink_rt_node = find_sink_rt_node_recurr(rt_root, sink_rr_inode); //find pointer to route tree node corresponding to sink_rr_inode
     VTR_ASSERT(sink_rt_node);
     return sink_rt_node;
 }
-t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, int sink_rr_inode) {
+t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, const RRNodeId& sink_rr_inode) {
     if (node->inode == sink_rr_inode) { //check if current node matches sink_rr_inode
         return node;
     }
route_tree_timing.h

@@ -39,7 +39,7 @@ bool verify_route_tree(t_rt_node* root);
 bool verify_traceback_route_tree_equivalent(const t_trace* trace_head, const t_rt_node* rt_root);

 t_rt_node* find_sink_rt_node(t_rt_node* rt_root, ClusterNetId net_id, ClusterPinId sink_pin);
-t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, int sink_inode);
+t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, const RRNodeId& sink_inode);

 /********** Incremental reroute ***********/
 // instead of ripping up a net that has some congestion, cut the branches