deploying RRGraph to placer

parent 35c136d8a4
commit 9d9a2c1402
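Note: the hunks below replace raw int RR node/edge indices with the strongly-typed RRNodeId/RREdgeId identifiers used by the RRGraph object (rr_graph_obj.h). The sketch below illustrates the strong-id pattern these types are assumed to follow (VTR's vtr::StrongId template); it is not the literal definition from this commit.

// Illustrative only: a strong id is a thin wrapper around an integer index.
// Distinct tag types keep RRNodeId and RREdgeId from being mixed up.
#include "vtr_strong_id.h"   // assumed VTR helper providing vtr::StrongId

struct rr_node_id_tag;
struct rr_edge_id_tag;

typedef vtr::StrongId<rr_node_id_tag> RRNodeId;   // size_t(id) recovers the raw index
typedef vtr::StrongId<rr_edge_id_tag> RREdgeId;   // RREdgeId::INVALID() is the sentinel

// Typical idioms seen throughout this diff:
//   if (inode == RRNodeId::INVALID()) { ... }     // "no node" check
//   legacy_array[size_t(inode)]                   // bridging to int-indexed storage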
@@ -160,7 +160,7 @@ struct DeviceContext : public Context {
 std::vector<std::vector<int>> rr_non_config_node_sets;

 //Reverse look-up from RR node to non-configurably connected node set (index into rr_nonconf_node_sets)
-std::unordered_map<int, int> rr_node_to_non_config_node_set;
+std::unordered_map<RRNodeId, RRNodeId> rr_node_to_non_config_node_set;

 //The indicies of rr nodes of a given type at a specific x,y grid location
 t_rr_node_indices rr_node_indices; //[0..NUM_RR_TYPES-1][0..grid.width()-1][0..grid.width()-1][0..size-1]
@@ -290,7 +290,7 @@ struct RoutingContext : public Context {
 vtr::vector<ClusterBlockId, std::vector<int>> rr_blk_source; /* [0..num_blocks-1][0..num_class-1] */

-std::vector<t_rr_node_route_inf> rr_node_route_inf; /* [0..device_ctx.num_rr_nodes-1] */
+vtr::vector<RRNodeId, t_rr_node_route_inf> rr_node_route_inf; /* [0..device_ctx.num_rr_nodes-1] */

 //Information about current routing status of each net
 vtr::vector<ClusterNetId, t_net_routing_status> net_status; //[0..cluster_ctx.clb_nlist.nets().size()-1]
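Note: rr_node_route_inf switches from a plain std::vector indexed by int to a vtr::vector keyed by RRNodeId, so lookups use the strong id directly. A minimal sketch of the intended access pattern, assuming vtr::vector<K, V> accepts its key type in operator[]:

// Hypothetical illustration of an id-keyed container (not code from this commit).
#include <limits>
#include <vector>
#include "vtr_vector.h"

void mark_visited(vtr::vector<RRNodeId, t_rr_node_route_inf>& route_inf,
                  const std::vector<RRNodeId>& visited) {
    for (const RRNodeId& node : visited) {
        // Index directly with the strong id; no size_t() cast is needed here.
        route_inf[node].path_cost = std::numeric_limits<float>::infinity();
    }
}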
@@ -1164,11 +1164,9 @@ struct t_trace {
 * Number of times this node must be reached to fully route. *
 * occ: The current occupancy of the associated rr node */
 struct t_rr_node_route_inf {
-int prev_node;
-/* Xifan Tang - prev_node for RRGraph object */
-RRNodeId prev_node_id;
-
-t_edge_size prev_edge;
+RRNodeId prev_node;
+RREdgeId prev_edge;

 float pres_cost;
 float acc_cost;
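Note: with prev_node/prev_edge now strongly typed, the NO_PREVIOUS integer sentinel is replaced by RRNodeId::INVALID() and RREdgeId::INVALID() (see reset_path_costs below). A hedged sketch of how this bookkeeping is typically reset and tested:

// Illustrative only; the fields follow the struct shown above.
void clear_prev(t_rr_node_route_inf& inf) {
    inf.prev_node = RRNodeId::INVALID();   // was: NO_PREVIOUS
    inf.prev_edge = RREdgeId::INVALID();   // was: NO_PREVIOUS
}

bool has_predecessor(const t_rr_node_route_inf& inf) {
    return inf.prev_node != RRNodeId::INVALID();
}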
@@ -322,7 +322,7 @@ static float route_connection_delay(
 {
 successfully_routed = route_profiler.calculate_delay(
-size_t(source_rr_node), size_t(sink_rr_node),
+source_rr_node, sink_rr_node,
 router_opts,
 &net_delay_value);
 }
@@ -706,26 +706,26 @@ static std::pair<t_trace*, t_trace*> add_trace_non_configurable_recurr(int node,
 /* The routine sets the path_cost to HUGE_POSITIVE_FLOAT for *
 * all channel segments touched by previous routing phases. */
-void reset_path_costs(const std::vector<int>& visited_rr_nodes) {
+void reset_path_costs(const std::vector<RRNodeId>& visited_rr_nodes) {
 auto& route_ctx = g_vpr_ctx.mutable_routing();

 for (auto node : visited_rr_nodes) {
 route_ctx.rr_node_route_inf[node].path_cost = std::numeric_limits<float>::infinity();
 route_ctx.rr_node_route_inf[node].backward_path_cost = std::numeric_limits<float>::infinity();
-route_ctx.rr_node_route_inf[node].prev_node = NO_PREVIOUS;
-route_ctx.rr_node_route_inf[node].prev_edge = NO_PREVIOUS;
+route_ctx.rr_node_route_inf[node].prev_node = RRNodeId::INVALID();
+route_ctx.rr_node_route_inf[node].prev_edge = RREdgeId::INVALID();
 }
 }

 /* Returns the *congestion* cost of using this rr_node. */
-float get_rr_cong_cost(int inode) {
+float get_rr_cong_cost(const RRNodeId& inode) {
 auto& device_ctx = g_vpr_ctx.device();

 float cost = get_single_rr_cong_cost(inode);

 auto itr = device_ctx.rr_node_to_non_config_node_set.find(inode);
 if (itr != device_ctx.rr_node_to_non_config_node_set.end()) {
-for (int node : device_ctx.rr_non_config_node_sets[itr->second]) {
+for (const RRNodeId& node : device_ctx.rr_non_config_node_sets[itr->second]) {
 if (node == inode) {
 continue; //Already included above
 }

@@ -738,11 +738,11 @@ float get_rr_cong_cost(int inode) {
 /* Returns the congestion cost of using this rr_node, *ignoring*
 * non-configurable edges */
-static float get_single_rr_cong_cost(int inode) {
+static float get_single_rr_cong_cost(const RRNodeId& inode) {
 auto& device_ctx = g_vpr_ctx.device();
 auto& route_ctx = g_vpr_ctx.routing();

-auto cost_index = device_ctx.rr_nodes[inode].cost_index();
+auto cost_index = device_ctx.rr_graph.node_cost_index(inode);
 float cost = device_ctx.rr_indexed_data[cost_index].base_cost
 * route_ctx.rr_node_route_inf[inode].acc_cost
 * route_ctx.rr_node_route_inf[inode].pres_cost;
@@ -1265,17 +1265,17 @@ void push_back(t_heap* const hptr) {
 ++heap_tail;
 }

-void push_back_node(int inode, float total_cost, int prev_node, int prev_edge, float backward_path_cost, float R_upstream) {
+void push_back_node(const RRNodeId& inode, float total_cost, const RRNodeId& prev_node, const RREdgeId& prev_edge, float backward_path_cost, float R_upstream) {
 /* Puts an rr_node on the heap with the same condition as node_to_heap,
 * but do not fix heap property yet as that is more efficiently done from
 * bottom up with build_heap */

 auto& route_ctx = g_vpr_ctx.routing();
-if (total_cost >= route_ctx.rr_node_route_inf[inode].path_cost)
+if (total_cost >= route_ctx.rr_node_route_inf[size_t(inode)].path_cost)
 return;

 t_heap* hptr = alloc_heap_data();
-hptr->index = inode;
+hptr->index = size_t(inode);
 hptr->cost = total_cost;
 hptr->u.prev.node = prev_node;
 hptr->u.prev.edge = prev_edge;
@@ -3,6 +3,7 @@
 #include <vector>
 #include "clustered_netlist.h"
 #include "vtr_vector.h"
+#include "rr_graph_obj.h"

 /* Used by the heap as its fundamental data structure.
 * Each heap element represents a partial route.

@@ -37,11 +38,11 @@ struct t_heap {
 float backward_path_cost = 0.;
 float R_upstream = 0.;

-int index = OPEN;
+RRNodeId index = RRNodeId::INVALID();

 struct t_prev {
-int node;
-int edge;
+RRNodeId& node;
+RREdgeId& edge;
 };

 union {

@@ -67,7 +68,7 @@ t_trace* update_traceback(t_heap* hptr, ClusterNetId net_id);

 void reset_path_costs(const std::vector<int>& visited_rr_nodes);

-float get_rr_cong_cost(int inode);
+float get_rr_cong_cost(const RRNodeId& inode);

 void mark_ends(ClusterNetId net_id);
 void mark_remaining_ends(const std::vector<int>& remaining_sinks);

@@ -89,7 +90,7 @@ void build_heap();
 void sift_down(size_t hole);
 void sift_up(size_t tail, t_heap* const hptr);
 void push_back(t_heap* const hptr);
-void push_back_node(int inode, float total_cost, int prev_node, int prev_edge, float backward_path_cost, float R_upstream);
+void push_back_node(const RRNodeId& inode, float total_cost, const RRNodeId& prev_node, const RREdgeId& prev_edge, float backward_path_cost, float R_upstream);
 bool is_valid();
 void pop_heap();
 void print_heap();
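Note: t_heap::index becomes an RRNodeId, while some call sites (push_back_node above) still cast through size_t when touching legacy int-indexed structures. A hedged usage sketch of the updated heap API; the node ids and cost values here are made up for illustration:

// Illustrative call only.
RRNodeId inode = RRNodeId(42);            // node being pushed
RRNodeId prev_node = RRNodeId::INVALID(); // no predecessor yet
RREdgeId prev_edge = RREdgeId::INVALID();

heap_::push_back_node(inode, /*total_cost=*/1.5f, prev_node, prev_edge,
                      /*backward_path_cost=*/0.7f, /*R_upstream=*/0.f);
heap_::build_heap();                      // restore the heap property afterwards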
@@ -166,11 +166,11 @@ static t_heap* timing_driven_route_connection_from_route_tree_high_fanout(t_rt_n
 std::vector<int>& modified_rr_node_inf,
 RouterStats& router_stats);

-static t_heap* timing_driven_route_connection_from_heap(int sink_node,
+static t_heap* timing_driven_route_connection_from_heap(const RRNodeId& sink_node,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-std::vector<int>& modified_rr_node_inf,
+std::vector<RRNodeId>& modified_rr_node_inf,
 RouterStats& router_stats);

 static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const t_conn_cost_params cost_params,

@@ -180,17 +180,17 @@ static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const

 void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* node);
 static void timing_driven_expand_cheapest(t_heap* cheapest,
-int target_node,
+const RRNodeId& target_node,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-std::vector<int>& modified_rr_node_inf,
+std::vector<RRNodeId>& modified_rr_node_inf,
 RouterStats& router_stats);

 static t_rt_node* setup_routing_resources(int itry, ClusterNetId net_id, unsigned num_sinks, float pres_fac, int min_incremental_reroute_fanout, CBRR& incremental_rerouting_res, t_rt_node** rt_node_of_sink);

 static void add_route_tree_to_heap(t_rt_node* rt_node,
-int target_node,
+const RRNodeId& target_node,
 const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 RouterStats& router_stats);

@@ -206,7 +206,7 @@ static t_bb add_high_fanout_route_tree_to_heap(t_rt_node* rt_root,
 static t_bb adjust_highfanout_bounding_box(t_bb highfanout_bb);

 static void add_route_tree_node_to_heap(t_rt_node* rt_node,
-int target_node,
+const RRNodeId& target_node,
 const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 RouterStats& router_stats);

@@ -215,36 +215,36 @@ static void timing_driven_expand_neighbours(t_heap* current,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-int target_node,
+const RRNodeId& target_node,
 RouterStats& router_stats);

 static void timing_driven_expand_neighbour(t_heap* current,
-const int from_node,
-const t_edge_size from_edge,
-const int to_node,
+const RRNodeId& from_node,
+const RREdgeId& from_edge,
+const RRNodeId& to_node,
 const t_conn_cost_params cost_params,
 const t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-int target_node,
+const RRNodeId& target_node,
 const t_bb target_bb,
 RouterStats& router_stats);

 static void timing_driven_add_to_heap(const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 const t_heap* current,
-const int from_node,
-const int to_node,
-const int iconn,
-const int target_node,
+const RRNodeId& from_node,
+const RRNodeId& to_node,
+const RREdgeId& iconn,
+const RRNodeId& target_node,
 RouterStats& router_stats);

 static void timing_driven_expand_node(const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 t_heap* current,
-const int from_node,
-const int to_node,
-const int iconn,
-const int target_node);
+const RRNodeId& from_node,
+const RRNodeId& to_node,
+const RREdgeId& iconn,
+const RRNodeId& target_node);

 static void evaluate_timing_driven_node_costs(t_heap* from,
 const t_conn_cost_params cost_params,
@@ -1296,18 +1296,18 @@ static bool timing_driven_route_sink(ClusterNetId net_id,
 //
 //Returns either the last element of the path, or nullptr if no path is found
 t_heap* timing_driven_route_connection_from_route_tree(t_rt_node* rt_root,
-int sink_node,
+const RRNodeId& sink_node,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-std::vector<int>& modified_rr_node_inf,
+std::vector<RRNodeId>& modified_rr_node_inf,
 RouterStats& router_stats) {
 // re-explore route tree from root to add any new nodes (buildheap afterwards)
 // route tree needs to be repushed onto the heap since each node's cost is target specific
 add_route_tree_to_heap(rt_root, sink_node, cost_params, router_lookahead, router_stats);
 heap_::build_heap(); // via sifting down everything

-int source_node = rt_root->inode;
+RRNodeId source_node = rt_root->inode;

 if (is_empty_heap()) {
 VTR_LOG("No source in route tree: %s\n", describe_unrouteable_connection(source_node, sink_node).c_str());

@@ -1454,11 +1454,11 @@ static t_heap* timing_driven_route_connection_from_route_tree_high_fanout(t_rt_n
 //This is the core maze routing routine.
 //
 //Returns either the last element of the path, or nullptr if no path is found
-static t_heap* timing_driven_route_connection_from_heap(int sink_node,
+static t_heap* timing_driven_route_connection_from_heap(const RRNodeId& sink_node,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-std::vector<int>& modified_rr_node_inf,
+std::vector<RRNodeId>& modified_rr_node_inf,
 RouterStats& router_stats) {
 VTR_ASSERT_SAFE(heap_::is_valid());

@@ -1472,13 +1472,13 @@ static t_heap* timing_driven_route_connection_from_heap(int sink_node,
 cheapest = get_heap_head();
 ++router_stats.heap_pops;

-int inode = cheapest->index;
+RRNodeId inode = cheapest->index;
 VTR_LOGV_DEBUG(f_router_debug, " Popping node %d (cost: %g)\n",
-inode, cheapest->cost);
+size_t(inode), cheapest->cost);

 //Have we found the target?
 if (inode == sink_node) {
-VTR_LOGV_DEBUG(f_router_debug, " Found target %8d (%s)\n", inode, describe_rr_node(inode).c_str());
+VTR_LOGV_DEBUG(f_router_debug, " Found target %8d (%s)\n", size_t(inode), describe_rr_node(inode).c_str());
 break;
 }

@@ -1584,15 +1584,15 @@ static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const
 }

 static void timing_driven_expand_cheapest(t_heap* cheapest,
-int target_node,
+const RRNodeId& target_node,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-std::vector<int>& modified_rr_node_inf,
+std::vector<RRNodeId>& modified_rr_node_inf,
 RouterStats& router_stats) {
 auto& route_ctx = g_vpr_ctx.mutable_routing();

-int inode = cheapest->index;
+RRNodeId inode = cheapest->index;

 float best_total_cost = route_ctx.rr_node_route_inf[inode].path_cost;
 float best_back_cost = route_ctx.rr_node_route_inf[inode].backward_path_cost;
@@ -1611,7 +1611,7 @@ static void timing_driven_expand_cheapest(t_heap* cheapest,
 if (best_total_cost > new_total_cost && best_back_cost > new_back_cost) {
 //Explore from this node, since the current/new partial path has the best cost
 //found so far
-VTR_LOGV_DEBUG(f_router_debug, " Better cost to %d\n", inode);
+VTR_LOGV_DEBUG(f_router_debug, " Better cost to %d\n", size_t(inode));
 VTR_LOGV_DEBUG(f_router_debug, " New total cost: %g\n", new_total_cost);
 VTR_LOGV_DEBUG(f_router_debug, " New back cost: %g\n", new_back_cost);
 VTR_LOGV_DEBUG(f_router_debug, " Setting path costs for assicated node %d (from %d edge %d)\n", cheapest->index, cheapest->u.prev.node, cheapest->u.prev.edge);

@@ -1630,7 +1630,7 @@ static void timing_driven_expand_cheapest(t_heap* cheapest,
 } else {
 //Post-heap prune, do not re-explore from the current/new partial path as it
 //has worse cost than the best partial path to this node found so far
-VTR_LOGV_DEBUG(f_router_debug, " Worse cost to %d\n", inode);
+VTR_LOGV_DEBUG(f_router_debug, " Worse cost to %d\n", size_t(inode));
 VTR_LOGV_DEBUG(f_router_debug, " Old total cost: %g\n", best_total_cost);
 VTR_LOGV_DEBUG(f_router_debug, " Old back cost: %g\n", best_back_cost);
 VTR_LOGV_DEBUG(f_router_debug, " New total cost: %g\n", new_total_cost);

@@ -1797,7 +1797,7 @@ void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* rt_node)
 }

 static void add_route_tree_to_heap(t_rt_node* rt_node,
-int target_node,
+const RRNodeId& target_node,
 const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 RouterStats& router_stats) {

@@ -1929,11 +1929,11 @@ static t_bb adjust_highfanout_bounding_box(t_bb highfanout_bb) {
 //Note that if you want to respect rt_node->re_expand that is the caller's
 //responsibility.
 static void add_route_tree_node_to_heap(t_rt_node* rt_node,
-int target_node,
+const RRNodeId& target_node,
 const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 RouterStats& router_stats) {
-int inode = rt_node->inode;
+const RRNodeId& inode = rt_node->inode_id;
 float backward_path_cost = cost_params.criticality * rt_node->Tdel;

 float R_upstream = rt_node->R_upstream;

@@ -1955,7 +1955,7 @@ static void add_route_tree_node_to_heap(t_rt_node* rt_node,

 VTR_LOGV_DEBUG(f_router_debug, " Adding node %8d to heap from init route tree with cost %g (%s)\n", inode, tot_cost, describe_rr_node(inode).c_str());

-heap_::push_back_node(inode, tot_cost, NO_PREVIOUS, NO_PREVIOUS,
+heap_::push_back_node(inode, tot_cost, RRNodeId::INVALID(), RREdgeId::INVALID(),
 backward_path_cost, R_upstream);

 ++router_stats.heap_pushes;

@@ -1965,7 +1965,7 @@ static void timing_driven_expand_neighbours(t_heap* current,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-int target_node,
+const RRNodeId& target_node,
 RouterStats& router_stats) {
 /* Puts all the rr_nodes adjacent to current on the heap.
 */

@@ -1974,18 +1974,17 @@ static void timing_driven_expand_neighbours(t_heap* current,

 t_bb target_bb;
 if (target_node != OPEN) {
-target_bb.xmin = device_ctx.rr_nodes[target_node].xlow();
-target_bb.ymin = device_ctx.rr_nodes[target_node].ylow();
-target_bb.xmax = device_ctx.rr_nodes[target_node].xhigh();
-target_bb.ymax = device_ctx.rr_nodes[target_node].yhigh();
+target_bb.xmin = device_ctx.rr_graph.node_xlow(target_node);
+target_bb.ymin = device_ctx.rr_graph.node_ylow(target_node);
+target_bb.xmax = device_ctx.rr_graph.node_xhigh(target_node);
+target_bb.ymax = device_ctx.rr_graph.node_yhigh(target_node);
 }

 //For each node associated with the current heap element, expand all of it's neighbours
-int num_edges = device_ctx.rr_nodes[current->index].num_edges();
-for (int iconn = 0; iconn < num_edges; iconn++) {
-int to_node = device_ctx.rr_nodes[current->index].edge_sink_node(iconn);
+for (const RREdgeId& edge : device_ctx.rr_graph.node_out_edges(current->index)) {
+const RRNodeId& to_node = device_ctx.rr_graph.edge_sink_node[edge];
 timing_driven_expand_neighbour(current,
-current->index, iconn, to_node,
+current->index, edge, to_node,
 cost_params,
 bounding_box,
 router_lookahead,
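Note: the neighbour expansion above replaces the index-based num_edges()/edge_sink_node(iconn) loop with iteration over the RRGraph's out-edge range. A hedged sketch of that pattern in isolation; the diff above indexes edge_sink_node with brackets, so treat the call form used here as an assumption:

// Illustrative fan-out walk over an RRGraph node (not verbatim commit code).
void count_fanout(const RRGraph& rr_graph, const RRNodeId& from_node) {
    size_t fanout = 0;
    for (const RREdgeId& edge : rr_graph.node_out_edges(from_node)) {
        RRNodeId to_node = rr_graph.edge_sink_node(edge);  // assumed accessor form
        (void)to_node;
        ++fanout;
    }
    VTR_LOG("Node %zu drives %zu edges\n", size_t(from_node), fanout);
}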
@@ -1999,31 +1998,31 @@ static void timing_driven_expand_neighbours(t_heap* current,
 //RR nodes outside the expanded bounding box specified in bounding_box are not added
 //to the heap.
 static void timing_driven_expand_neighbour(t_heap* current,
-const int from_node,
-const t_edge_size from_edge,
-const int to_node,
+const RRNodeId& from_node,
+const RREdgeId& from_edge,
+const RRNodeId& to_node,
 const t_conn_cost_params cost_params,
 const t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-int target_node,
+const RRNodeId& target_node,
 const t_bb target_bb,
 RouterStats& router_stats) {
 auto& device_ctx = g_vpr_ctx.device();

-int to_xlow = device_ctx.rr_nodes[to_node].xlow();
-int to_ylow = device_ctx.rr_nodes[to_node].ylow();
-int to_xhigh = device_ctx.rr_nodes[to_node].xhigh();
-int to_yhigh = device_ctx.rr_nodes[to_node].yhigh();
+int to_xlow = device_ctx.rr_graph.node_xlow(to_node);
+int to_ylow = device_ctx.rr_graph.node_ylow(to_node);
+int to_xhigh = device_ctx.rr_graph.node_xhigh(to_node);
+int to_yhigh = device_ctx.rr_graph.node_yhigh(to_node);

 if (to_xhigh < bounding_box.xmin //Strictly left of BB left-edge
 || to_xlow > bounding_box.xmax //Strictly right of BB right-edge
 || to_yhigh < bounding_box.ymin //Strictly below BB bottom-edge
 || to_ylow > bounding_box.ymax) { //Strictly above BB top-edge
 VTR_LOGV_DEBUG(f_router_debug,
-" Pruned expansion of node %d edge %d -> %d"
+" Pruned expansion of node %ld edge %ld -> %ld"
 " (to node location %d,%dx%d,%d outside of expanded"
 " net bounding box %d,%dx%d,%d)\n",
-from_node, from_edge, to_node,
+size_t(from_node), size_t(from_edge), size_t(to_node),
 to_xlow, to_ylow, to_xhigh, to_yhigh,
 bounding_box.xmin, bounding_box.ymin, bounding_box.xmax, bounding_box.ymax);
 return; /* Node is outside (expanded) bounding box. */
@@ -2034,7 +2033,7 @@ static void timing_driven_expand_neighbour(t_heap* current,
 * more promising routes, but makes route-throughs (via CLBs) impossible. *
 * Change this if you want to investigate route-throughs. */
 if (target_node != OPEN) {
-t_rr_type to_type = device_ctx.rr_nodes[to_node].type();
+t_rr_type to_type = device_ctx.rr_graph.node_type(to_node);
 if (to_type == IPIN) {
 //Check if this IPIN leads to the target block
 // IPIN's of the target block should be contained within it's bounding box

@@ -2043,10 +2042,10 @@ static void timing_driven_expand_neighbour(t_heap* current,
 || to_xhigh > target_bb.xmax
 || to_yhigh > target_bb.ymax) {
 VTR_LOGV_DEBUG(f_router_debug,
-" Pruned expansion of node %d edge %d -> %d"
+" Pruned expansion of node %ld edge %ld -> %ld"
 " (to node is IPIN at %d,%dx%d,%d which does not"
 " lead to target block %d,%dx%d,%d)\n",
-from_node, from_edge, to_node,
+size_t(from_node), size_t(from_edge), size_t(to_node),
 to_xlow, to_ylow, to_xhigh, to_yhigh,
 target_bb.xmin, target_bb.ymin, target_bb.xmax, target_bb.ymax);
 return;

@@ -2054,8 +2053,8 @@ static void timing_driven_expand_neighbour(t_heap* current,
 }
 }

-VTR_LOGV_DEBUG(f_router_debug, " Expanding node %d edge %d -> %d\n",
-from_node, from_edge, to_node);
+VTR_LOGV_DEBUG(f_router_debug, " Expanding node %ld edge %ld -> %ld\n",
+size_t(from_node), size_t(from_edge), size_t(to_node));

 timing_driven_add_to_heap(cost_params,
 router_lookahead,

@@ -2066,10 +2065,10 @@ static void timing_driven_expand_neighbour(t_heap* current,
 static void timing_driven_add_to_heap(const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 const t_heap* current,
-const int from_node,
-const int to_node,
-const int iconn,
-const int target_node,
+const RRNodeId& from_node,
+const RRNodeId& to_node,
+const RREdgeId& iconn,
+const RRNodeId& target_node,
 RouterStats& router_stats) {
 t_heap* next = alloc_heap_data();
 next->index = to_node;

@@ -2085,8 +2084,8 @@ static void timing_driven_add_to_heap(const t_conn_cost_params cost_params,

 auto& route_ctx = g_vpr_ctx.routing();

-float best_total_cost = route_ctx.rr_node_route_inf[to_node].path_cost;
-float best_back_cost = route_ctx.rr_node_route_inf[to_node].backward_path_cost;
+float best_total_cost = route_ctx.rr_node_route_inf[size_t(to_node)].path_cost;
+float best_back_cost = route_ctx.rr_node_route_inf[size_t(to_node)].backward_path_cost;

 float new_total_cost = next->cost;
 float new_back_cost = next->backward_path_cost;

@@ -2110,11 +2109,11 @@ static void timing_driven_add_to_heap(const t_conn_cost_params cost_params,
 static void timing_driven_expand_node(const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
 t_heap* current,
-const int from_node,
-const int to_node,
-const int iconn,
-const int target_node) {
-VTR_LOGV_DEBUG(f_router_debug, " Expanding to node %d (%s)\n", to_node, describe_rr_node(to_node).c_str());
+const RRNodeId& from_node,
+const RRNodeId& to_node,
+const RREdgeId& iconn,
+const RRNodeId& target_node) {
+VTR_LOGV_DEBUG(f_router_debug, " Expanding to node %ld (%s)\n", size_t(to_node), describe_rr_node(to_node).c_str());

 evaluate_timing_driven_node_costs(current,
 cost_params,

@@ -2131,10 +2130,10 @@ static void timing_driven_expand_node(const t_conn_cost_params cost_params,
 static void evaluate_timing_driven_node_costs(t_heap* to,
 const t_conn_cost_params cost_params,
 const RouterLookahead& router_lookahead,
-const int from_node,
-const int to_node,
-const int iconn,
-const int target_node) {
+const RRNodeId& from_node,
+const RRNodeId& to_node,
+const RREdgeId& iconn,
+const RRNodeId& target_node) {
 /* new_costs.backward_cost: is the "known" part of the cost to this node -- the
 * congestion cost of all the routing resources back to the existing route
 * plus the known delay of the total path back to the source.
@@ -2146,18 +2145,18 @@ static void evaluate_timing_driven_node_costs(t_heap* to,
 auto& device_ctx = g_vpr_ctx.device();

 //Info for the switch connecting from_node to_node
-int iswitch = device_ctx.rr_nodes[from_node].edge_switch(iconn);
+int iswitch = size_t(device_ctx.rr_graph.edge_switch(iconn));
 bool switch_buffered = device_ctx.rr_switch_inf[iswitch].buffered();
 float switch_R = device_ctx.rr_switch_inf[iswitch].R;
 float switch_Tdel = device_ctx.rr_switch_inf[iswitch].Tdel;
 float switch_Cinternal = device_ctx.rr_switch_inf[iswitch].Cinternal;

 //To node info
-float node_C = device_ctx.rr_nodes[to_node].C();
-float node_R = device_ctx.rr_nodes[to_node].R();
+float node_C = device_ctx.rr_graph.node_C(to_node);
+float node_R = device_ctx.rr_graph.node_R(to_node);

 //From node info
-float from_node_R = device_ctx.rr_nodes[from_node].R();
+float from_node_R = device_ctx.rr_graph.node_R(from_node);

 //Update R_upstream
 if (switch_buffered) {

@@ -2189,7 +2188,7 @@ static void evaluate_timing_driven_node_costs(t_heap* to,
 //Second, we adjust the Tdel to account for the delay caused by the internal capacitance.
 Tdel += Rdel_adjust * switch_Cinternal;

-bool reached_configurably = device_ctx.rr_nodes[from_node].edge_is_configurable(iconn);
+bool reached_configurably = device_ctx.rr_graph.edge_is_configurable(iconn);

 float cong_cost = 0.;
 if (reached_configurably) {

@@ -2211,8 +2210,8 @@ static void evaluate_timing_driven_node_costs(t_heap* to,
 to->backward_path_cost += cost_params.criticality * Tdel; //Delay cost

 if (cost_params.bend_cost != 0.) {
-t_rr_type from_type = device_ctx.rr_nodes[from_node].type();
-t_rr_type to_type = device_ctx.rr_nodes[to_node].type();
+t_rr_type from_type = device_ctx.rr_graph.node_type(from_node);
+t_rr_type to_type = device_ctx.rr_graph.node_type(to_node);
 if ((from_type == CHANX && to_type == CHANY) || (from_type == CHANY && to_type == CHANX)) {
 to->backward_path_cost += cost_params.bend_cost; //Bend cost
 }

@@ -2736,7 +2735,7 @@ static void print_route_status(int itry, double elapsed_sec, float pres_fac, int
 fflush(stdout);
 }

-static std::string describe_unrouteable_connection(const int source_node, const int sink_node) {
+static std::string describe_unrouteable_connection(const RRNodeId& source_node, const RRNodeId& sink_node) {
 std::string msg = vtr::string_fmt(
 "Cannot route from %s (%s) to "
 "%s (%s) -- no possible path",

@@ -3051,7 +3050,7 @@ static void prune_unused_non_configurable_nets(CBRR& connections_inf) {
 }

 //Returns true if both nodes are part of the same non-configurable edge set
-static bool same_non_config_node_set(int from_node, int to_node) {
+static bool same_non_config_node_set(const RRNodeId& from_node, const RRNodeId& to_node) {
 auto& device_ctx = g_vpr_ctx.device();

 auto from_itr = device_ctx.rr_node_to_non_config_node_set.find(from_node);
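Note: rr_node_to_non_config_node_set is now keyed by RRNodeId, so std::unordered_map needs a hash for the strong id. VTR's strong-id header is expected to provide one; the sketch below spells that requirement out explicitly, and the functor name is invented for illustration.

// Hedged illustration: an explicit hash functor for a strong id, in case the
// id type does not already specialize std::hash.
#include <unordered_map>

struct RRNodeIdHash {
    std::size_t operator()(const RRNodeId& id) const noexcept {
        return std::hash<std::size_t>()(size_t(id));  // hash the raw index
    }
};

using NodeToSetMap = std::unordered_map<RRNodeId, RRNodeId, RRNodeIdHash>;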
@@ -56,7 +56,7 @@ void alloc_timing_driven_route_structs(float** pin_criticality_ptr,
 t_rt_node*** rt_node_of_sink_ptr);
 void free_timing_driven_route_structs(float* pin_criticality, int* sink_order, t_rt_node** rt_node_of_sink);

-void enable_router_debug(const t_router_opts& router_opts, ClusterNetId net, int sink_rr);
+void enable_router_debug(const t_router_opts& router_opts, ClusterNetId net, const RRNodeId& sink_rr);

 //Delay budget information for a specific connection
 struct t_conn_delay_budget {

@@ -80,11 +80,11 @@ struct t_conn_cost_params {
 };

 t_heap* timing_driven_route_connection_from_route_tree(t_rt_node* rt_root,
-int sink_node,
+const RRNodeId& sink_node,
 const t_conn_cost_params cost_params,
 t_bb bounding_box,
 const RouterLookahead& router_lookahead,
-std::vector<int>& modified_rr_node_inf,
+std::vector<RRNodeId>& modified_rr_node_inf,
 RouterStats& router_stats);

 std::vector<t_heap> timing_driven_find_all_shortest_paths_from_route_tree(t_rt_node* rt_root,
@@ -536,7 +536,7 @@ void load_route_tree_Tdel(t_rt_node* subtree_rt_root, float Tarrival) {
 * must be correct before this routine is called. Tarrival is the time at
 * at which the signal arrives at this node's *input*. */

-int inode;
+RRNodeId inode;
 short iswitch;
 t_rt_node* child_node;
 t_linked_rt_edge* linked_rt_edge;

@@ -550,7 +550,7 @@ void load_route_tree_Tdel(t_rt_node* subtree_rt_root, float Tarrival) {
 * along a wire segment's length. See discussion in net_delay.c if you want
 * to change this. */

-Tdel = Tarrival + 0.5 * subtree_rt_root->C_downstream * device_ctx.rr_nodes[inode].R();
+Tdel = Tarrival + 0.5 * subtree_rt_root->C_downstream * device_ctx.rr_graph.node_R(inode);
 subtree_rt_root->Tdel = Tdel;

 /* Now expand the children of this node to load their Tdel values (depth-

@@ -1415,7 +1415,7 @@ bool is_uncongested_route_tree(const t_rt_node* root) {
 }

 t_rt_node*
-init_route_tree_to_source_no_net(int inode) {
+init_route_tree_to_source_no_net(const RRNodeId& inode) {
 /* Initializes the routing tree to just the net source, and returns the root
 * node of the rt_tree (which is just the net source). */

@@ -1428,11 +1428,14 @@ init_route_tree_to_source_no_net(int inode) {
 rt_root->parent_node = nullptr;
 rt_root->parent_switch = OPEN;
 rt_root->re_expand = true;
-rt_root->inode = inode;
-rt_root->C_downstream = device_ctx.rr_nodes[inode].C();
-rt_root->R_upstream = device_ctx.rr_nodes[inode].R();
-rt_root->Tdel = 0.5 * device_ctx.rr_nodes[inode].R() * device_ctx.rr_nodes[inode].C();
-rr_node_to_rt_node[inode] = rt_root;
+rt_root->inode = size_t(inode);
+
+rt_root->inode_id = inode;
+
+rt_root->C_downstream = device_ctx.rr_graph.node_C(inode);
+rt_root->R_upstream = device_ctx.rr_graph.node_R(inode);
+rt_root->Tdel = 0.5 * device_ctx.rr_graph.node_R(inode) * device_ctx.rr_graph.node_C(inode);
+rr_node_to_rt_node[size_t(inode)] = rt_root;

 return (rt_root);
 }

@@ -4,6 +4,7 @@
 #include "connection_based_routing.h"
 #include "route_common.h"
 #include "spatial_route_tree_lookup.h"
+#include "rr_graph_obj.h"

 /**************** Subroutines exported by route_tree_timing.c ***************/

@@ -30,7 +31,7 @@ void update_remaining_net_delays_from_route_tree(float* net_delay,
 void load_route_tree_Tdel(t_rt_node* rt_root, float Tarrival);
 void load_route_tree_rr_route_inf(t_rt_node* root);

-t_rt_node* init_route_tree_to_source_no_net(int inode);
+t_rt_node* init_route_tree_to_source_no_net(const RRNodeId& inode);

 void add_route_tree_to_rr_node_lookup(t_rt_node* node);

@@ -51,10 +51,8 @@ struct t_rt_node {
 RRSwitchId parent_switch_id;

 bool re_expand;
-int inode;
-
-/* Xifan Tang - RRGraph node */
-RRNodeId inode_id;
+RRNodeId inode;

 float C_downstream;
 float R_upstream;
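Note: the t_rt_node struct consolidates the old int inode and RRNodeId inode_id fields into a single RRNodeId inode, while some call sites in this commit still write rt_root->inode = size_t(inode) or read rt_node->inode_id; those presumably get reconciled as the migration continues. A hedged sketch of building a route-tree root against the consolidated field, assuming the existing alloc_rt_node() helper:

// Illustrative construction only; not verbatim commit code.
t_rt_node* make_source_root(const RRGraph& rr_graph, const RRNodeId& inode) {
    t_rt_node* rt_root = alloc_rt_node();       // assumed existing helper
    rt_root->parent_node = nullptr;
    rt_root->re_expand = true;
    rt_root->inode = inode;                     // single RRNodeId field
    rt_root->C_downstream = rr_graph.node_C(inode);
    rt_root->R_upstream = rr_graph.node_R(inode);
    rt_root->Tdel = 0.5 * rr_graph.node_R(inode) * rr_graph.node_C(inode);
    return rt_root;
}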
@@ -13,7 +13,7 @@ RouterDelayProfiler::RouterDelayProfiler(
 const RouterLookahead* lookahead)
 : router_lookahead_(lookahead) {}

-bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const t_router_opts& router_opts, float* net_delay) const {
+bool RouterDelayProfiler::calculate_delay(const RRNodeId& source_node, const RRNodeId& sink_node, const t_router_opts& router_opts, float* net_delay) const {
 /* Returns true as long as found some way to hook up this net, even if that *
 * way resulted in overuse of resources (congestion). If there is no way *
 * to route this net, even ignoring congestion, it returns false. In this *

@@ -22,7 +22,8 @@ bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const
 auto& route_ctx = g_vpr_ctx.routing();

 t_rt_node* rt_root = setup_routing_resources_no_net(source_node);
-enable_router_debug(router_opts, ClusterNetId(), sink_node);
+/* TODO: This should be changed to RRNodeId */
+enable_router_debug(router_opts, ClusterNetId(), size_t(sink_node));

 /* Update base costs according to fanout and criticality rules */
 update_rr_base_costs(1);

@@ -59,7 +60,7 @@ bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const
 //find delay
 *net_delay = rt_node_of_sink->Tdel;

-VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_nodes[rt_root->inode].capacity(), "SOURCE should never be congested");
+VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_graph.node_capacity(rt_root->inode_id), "SOURCE should never be congested");
 free_route_tree(rt_root);
 }
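Note: with the profiler taking RRNodeIds directly, callers such as route_connection_delay (see the @@ -322,7 hunk above) drop their size_t() casts. A hedged usage sketch; the wrapper function name and arguments are placeholders supplied by the caller:

// Illustrative call only.
#include <limits>

float profile_one_connection(const RouterDelayProfiler& profiler,
                             const RRNodeId& source_rr_node,
                             const RRNodeId& sink_rr_node,
                             const t_router_opts& router_opts) {
    float net_delay_value = 0.f;
    bool routed = profiler.calculate_delay(source_rr_node, sink_rr_node,
                                           router_opts, &net_delay_value);
    return routed ? net_delay_value : std::numeric_limits<float>::infinity();
}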
@@ -9,7 +9,7 @@
 class RouterDelayProfiler {
 public:
 RouterDelayProfiler(const RouterLookahead* lookahead);
-bool calculate_delay(int source_node, int sink_node, const t_router_opts& router_opts, float* net_delay) const;
+bool calculate_delay(const RRNodeId& source_node, const RRNodeId& sink_node, const t_router_opts& router_opts, float* net_delay) const;

 private:
 const RouterLookahead* router_lookahead_;
@@ -5,7 +5,7 @@
 #include "globals.h"
 #include "route_timing.h"

-static int get_expected_segs_to_target(int inode, int target_node, int* num_segs_ortho_dir_ptr);
+static int get_expected_segs_to_target(const RRNodeId& inode, const RRNodeId& target_node, int* num_segs_ortho_dir_ptr);
 static int round_up(float x);

 static std::unique_ptr<RouterLookahead> make_router_lookahead_object(e_router_lookahead router_lookahead_type) {

@@ -41,10 +41,10 @@ std::unique_ptr<RouterLookahead> make_router_lookahead(
 return router_lookahead;
 }

-float ClassicLookahead::get_expected_cost(int current_node, int target_node, const t_conn_cost_params& params, float R_upstream) const {
+float ClassicLookahead::get_expected_cost(const RRNodeId& current_node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const {
 auto& device_ctx = g_vpr_ctx.device();

-t_rr_type rr_type = device_ctx.rr_nodes[current_node].type();
+t_rr_type rr_type = device_ctx.rr_graph.node_type(current_node);

 if (rr_type == CHANX || rr_type == CHANY) {
 return classic_wire_lookahead_cost(current_node, target_node, params.criticality, R_upstream);

@@ -55,15 +55,15 @@ float ClassicLookahead::get_expected_cost(int current_node, int target_node, con
 }
 }

-float ClassicLookahead::classic_wire_lookahead_cost(int inode, int target_node, float criticality, float R_upstream) const {
+float ClassicLookahead::classic_wire_lookahead_cost(const RRNodeId& inode, const RRNodeId& target_node, float criticality, float R_upstream) const {
 auto& device_ctx = g_vpr_ctx.device();

-VTR_ASSERT_SAFE(device_ctx.rr_nodes[inode].type() == CHANX || device_ctx.rr_nodes[inode].type() == CHANY);
+VTR_ASSERT_SAFE(device_ctx.rr_graph.node_type(inode) == CHANX || device_ctx.rr_graph.node_type(inode) == CHANY);

 int num_segs_ortho_dir = 0;
 int num_segs_same_dir = get_expected_segs_to_target(inode, target_node, &num_segs_ortho_dir);

-int cost_index = device_ctx.rr_nodes[inode].cost_index();
+int cost_index = device_ctx.rr_graph.node_cost_index(inode);
 int ortho_cost_index = device_ctx.rr_indexed_data[cost_index].ortho_cost_index;

 const auto& same_data = device_ctx.rr_indexed_data[cost_index];

@@ -87,10 +87,10 @@ float ClassicLookahead::classic_wire_lookahead_cost(int inode, int target_node,
 return (expected_cost);
 }

-float MapLookahead::get_expected_cost(int current_node, int target_node, const t_conn_cost_params& params, float /*R_upstream*/) const {
+float MapLookahead::get_expected_cost(const RRNodeId& current_node, const RRNodeId& target_node, const t_conn_cost_params& params, float /*R_upstream*/) const {
 auto& device_ctx = g_vpr_ctx.device();

-t_rr_type rr_type = device_ctx.rr_nodes[current_node].type();
+t_rr_type rr_type = device_ctx.rr_graph.node_type(current_node);

 if (rr_type == CHANX || rr_type == CHANY) {
 return get_lookahead_map_cost(current_node, target_node, params.criticality);

@@ -105,7 +105,7 @@ void MapLookahead::compute(const std::vector<t_segment_inf>& segment_inf) {
 compute_router_lookahead(segment_inf.size());
 }

-float NoOpLookahead::get_expected_cost(int /*current_node*/, int /*target_node*/, const t_conn_cost_params& /*params*/, float /*R_upstream*/) const {
+float NoOpLookahead::get_expected_cost(const RRNodeId& /*current_node*/, const RRNodeId& /*target_node*/, const t_conn_cost_params& /*params*/, float /*R_upstream*/) const {
 return 0.;
 }

@@ -115,7 +115,7 @@ static int round_up(float x) {
 return std::ceil(x - 0.001);
 }

-static int get_expected_segs_to_target(int inode, int target_node, int* num_segs_ortho_dir_ptr) {
+static int get_expected_segs_to_target(const RRNodeId& inode, const RRNodeId& target_node, int* num_segs_ortho_dir_ptr) {
 /* Returns the number of segments the same type as inode that will be needed *
 * to reach target_node (not including inode) in each direction (the same *
 * direction (horizontal or vertical) as inode and the orthogonal direction).*/

@@ -127,18 +127,18 @@ static int get_expected_segs_to_target(int inode, int target_node, int* num_segs
 int no_need_to_pass_by_clb;
 float inv_length, ortho_inv_length, ylow, yhigh, xlow, xhigh;

-target_x = device_ctx.rr_nodes[target_node].xlow();
-target_y = device_ctx.rr_nodes[target_node].ylow();
-cost_index = device_ctx.rr_nodes[inode].cost_index();
+target_x = device_ctx.rr_graph.node_xlow(target_node);
+target_y = device_ctx.rr_graph.node_ylow(target_node);
+cost_index = device_ctx.rr_graph.node_cost_index(inode);
 inv_length = device_ctx.rr_indexed_data[cost_index].inv_length;
 ortho_cost_index = device_ctx.rr_indexed_data[cost_index].ortho_cost_index;
 ortho_inv_length = device_ctx.rr_indexed_data[ortho_cost_index].inv_length;
-rr_type = device_ctx.rr_nodes[inode].type();
+rr_type = device_ctx.rr_graph.node_type(inode);

 if (rr_type == CHANX) {
-ylow = device_ctx.rr_nodes[inode].ylow();
-xhigh = device_ctx.rr_nodes[inode].xhigh();
-xlow = device_ctx.rr_nodes[inode].xlow();
+ylow = device_ctx.rr_graph.node_ylow(inode);
+xhigh = device_ctx.rr_graph.node_xhigh(inode);
+xlow = device_ctx.rr_graph.node_xlow(inode);

 /* Count vertical (orthogonal to inode) segs first. */

@@ -163,9 +163,9 @@ static int get_expected_segs_to_target(int inode, int target_node, int* num_segs
 num_segs_same_dir = 0;
 }
 } else { /* inode is a CHANY */
-ylow = device_ctx.rr_nodes[inode].ylow();
-yhigh = device_ctx.rr_nodes[inode].yhigh();
-xlow = device_ctx.rr_nodes[inode].xlow();
+ylow = device_ctx.rr_graph.node_ylow(inode);
+yhigh = device_ctx.rr_graph.node_yhigh(inode);
+xlow = device_ctx.rr_graph.node_xlow(inode);

 /* Count horizontal (orthogonal to inode) segs first. */
@@ -12,7 +12,7 @@ class RouterLookahead {
 //
 // Either compute or read methods must be invoked before invoking
 // get_expected_cost.
-virtual float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const = 0;
+virtual float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const = 0;

 // Compute router lookahead (if needed).
 virtual void compute(const std::vector<t_segment_inf>& segment_inf) = 0;

@@ -53,7 +53,7 @@ const RouterLookahead* get_cached_router_lookahead(

 class ClassicLookahead : public RouterLookahead {
 public:
-float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const override;
+float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const override;
 void compute(const std::vector<t_segment_inf>& /*segment_inf*/) override {
 }

@@ -65,12 +65,12 @@ class ClassicLookahead : public RouterLookahead {
 }

 private:
-float classic_wire_lookahead_cost(int node, int target_node, float criticality, float R_upstream) const;
+float classic_wire_lookahead_cost(const RRNodeId& node, const RRNodeId& target_node, float criticality, float R_upstream) const;
 };

 class MapLookahead : public RouterLookahead {
 protected:
-float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const override;
+float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const override;
 void compute(const std::vector<t_segment_inf>& segment_inf) override;
 void read(const std::string& /*file*/) override {
 VPR_THROW(VPR_ERROR_ROUTE, "MapLookahead::read unimplemented");

@@ -82,7 +82,7 @@ class MapLookahead : public RouterLookahead {

 class NoOpLookahead : public RouterLookahead {
 protected:
-float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const override;
+float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const override;
 void compute(const std::vector<t_segment_inf>& /*segment_inf*/) override {
 }
 void read(const std::string& /*file*/) override {
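Note: the RouterLookahead interface now takes RRNodeIds, so any custom lookahead must use the new signature. A hedged sketch of a trivial subclass, overriding only the virtuals visible in this header excerpt; the class name and constant are invented for illustration:

// Hypothetical lookahead that returns a fixed optimistic estimate.
class ConstantLookahead : public RouterLookahead {
  public:
    float get_expected_cost(const RRNodeId& /*node*/, const RRNodeId& /*target_node*/,
                            const t_conn_cost_params& /*params*/, float /*R_upstream*/) const override {
        return 1e-9f;  // constant, optimistic remaining-cost estimate
    }
    void compute(const std::vector<t_segment_inf>& /*segment_inf*/) override {}
    void read(const std::string& /*file*/) override {}
};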
@@ -32,12 +32,10 @@ SpatialRouteTreeLookup build_route_tree_spatial_lookup(ClusterNetId net, t_rt_no
 void update_route_tree_spatial_lookup_recur(t_rt_node* rt_node, SpatialRouteTreeLookup& spatial_lookup) {
 auto& device_ctx = g_vpr_ctx.device();

-auto& rr_node = device_ctx.rr_nodes[rt_node->inode];
-
-int bin_xlow = grid_to_bin_x(rr_node.xlow(), spatial_lookup);
-int bin_ylow = grid_to_bin_y(rr_node.ylow(), spatial_lookup);
-int bin_xhigh = grid_to_bin_x(rr_node.xhigh(), spatial_lookup);
-int bin_yhigh = grid_to_bin_y(rr_node.yhigh(), spatial_lookup);
+int bin_xlow = grid_to_bin_x(device_ctx.rr_graph.node_xlow(rt_node->inode), spatial_lookup);
+int bin_ylow = grid_to_bin_y(device_ctx.rr_graph.node_ylow(rt_node->inode), spatial_lookup);
+int bin_xhigh = grid_to_bin_x(device_ctx.rr_graph.node_xhigh(rt_node->inode), spatial_lookup);
+int bin_yhigh = grid_to_bin_y(device_ctx.rr_graph.node_yhigh(rt_node->inode), spatial_lookup);

 spatial_lookup[bin_xlow][bin_ylow].push_back(rt_node);
@@ -235,22 +235,22 @@ std::vector<std::string> block_type_class_index_to_pin_names(t_physical_tile_typ
 return pin_names;
 }

-std::string rr_node_arch_name(int inode) {
+std::string rr_node_arch_name(const RRNodeId& inode) {
 auto& device_ctx = g_vpr_ctx.device();

-const t_rr_node& rr_node = device_ctx.rr_nodes[inode];
+const RRGraph& rr_graph = device_ctx.rr_graph;

 std::string rr_node_arch_name;
-if (rr_node.type() == OPIN || rr_node.type() == IPIN) {
+if (rr_graph.node_type(inode) == OPIN || rr_graph.node_type(inode) == IPIN) {
 //Pin names
-auto type = device_ctx.grid[rr_node.xlow()][rr_node.ylow()].type;
-rr_node_arch_name += block_type_pin_index_to_name(type, rr_node.ptc_num());
-} else if (rr_node.type() == SOURCE || rr_node.type() == SINK) {
+auto type = device_ctx.grid[rr_graph.node_xlow(inode)][rr_graph.node_ylow(inode)].type;
+rr_node_arch_name += block_type_pin_index_to_name(type, rr_graph.node_ptc_num(inode));
+} else if (rr_graph.node_type(inode) == SOURCE || rr_graph.node_type(inode) == SINK) {
 //Set of pins associated with SOURCE/SINK
-auto type = device_ctx.grid[rr_node.xlow()][rr_node.ylow()].type;
-auto pin_names = block_type_class_index_to_pin_names(type, rr_node.ptc_num());
+auto type = device_ctx.grid[rr_graph.node_xlow(inode)][rr_graph.node_ylow(inode)].type;
+auto pin_names = block_type_class_index_to_pin_names(type, rr_graph.node_ptc_num(inode));
 if (pin_names.size() > 1) {
-rr_node_arch_name += rr_node.type_string();
+rr_node_arch_name += rr_node_typename[rr_graph.node_type(inode)];
 rr_node_arch_name += " connected to ";
 rr_node_arch_name += "{";
 rr_node_arch_name += vtr::join(pin_names, ", ");

@@ -259,9 +259,9 @@ std::string rr_node_arch_name(int inode) {
 rr_node_arch_name += pin_names[0];
 }
 } else {
-VTR_ASSERT(rr_node.type() == CHANX || rr_node.type() == CHANY);
+VTR_ASSERT(rr_graph.node_type(inode) == CHANX || rr_graph.node_type(inode) == CHANY);
 //Wire segment name
-auto cost_index = rr_node.cost_index();
+auto cost_index = rr_graph.node_cost_index(inode);
 int seg_index = device_ctx.rr_indexed_data[cost_index].seg_index;

 rr_node_arch_name += device_ctx.arch->Segments[seg_index].name;

@@ -48,7 +48,7 @@ std::string block_type_pin_index_to_name(t_physical_tile_type_ptr type, int pin_
 std::vector<std::string> block_type_class_index_to_pin_names(t_physical_tile_type_ptr type, int class_index);

 //Returns a user-friendly architectural identifier for the specified RR node
-std::string rr_node_arch_name(int inode);
+std::string rr_node_arch_name(const RRNodeId& inode);

 /**************************************************************
 * Intra-Logic Block Utility Functions