Merge branch 'refactoring' into dev

This commit is contained in:
tangxifan 2020-02-04 22:54:19 -07:00
commit 6dc2126ce6
63 changed files with 2299 additions and 1909 deletions

View File

@ -231,7 +231,7 @@ static void process_nodes(std::ifstream& fp, ClusterNetId inet, const char* file
} else if (tokens[0] == "Node:") {
/*An actual line, go through each node and add it to the route tree*/
inode = atoi(tokens[1].c_str());
auto& node = device_ctx.rr_nodes[inode];
const RRNodeId& node = RRNodeId(inode);
/*First node needs to be source. It is isolated to correctly set heap head.*/
if (node_count == 0 && tokens[2] != "SOURCE") {
@ -240,7 +240,7 @@ static void process_nodes(std::ifstream& fp, ClusterNetId inet, const char* file
}
/*Check node types if match rr graph*/
if (tokens[2] != node.type_string()) {
if (tokens[2] != rr_node_typename[device_ctx.rr_graph.node_type(node)]) {
vpr_throw(VPR_ERROR_ROUTE, filename, lineno,
"Node %d has a type that does not match the RR graph", inode);
}
@ -249,13 +249,19 @@ static void process_nodes(std::ifstream& fp, ClusterNetId inet, const char* file
if (tokens[4] == "to") {
format_coordinates(x2, y2, tokens[5], inet, filename, lineno);
if (node.xlow() != x || node.xhigh() != x2 || node.yhigh() != y2 || node.ylow() != y) {
if (device_ctx.rr_graph.node_xlow(node) != x
|| device_ctx.rr_graph.node_xhigh(node) != x2
|| device_ctx.rr_graph.node_yhigh(node) != y2
|| device_ctx.rr_graph.node_ylow(node) != y) {
vpr_throw(VPR_ERROR_ROUTE, filename, lineno,
"The coordinates of node %d does not match the rr graph", inode);
}
offset = 2;
} else {
if (node.xlow() != x || node.xhigh() != x || node.yhigh() != y || node.ylow() != y) {
if (device_ctx.rr_graph.node_xlow(node) != x
|| device_ctx.rr_graph.node_xhigh(node) != x
|| device_ctx.rr_graph.node_yhigh(node) != y
|| device_ctx.rr_graph.node_ylow(node) != y) {
vpr_throw(VPR_ERROR_ROUTE, filename, lineno,
"The coordinates of node %d does not match the rr graph", inode);
}
@ -276,7 +282,7 @@ static void process_nodes(std::ifstream& fp, ClusterNetId inet, const char* file
}
ptc = atoi(tokens[5 + offset].c_str());
if (node.ptc_num() != ptc) {
if (device_ctx.rr_graph.node_ptc_num(node) != ptc) {
vpr_throw(VPR_ERROR_ROUTE, filename, lineno,
"The ptc num of node %d does not match the rr graph", inode);
}
@ -285,7 +291,7 @@ static void process_nodes(std::ifstream& fp, ClusterNetId inet, const char* file
if (tokens[6 + offset] != "Switch:") {
/*This is an opin or ipin, process its pin nums*/
if (!is_io_type(device_ctx.grid[x][y].type) && (tokens[2] == "IPIN" || tokens[2] == "OPIN")) {
int pin_num = device_ctx.rr_nodes[inode].ptc_num();
int pin_num = device_ctx.rr_graph.node_ptc_num(node);
int height_offset = device_ctx.grid[x][y].height_offset;
ClusterBlockId iblock = place_ctx.grid_blocks[x][y - height_offset].blocks[0];
t_pb_graph_pin* pb_pin = get_pb_graph_node_pin_from_block_pin(iblock, pin_num);
@ -312,7 +318,7 @@ static void process_nodes(std::ifstream& fp, ClusterNetId inet, const char* file
/* Allocate and load correct values to trace.head*/
if (node_count == 0) {
route_ctx.trace[inet].head = alloc_trace_data();
route_ctx.trace[inet].head->index = inode;
route_ctx.trace[inet].head->index = node;
route_ctx.trace[inet].head->iswitch = switch_id;
route_ctx.trace[inet].head->next = nullptr;
tptr = route_ctx.trace[inet].head;
@ -320,7 +326,7 @@ static void process_nodes(std::ifstream& fp, ClusterNetId inet, const char* file
} else {
tptr->next = alloc_trace_data();
tptr = tptr->next;
tptr->index = inode;
tptr->index = node;
tptr->iswitch = switch_id;
tptr->next = nullptr;
node_count++;

View File

@ -215,7 +215,7 @@ static void get_channel_occupancy_stats() {
/* Loads the two arrays passed in with the total occupancy at each of the *
* channel segments in the FPGA. */
static void load_channel_occupancies(vtr::Matrix<int>& chanx_occ, vtr::Matrix<int>& chany_occ) {
int i, j, inode;
int i, j;
t_trace* tptr;
t_rr_type rr_type;
@ -235,8 +235,8 @@ static void load_channel_occupancies(vtr::Matrix<int>& chanx_occ, vtr::Matrix<in
tptr = route_ctx.trace[net_id].head;
while (tptr != nullptr) {
inode = tptr->index;
rr_type = device_ctx.rr_nodes[inode].type();
const RRNodeId& inode = tptr->index;
rr_type = device_ctx.rr_graph.node_type(inode);
if (rr_type == SINK) {
tptr = tptr->next; /* Skip next segment. */
@ -245,14 +245,14 @@ static void load_channel_occupancies(vtr::Matrix<int>& chanx_occ, vtr::Matrix<in
}
else if (rr_type == CHANX) {
j = device_ctx.rr_nodes[inode].ylow();
for (i = device_ctx.rr_nodes[inode].xlow(); i <= device_ctx.rr_nodes[inode].xhigh(); i++)
j = device_ctx.rr_graph.node_ylow(inode);
for (i = device_ctx.rr_graph.node_xlow(inode); i <= device_ctx.rr_graph.node_xhigh(inode); i++)
chanx_occ[i][j]++;
}
else if (rr_type == CHANY) {
i = device_ctx.rr_nodes[inode].xlow();
for (j = device_ctx.rr_nodes[inode].ylow(); j <= device_ctx.rr_nodes[inode].yhigh(); j++)
i = device_ctx.rr_graph.node_xlow(inode);
for (j = device_ctx.rr_graph.node_ylow(inode); j <= device_ctx.rr_graph.node_yhigh(inode); j++)
chany_occ[i][j]++;
}
@ -268,7 +268,6 @@ void get_num_bends_and_length(ClusterNetId inet, int* bends_ptr, int* len_ptr, i
auto& device_ctx = g_vpr_ctx.device();
t_trace *tptr, *prevptr;
int inode;
t_rr_type curr_type, prev_type;
int bends, length, segments;
@ -281,27 +280,27 @@ void get_num_bends_and_length(ClusterNetId inet, int* bends_ptr, int* len_ptr, i
VPR_FATAL_ERROR(VPR_ERROR_OTHER,
"in get_num_bends_and_length: net #%lu has no traceback.\n", size_t(inet));
}
inode = prevptr->index;
prev_type = device_ctx.rr_nodes[inode].type();
RRNodeId inode = prevptr->index;
prev_type = device_ctx.rr_graph.node_type(inode);
tptr = prevptr->next;
while (tptr != nullptr) {
inode = tptr->index;
curr_type = device_ctx.rr_nodes[inode].type();
curr_type = device_ctx.rr_graph.node_type(inode);
if (curr_type == SINK) { /* Starting a new segment */
tptr = tptr->next; /* Link to existing path - don't add to len. */
if (tptr == nullptr)
break;
curr_type = device_ctx.rr_nodes[tptr->index].type();
curr_type = device_ctx.rr_graph.node_type(tptr->index);
}
else if (curr_type == CHANX || curr_type == CHANY) {
segments++;
length += 1 + device_ctx.rr_nodes[inode].xhigh() - device_ctx.rr_nodes[inode].xlow()
+ device_ctx.rr_nodes[inode].yhigh() - device_ctx.rr_nodes[inode].ylow();
length += 1 + device_ctx.rr_graph.node_xhigh(inode) - device_ctx.rr_graph.node_xlow(inode)
+ device_ctx.rr_graph.node_yhigh(inode) - device_ctx.rr_graph.node_ylow(inode);
if (curr_type != prev_type && (prev_type == CHANX || prev_type == CHANY))
bends++;

View File

@ -157,10 +157,10 @@ struct DeviceContext : public Context {
std::vector<t_rr_rc_data> rr_rc_data;
//Sets of non-configurably connected nodes
std::vector<std::vector<int>> rr_non_config_node_sets;
std::vector<std::vector<RRNodeId>> rr_non_config_node_sets;
//Reverse look-up from RR node to non-configurably connected node set (index into rr_nonconf_node_sets)
std::unordered_map<int, int> rr_node_to_non_config_node_set;
std::unordered_map<RRNodeId, int> rr_node_to_non_config_node_set;
//The indicies of rr nodes of a given type at a specific x,y grid location
t_rr_node_indices rr_node_indices; //[0..NUM_RR_TYPES-1][0..grid.width()-1][0..grid.width()-1][0..size-1]
@ -178,7 +178,7 @@ struct DeviceContext : public Context {
// Useful for two stage clock routing
// XXX: currently only one place to source the clock networks so only storing
// a single value
int virtual_clock_network_root_idx;
RRNodeId virtual_clock_network_root_idx;
/** Attributes for each rr_node.
* key: rr_node index
@ -284,13 +284,14 @@ struct PlacementContext : public Context {
struct RoutingContext : public Context {
/* [0..num_nets-1] of linked list start pointers. Defines the routing. */
vtr::vector<ClusterNetId, t_traceback> trace;
vtr::vector<ClusterNetId, std::unordered_set<int>> trace_nodes;
vtr::vector<ClusterNetId, std::unordered_set<RRNodeId>> trace_nodes;
vtr::vector<ClusterNetId, std::vector<int>> net_rr_terminals; /* [0..num_nets-1][0..num_pins-1] */
/* Xifan Tang: this should adopt RRNodeId as well */
vtr::vector<ClusterNetId, std::vector<RRNodeId>> net_rr_terminals; /* [0..num_nets-1][0..num_pins-1] */
vtr::vector<ClusterBlockId, std::vector<int>> rr_blk_source; /* [0..num_blocks-1][0..num_class-1] */
vtr::vector<ClusterBlockId, std::vector<RRNodeId>> rr_blk_source; /* [0..num_blocks-1][0..num_class-1] */
std::vector<t_rr_node_route_inf> rr_node_route_inf; /* [0..device_ctx.num_rr_nodes-1] */
vtr::vector<RRNodeId, t_rr_node_route_inf> rr_node_route_inf; /* [0..device_ctx.num_rr_nodes-1] */
//Information about current routing status of each net
vtr::vector<ClusterNetId, t_net_routing_status> net_status; //[0..cluster_ctx.clb_nlist.nets().size()-1]

View File

@ -40,8 +40,9 @@
#include "vtr_flat_map.h"
#include "vtr_cache.h"
/* Header for rr_graph related definition */
/* Xifan Tang - Header for rr_graph related definition */
#include "rr_graph_types.h"
#include "rr_graph_obj.h"
/*******************************************************************************
* Global data types and constants
@ -1135,7 +1136,7 @@ typedef std::vector<std::vector<std::vector<std::vector<std::vector<int>>>>> t_r
* next: Pointer to the next traceback element in this route. */
struct t_trace {
t_trace* next;
int index;
RRNodeId index;
short iswitch;
};
@ -1159,8 +1160,9 @@ struct t_trace {
* Number of times this node must be reached to fully route. *
* occ: The current occupancy of the associated rr node */
struct t_rr_node_route_inf {
int prev_node;
t_edge_size prev_edge;
/* Xifan Tang - prev_node for RRGraph object */
RRNodeId prev_node;
RREdgeId prev_edge;
float pres_cost;
float acc_cost;
@ -1186,13 +1188,13 @@ struct t_net_routing_status {
};
struct t_node_edge {
t_node_edge(int fnode, int tnode) {
t_node_edge(const RRNodeId& fnode, const RRNodeId& tnode) {
from_node = fnode;
to_node = tnode;
}
int from_node;
int to_node;
RRNodeId from_node;
RRNodeId to_node;
//For std::set
friend bool operator<(const t_node_edge& lhs, const t_node_edge& rhs) {
@ -1202,7 +1204,7 @@ struct t_node_edge {
//Non-configurably connected nodes and edges in the RR graph
struct t_non_configurable_rr_sets {
std::set<std::set<int>> node_sets;
std::set<std::set<RRNodeId>> node_sets;
std::set<std::set<t_node_edge>> edge_sets;
};
@ -1288,6 +1290,6 @@ class RouteStatus {
int chan_width_ = -1;
};
typedef vtr::vector<ClusterBlockId, std::vector<std::vector<int>>> t_clb_opins_used; //[0..num_blocks-1][0..class-1][0..used_pins-1]
typedef vtr::vector<ClusterBlockId, std::vector<std::vector<RRNodeId>>> t_clb_opins_used; //[0..num_blocks-1][0..class-1][0..used_pins-1]
#endif

View File

@ -194,6 +194,11 @@ float RRGraph::node_C(const RRNodeId& node) const {
return node_Cs_[node];
}
short RRGraph::node_rc_data_index(const RRNodeId& node) const {
VTR_ASSERT_SAFE(valid_node_id(node));
return node_rc_data_indices_[node];
}
/*
* Get a segment id of a node in rr_graph
*/
@ -353,35 +358,40 @@ RRNodeId RRGraph::find_node(const short& x, const short& y, const t_rr_type& typ
/* Check if x, y, type and ptc, side is valid */
if ((x < 0) /* See if x is smaller than the index of first element */
|| (size_t(x) > node_lookup_.dim_size(0) - 1)) { /* See if x is large than the index of last element */
|| (size_t(x) > node_lookup_.dim_size(0) - 1) /* See if x is large than the index of last element */
|| (0 == node_lookup_.dim_size(0))) { /* See if x is large than the index of last element */
/* Return a zero range! */
return RRNodeId::INVALID();
}
/* Check if x, y, type and ptc, side is valid */
if ((y < 0) /* See if y is smaller than the index of first element */
|| (size_t(y) > node_lookup_.dim_size(1) - 1)) { /* See if y is large than the index of last element */
|| (size_t(y) > node_lookup_.dim_size(1) - 1) /* See if y is large than the index of last element */
|| (0 == node_lookup_.dim_size(1))) { /* See if y is large than the index of last element */
/* Return a zero range! */
return RRNodeId::INVALID();
}
/* Check if x, y, type and ptc, side is valid */
/* itype is always larger than -1, we can skip checking */
if (itype > node_lookup_.dim_size(2) - 1) { /* See if type is large than the index of last element */
if ( (itype > node_lookup_.dim_size(2) - 1) /* See if type is large than the index of last element */
|| (0 == node_lookup_.dim_size(2))) { /* See if type is large than the index of last element */
/* Return a zero range! */
return RRNodeId::INVALID();
}
/* Check if x, y, type and ptc, side is valid */
if ((ptc < 0) /* See if ptc is smaller than the index of first element */
|| (size_t(ptc) > node_lookup_[x][y][type].size() - 1)) { /* See if ptc is large than the index of last element */
|| (size_t(ptc) > node_lookup_[x][y][type].size() - 1) /* See if ptc is large than the index of last element */
|| (0 == node_lookup_[x][y][type].size())) { /* See if ptc is large than the index of last element */
/* Return a zero range! */
return RRNodeId::INVALID();
}
/* Check if x, y, type and ptc, side is valid */
/* iside is always larger than -1, we can skip checking */
if (iside > node_lookup_[x][y][type][ptc].size() - 1) { /* See if side is large than the index of last element */
if ((iside > node_lookup_[x][y][type][ptc].size() - 1) /* See if side is large than the index of last element */
|| (0 == node_lookup_[x][y][type][ptc].size()) ) { /* See if side is large than the index of last element */
/* Return a zero range! */
return RRNodeId::INVALID();
}
@ -769,6 +779,7 @@ void RRGraph::reserve_nodes(const unsigned long& num_nodes) {
this->node_sides_.reserve(num_nodes);
this->node_Rs_.reserve(num_nodes);
this->node_Cs_.reserve(num_nodes);
this->node_rc_data_indices_.reserve(num_nodes);
this->node_segments_.reserve(num_nodes);
/* Edge-related vectors */
@ -818,6 +829,7 @@ RRNodeId RRGraph::create_node(const t_rr_type& type) {
node_sides_.push_back(NUM_SIDES);
node_Rs_.push_back(0.);
node_Cs_.push_back(0.);
node_rc_data_indices_.push_back(-1);
node_segments_.push_back(RRSegmentId::INVALID());
node_edges_.emplace_back(); //Initially empty
@ -834,10 +846,12 @@ RRNodeId RRGraph::create_node(const t_rr_type& type) {
return node_id;
}
RREdgeId RRGraph::create_edge(const RRNodeId& source, const RRNodeId& sink, const RRSwitchId& switch_id) {
RREdgeId RRGraph::create_edge(const RRNodeId& source, const RRNodeId& sink, const RRSwitchId& switch_id, const bool& fake_switch) {
VTR_ASSERT(valid_node_id(source));
VTR_ASSERT(valid_node_id(sink));
VTR_ASSERT(valid_switch_id(switch_id));
if (false == fake_switch) {
VTR_ASSERT(valid_switch_id(switch_id));
}
/* Allocate an ID */
RREdgeId edge_id = RREdgeId(num_edges_);
@ -859,6 +873,12 @@ RREdgeId RRGraph::create_edge(const RRNodeId& source, const RRNodeId& sink, cons
return edge_id;
}
void RRGraph::set_edge_switch(const RREdgeId& edge, const RRSwitchId& switch_id) {
VTR_ASSERT(valid_edge_id(edge));
VTR_ASSERT(valid_switch_id(switch_id));
edge_switches_[edge] = switch_id;
}
RRSwitchId RRGraph::create_switch(const t_rr_switch_inf& switch_info) {
//Allocate an ID
RRSwitchId switch_id = RRSwitchId(switch_ids_.size());
@ -934,6 +954,12 @@ void RRGraph::remove_edge(const RREdgeId& edge) {
set_dirty();
}
void RRGraph::set_node_type(const RRNodeId& node, const t_rr_type& type) {
VTR_ASSERT(valid_node_id(node));
node_types_[node] = type;
}
void RRGraph::set_node_xlow(const RRNodeId& node, const short& xlow) {
VTR_ASSERT(valid_node_id(node));
@ -1028,6 +1054,12 @@ void RRGraph::set_node_C(const RRNodeId& node, const float& C) {
node_Cs_[node] = C;
}
void RRGraph::set_node_rc_data_index(const RRNodeId& node, const short& rc_data_index) {
VTR_ASSERT(valid_node_id(node));
node_rc_data_indices_[node] = rc_data_index;
}
/*
* Set a segment id for a node in rr_graph
*/
@ -1175,8 +1207,8 @@ void RRGraph::build_fast_node_lookup() const {
/* Skip this id */
continue;
}
max_coord.set_x(std::max(max_coord.x(), node_xlow(RRNodeId(id))));
max_coord.set_y(std::max(max_coord.y(), node_ylow(RRNodeId(id))));
max_coord.set_x(std::max(max_coord.x(), std::max(node_bounding_boxes_[RRNodeId(id)].xmax(), node_bounding_boxes_[RRNodeId(id)].xmin())));
max_coord.set_y(std::max(max_coord.y(), std::max(node_bounding_boxes_[RRNodeId(id)].ymax(), node_bounding_boxes_[RRNodeId(id)].ymin())));
}
node_lookup_.resize({(size_t)max_coord.x() + 1, (size_t)max_coord.y() + 1, NUM_RR_TYPES + 1});
@ -1187,8 +1219,12 @@ void RRGraph::build_fast_node_lookup() const {
continue;
}
RRNodeId node = RRNodeId(id);
/* Special for SOURCE and SINK, we should annotate in the look-up
* for all the (x,y) upto (xhigh, yhigh)
*/
size_t x = node_xlow(node);
size_t y = node_ylow(node);
size_t itype = node_type(node);
size_t ptc = node_ptc_num(node);

View File

@ -454,6 +454,13 @@ class RRGraph {
/* Get capacitance of a node, used to built RC tree for timing analysis */
float node_C(const RRNodeId& node) const;
/* Get the index of rc data in the list of rc_data data structure
* It contains the RC parasitics for different nodes in the RRGraph
* when used in evaluate different routing paths
* See cross-reference section in this header file for more details
*/
short node_rc_data_index(const RRNodeId& node) const;
/* Get segment id of a node, containing the information of the routing
* segment that the node represents. See more details in the data structure t_segment_inf
*/
@ -607,7 +614,8 @@ class RRGraph {
* This function will automatically create a node and
* configure the nodes and edges in connection
*/
RREdgeId create_edge(const RRNodeId& source, const RRNodeId& sink, const RRSwitchId& switch_id);
RREdgeId create_edge(const RRNodeId& source, const RRNodeId& sink, const RRSwitchId& switch_id, const bool& fake_switch=false);
void set_edge_switch(const RREdgeId& edge, const RRSwitchId& switch_id);
RRSwitchId create_switch(const t_rr_switch_inf& switch_info);
RRSegmentId create_segment(const t_segment_inf& segment_info);
@ -638,6 +646,8 @@ class RRGraph {
void remove_edge(const RREdgeId& edge);
/* Set node-level information */
void set_node_type(const RRNodeId& node, const t_rr_type& type);
void set_node_xlow(const RRNodeId& node, const short& xlow);
void set_node_ylow(const RRNodeId& node, const short& ylow);
void set_node_xhigh(const RRNodeId& node, const short& xhigh);
@ -694,6 +704,10 @@ class RRGraph {
void set_node_R(const RRNodeId& node, const float& R);
void set_node_C(const RRNodeId& node, const float& C);
/* Set the flyweight RC data index for node, see node_rc_data_index() for details */
/* TODO: the cost index should be changed to a StrongId!!! */
void set_node_rc_data_index(const RRNodeId& node, const short& rc_data_index);
/* Set the routing segment linked to a node, only applicable to CHANX and CHANY */
void set_node_segment(const RRNodeId& node, const RRSegmentId& segment_index);
@ -714,6 +728,9 @@ class RRGraph {
/* top-level function to free, should be called when to delete a RRGraph */
void clear();
/* Due to the rr_graph builder, we have to make this method public!!!! */
void clear_switches();
public: /* Type implementations */
/*
* This class (forward delcared above) is a template used to represent a lazily calculated
@ -764,7 +781,6 @@ class RRGraph {
private: /* Internal free functions */
void clear_nodes();
void clear_edges();
void clear_switches();
void clear_segments();
private: /* Graph Compression related */
@ -841,6 +857,7 @@ class RRGraph {
vtr::vector<RRNodeId, e_side> node_sides_;
vtr::vector<RRNodeId, float> node_Rs_;
vtr::vector<RRNodeId, float> node_Cs_;
vtr::vector<RRNodeId, short> node_rc_data_indices_;
vtr::vector<RRNodeId, RRSegmentId> node_segments_; /* Segment ids for each node */
/*
@ -878,8 +895,11 @@ class RRGraph {
vtr::vector<RRNodeId, std::unique_ptr<RREdgeId[]>> node_edges_;
/* Edge related data */
size_t num_edges_; /* Range of edge ids */
std::unordered_set<RREdgeId> invalid_edge_ids_; /* Invalid edge ids */
/* Range of edge ids, use the unsigned long as
* the number of edges could be >10 times larger than the number of nodes!
*/
unsigned long num_edges_;
std::unordered_set<RREdgeId> invalid_edge_ids_; /* Invalid edge ids */
vtr::vector<RREdgeId, RRNodeId> edge_src_nodes_;
vtr::vector<RREdgeId, RRNodeId> edge_sink_nodes_;
vtr::vector<RREdgeId, RRSwitchId> edge_switches_;

View File

@ -0,0 +1,83 @@
/****************************************************************************
* This file include most-utilized functions that manipulate on the
* RRGraph object
***************************************************************************/
#include "rr_graph_obj.h"
#include "rr_graph_obj_util.h"
/****************************************************************************
* Find the switches interconnecting two nodes
* Return a vector of switch ids
***************************************************************************/
std::vector<RRSwitchId> find_rr_graph_switches(const RRGraph& rr_graph,
const RRNodeId& from_node,
const RRNodeId& to_node) {
std::vector<RRSwitchId> switches;
std::vector<RREdgeId> edges = rr_graph.find_edges(from_node, to_node);
if (true == edges.empty()) {
/* edge is open, we return an empty vector of switches */
return switches;
}
/* Reach here, edge list is not empty, find switch id one by one
* and update the switch list
*/
for (auto edge : edges) {
switches.push_back(rr_graph.edge_switch(edge));
}
return switches;
}
/*********************************************************************
* Like the RRGraph.find_node() but returns all matching nodes,
* rather than just the first. This is particularly useful for getting all instances
* of a specific IPIN/OPIN at a specific gird tile (x,y) location.
**********************************************************************/
std::vector<RRNodeId> find_rr_graph_nodes(const RRGraph& rr_graph,
const int& x,
const int& y,
const t_rr_type& rr_type,
const int& ptc) {
std::vector<RRNodeId> indices;
if (rr_type == IPIN || rr_type == OPIN) {
//For pins we need to look at all the sides of the current grid tile
for (e_side side : SIDES) {
RRNodeId rr_node_index = rr_graph.find_node(x, y, rr_type, ptc, side);
if (rr_node_index != RRNodeId::INVALID()) {
indices.push_back(rr_node_index);
}
}
} else {
//Sides do not effect non-pins so there should only be one per ptc
RRNodeId rr_node_index = rr_graph.find_node(x, y, rr_type, ptc);
if (rr_node_index != RRNodeId::INVALID()) {
indices.push_back(rr_node_index);
}
}
return indices;
}
std::vector<RRNodeId> find_rr_graph_chan_nodes(const RRGraph& rr_graph,
const int& x,
const int& y,
const t_rr_type& rr_type) {
std::vector<RRNodeId> indices;
VTR_ASSERT(rr_type == CHANX || rr_type == CHANY);
for (short track = 0; track < rr_graph.chan_num_tracks(x, y, rr_type); ++track) {
RRNodeId rr_node_index = rr_graph.find_node(x, y, rr_type, track);
if (rr_node_index != RRNodeId::INVALID()) {
indices.push_back(rr_node_index);
}
}
return indices;
}

View File

@ -0,0 +1,27 @@
#ifndef RR_GRAPH_OBJ_UTIL_H
#define RR_GRAPH_OBJ_UTIL_H

/* Include header files which include data structures used by
 * the function declaration
 */
#include <vector>
#include "rr_graph_obj.h"

/* Get node-to-node switches in a RRGraph:
 * returns one switch id per edge found between from_node and to_node
 * (empty vector when the two nodes are not connected) */
std::vector<RRSwitchId> find_rr_graph_switches(const RRGraph& rr_graph,
                                               const RRNodeId& from_node,
                                               const RRNodeId& to_node);

/* Find all the rr_graph nodes at grid location (x, y) matching the given
 * type and ptc number. For IPIN/OPIN every side of the tile is searched,
 * so more than one node may be returned */
std::vector<RRNodeId> find_rr_graph_nodes(const RRGraph& rr_graph,
                                          const int& x,
                                          const int& y,
                                          const t_rr_type& rr_type,
                                          const int& ptc);

/* Find all the routing channel nodes (CHANX/CHANY only) located at the
 * grid location (x, y), one per occupied track */
std::vector<RRNodeId> find_rr_graph_chan_nodes(const RRGraph& rr_graph,
                                               const int& x,
                                               const int& y,
                                               const t_rr_type& rr_type);

#endif

View File

@ -19,6 +19,9 @@ enum e_direction : unsigned char {
constexpr std::array<const char*, NUM_DIRECTIONS> DIRECTION_STRING = {{"INC_DIRECTION", "DEC_DIRECTION", "BI_DIRECTION", "NO_DIRECTION"}};
/* Xifan Tang - string used in describe_rr_node() and write_xml_rr_graph_obj() */
constexpr std::array<const char*, NUM_DIRECTIONS> DIRECTION_STRING_WRITE_XML = {{"INC_DIR", "DEC_DIR", "BI_DIR", "NO_DIR"}};
/* Type of a routing resource node. x-directed channel segment, *
* y-directed channel segment, input pin to a clb to pad, output *
* from a clb or pad (i.e. output pin of a net) and: *

View File

@ -1,30 +0,0 @@
/****************************************************************************
* This file include most-utilized functions that manipulate on the
* RRGraph object
***************************************************************************/
#include "rr_graph_util.h"
#include "rr_graph_obj.h"
/****************************************************************************
* Find the switches interconnecting two nodes
* Return a vector of switch ids
***************************************************************************/
std::vector<RRSwitchId> find_rr_graph_switches(const RRGraph& rr_graph,
const RRNodeId& from_node,
const RRNodeId& to_node) {
std::vector<RRSwitchId> switches;
std::vector<RREdgeId> edges = rr_graph.find_edges(from_node, to_node);
if (true == edges.empty()) {
/* edge is open, we return an empty vector of switches */
return switches;
}
/* Reach here, edge list is not empty, find switch id one by one
* and update the switch list
*/
for (auto edge : edges) {
switches.push_back(rr_graph.edge_switch(edge));
}
return switches;
}

View File

@ -1,15 +0,0 @@
#ifndef RR_GRAPH_UTIL_H
#define RR_GRAPH_UTIL_H
/* Include header files which include data structures used by
* the function declaration
*/
#include <vector>
#include "rr_graph_fwd.h"
/* Get node-to-node switches in a RRGraph */
std::vector<RRSwitchId> find_rr_graph_switches(const RRGraph& rr_graph,
const RRNodeId& from_node,
const RRNodeId& to_node);
#endif

View File

@ -55,8 +55,6 @@ void write_rr_graph_node(fstream &fp, const RRGraph& rr_graph) {
/* TODO: we should make it function full independent from device_ctx !!! */
auto& device_ctx = g_vpr_ctx.device();
std::array<const char*, NUM_DIRECTIONS> DIRECTION_STRING_WRITE_XML = {{"INC_DIR", "DEC_DIR", "BI_DIR", "NO_DIR"}};
fp << "\t<rr_nodes>" << endl;
for (auto node : rr_graph.nodes()) {

File diff suppressed because it is too large Load Diff

View File

@ -31,8 +31,7 @@ void free_draw_structs();
#ifndef NO_GRAPHICS
void draw_get_rr_pin_coords(int inode, float* xcen, float* ycen);
void draw_get_rr_pin_coords(const t_rr_node* node, float* xcen, float* ycen);
void draw_get_rr_pin_coords(const RRNodeId& inode, float* xcen, float* ycen);
void draw_triangle_along_line(ezgl::renderer* g, ezgl::point2d start, ezgl::point2d end, float relative_position = 1., float arrow_size = DEFAULT_ARROW_SIZE);
void draw_triangle_along_line(ezgl::renderer* g, ezgl::point2d loc, ezgl::point2d start, ezgl::point2d end, float arrow_size = DEFAULT_ARROW_SIZE);
@ -58,11 +57,11 @@ ezgl::color to_ezgl_color(vtr::Color<float> color);
void draw_screen();
// search bar related functions
ezgl::rectangle draw_get_rr_chan_bbox(int inode);
ezgl::rectangle draw_get_rr_chan_bbox(const RRNodeId& inode);
void draw_highlight_blocks_color(t_logical_block_type_ptr type, ClusterBlockId blk_id);
void highlight_nets(char* message, int hit_node);
void draw_highlight_fan_in_fan_out(const std::set<int>& nodes);
std::set<int> draw_expand_non_configurable_rr_nodes(int hit_node);
void highlight_nets(char* message, const RRNodeId& hit_node);
void draw_highlight_fan_in_fan_out(const std::set<RRNodeId>& nodes);
std::set<RRNodeId> draw_expand_non_configurable_rr_nodes(const RRNodeId& hit_node);
void deselect_all();
// toggle functions

View File

@ -172,7 +172,7 @@ struct t_draw_state {
e_route_type draw_route_type = GLOBAL;
char default_message[vtr::bufsize];
vtr::vector<ClusterNetId, ezgl::color> net_color;
t_draw_rr_node* draw_rr_node = nullptr;
vtr::vector<RRNodeId, t_draw_rr_node> draw_rr_node;
std::shared_ptr<const SetupTimingInfo> setup_timing_info;
const t_arch* arch_info = nullptr;
std::unique_ptr<const vtr::ColorMap> color_map = nullptr;

View File

@ -68,14 +68,14 @@ void search_and_highlight(GtkWidget* /*widget*/, ezgl::application* app) {
ss >> rr_node_id;
// valid rr node id check
if (rr_node_id < 0 || rr_node_id >= int(device_ctx.rr_nodes.size())) {
if (rr_node_id < 0 || rr_node_id >= int(device_ctx.rr_graph.nodes().size())) {
warning_dialog_box("Invalid RR Node ID");
app->refresh_drawing();
return;
}
highlight_rr_nodes(rr_node_id);
auto_zoom_rr_node(rr_node_id);
highlight_rr_nodes(RRNodeId(rr_node_id));
auto_zoom_rr_node(RRNodeId(rr_node_id));
}
else if (search_type == "Block ID") {
@ -125,12 +125,12 @@ void search_and_highlight(GtkWidget* /*widget*/, ezgl::application* app) {
app->refresh_drawing();
}
bool highlight_rr_nodes(int hit_node) {
bool highlight_rr_nodes(const RRNodeId& hit_node) {
t_draw_state* draw_state = get_draw_state_vars();
char message[250] = "";
if (hit_node != OPEN) {
if (hit_node != RRNodeId::INVALID()) {
auto nodes = draw_expand_non_configurable_rr_nodes(hit_node);
for (auto node : nodes) {
if (draw_state->draw_rr_node[node].color != ezgl::MAGENTA) {
@ -177,21 +177,21 @@ bool highlight_rr_nodes(int hit_node) {
return true;
}
void auto_zoom_rr_node(int rr_node_id) {
void auto_zoom_rr_node(const RRNodeId& rr_node_id) {
t_draw_coords* draw_coords = get_draw_coords_vars();
auto& device_ctx = g_vpr_ctx.device();
ezgl::rectangle rr_node;
// find the location of the node
switch (device_ctx.rr_nodes[rr_node_id].type()) {
switch (device_ctx.rr_graph.node_type(rr_node_id)) {
case IPIN:
case OPIN: {
int i = device_ctx.rr_nodes[rr_node_id].xlow();
int j = device_ctx.rr_nodes[rr_node_id].ylow();
int i = device_ctx.rr_graph.node_xlow(rr_node_id);
int j = device_ctx.rr_graph.node_ylow(rr_node_id);
t_physical_tile_type_ptr type = device_ctx.grid[i][j].type;
int width_offset = device_ctx.grid[i][j].width_offset;
int height_offset = device_ctx.grid[i][j].height_offset;
int ipin = device_ctx.rr_nodes[rr_node_id].ptc_num();
int ipin = device_ctx.rr_graph.node_ptc_num(rr_node_id);
float xcen, ycen;
int iside;

View File

@ -11,8 +11,8 @@
# include "draw_color.h"
void search_and_highlight(GtkWidget* /*widget*/, ezgl::application* app);
bool highlight_rr_nodes(int hit_node);
void auto_zoom_rr_node(int rr_node_id);
bool highlight_rr_nodes(const RRNodeId& hit_node);
void auto_zoom_rr_node(const RRNodeId& rr_node_id);
void highlight_blocks(ClusterBlockId clb_index);
void highlight_nets(ClusterNetId net_id);
void highlight_nets(std::string net_name);

View File

@ -34,6 +34,8 @@
#include "router_delay_profiling.h"
#include "place_delay_model.h"
#include "rr_graph_obj_util.h"
/*To compute delay between blocks we calculate the delay between */
/*different nodes in the FPGA. From this procedure we generate
* a lookup table which tells us the delay between different locations in*/
@ -113,8 +115,8 @@ static bool find_direct_connect_sample_locations(const t_direct_inf* direct,
t_physical_tile_type_ptr to_type,
int to_pin,
int to_pin_class,
int* src_rr,
int* sink_rr);
RRNodeId& src_rr,
RRNodeId& sink_rr);
static bool verify_delta_delays(const vtr::Matrix<float>& delta_delays);
@ -301,19 +303,25 @@ static float route_connection_delay(
auto best_driver_ptcs = get_best_classes(DRIVER, device_ctx.grid[source_x][source_y].type);
auto best_sink_ptcs = get_best_classes(RECEIVER, device_ctx.grid[sink_x][sink_y].type);
int src_width_offset = device_ctx.grid[source_x][source_y].width_offset;
int src_height_offset = device_ctx.grid[source_x][source_y].height_offset;
int sink_width_offset = device_ctx.grid[sink_x][sink_y].width_offset;
int sink_height_offset = device_ctx.grid[sink_x][sink_y].height_offset;
for (int driver_ptc : best_driver_ptcs) {
VTR_ASSERT(driver_ptc != OPEN);
int source_rr_node = get_rr_node_index(device_ctx.rr_node_indices, source_x, source_y, SOURCE, driver_ptc);
RRNodeId source_rr_node = device_ctx.rr_graph.find_node(source_x - src_width_offset, source_y - src_height_offset, SOURCE, driver_ptc);
VTR_ASSERT(source_rr_node != OPEN);
VTR_ASSERT(source_rr_node != RRNodeId::INVALID());
for (int sink_ptc : best_sink_ptcs) {
VTR_ASSERT(sink_ptc != OPEN);
int sink_rr_node = get_rr_node_index(device_ctx.rr_node_indices, sink_x, sink_y, SINK, sink_ptc);
RRNodeId sink_rr_node = device_ctx.rr_graph.find_node(sink_x - sink_width_offset, sink_y - sink_height_offset, SINK, sink_ptc);
VTR_ASSERT(sink_rr_node != OPEN);
VTR_ASSERT(sink_rr_node != RRNodeId::INVALID());
if (!measure_directconnect && directconnect_exists(source_rr_node, sink_rr_node)) {
//Skip if we shouldn't measure direct connects and a direct connect exists
@ -741,8 +749,8 @@ static bool find_direct_connect_sample_locations(const t_direct_inf* direct,
t_physical_tile_type_ptr to_type,
int to_pin,
int to_pin_class,
int* src_rr,
int* sink_rr) {
RRNodeId& src_rr,
RRNodeId& sink_rr) {
VTR_ASSERT(from_type != nullptr);
VTR_ASSERT(to_type != nullptr);
@ -765,10 +773,10 @@ static bool find_direct_connect_sample_locations(const t_direct_inf* direct,
//(with multi-width/height blocks pins may not exist at all locations)
bool from_pin_found = false;
if (direct->from_side != NUM_SIDES) {
int from_pin_rr = get_rr_node_index(device_ctx.rr_node_indices, from_x, from_y, OPIN, from_pin, direct->from_side);
from_pin_found = (from_pin_rr != OPEN);
RRNodeId from_pin_rr = device_ctx.rr_graph.find_node(from_x, from_y, OPIN, from_pin, direct->from_side);
from_pin_found = (from_pin_rr != RRNodeId::INVALID());
} else {
std::vector<int> from_pin_rrs = get_rr_node_indices(device_ctx.rr_node_indices, from_x, from_y, OPIN, from_pin);
std::vector<RRNodeId> from_pin_rrs = find_rr_graph_nodes(device_ctx.rr_graph, from_x, from_y, OPIN, from_pin);
from_pin_found = !from_pin_rrs.empty();
}
if (!from_pin_found) continue;
@ -782,10 +790,10 @@ static bool find_direct_connect_sample_locations(const t_direct_inf* direct,
//(with multi-width/height blocks pins may not exist at all locations)
bool to_pin_found = false;
if (direct->to_side != NUM_SIDES) {
int to_pin_rr = get_rr_node_index(device_ctx.rr_node_indices, to_x, to_y, IPIN, to_pin, direct->to_side);
to_pin_found = (to_pin_rr != OPEN);
RRNodeId to_pin_rr = device_ctx.rr_graph.find_node(to_x, to_y, IPIN, to_pin, direct->to_side);
to_pin_found = (to_pin_rr != RRNodeId::INVALID());
} else {
std::vector<int> to_pin_rrs = get_rr_node_indices(device_ctx.rr_node_indices, to_x, to_y, IPIN, to_pin);
std::vector<RRNodeId> to_pin_rrs = find_rr_graph_nodes(device_ctx.rr_graph, to_x, to_y, IPIN, to_pin);
to_pin_found = !to_pin_rrs.empty();
}
if (!to_pin_found) continue;
@ -822,14 +830,14 @@ static bool find_direct_connect_sample_locations(const t_direct_inf* direct,
//Find a source/sink RR node associated with the pins of the direct
//
auto source_rr_nodes = get_rr_node_indices(device_ctx.rr_node_indices, from_x, from_y, SOURCE, from_pin_class);
std::vector<RRNodeId> source_rr_nodes = find_rr_graph_nodes(device_ctx.rr_graph, from_x, from_y, SOURCE, from_pin_class);
VTR_ASSERT(source_rr_nodes.size() > 0);
auto sink_rr_nodes = get_rr_node_indices(device_ctx.rr_node_indices, to_x, to_y, SINK, to_pin_class);
std::vector<RRNodeId> sink_rr_nodes = find_rr_graph_nodes(device_ctx.rr_graph, to_x, to_y, SINK, to_pin_class);
VTR_ASSERT(sink_rr_nodes.size() > 0);
*src_rr = source_rr_nodes[0];
*sink_rr = sink_rr_nodes[0];
src_rr = source_rr_nodes[0];
sink_rr = sink_rr_nodes[0];
return true;
}
@ -884,7 +892,7 @@ void OverrideDelayModel::compute_override_delay_model(
//sampled_rr_pairs and skipping them if they occur multiple times.
int missing_instances = 0;
int missing_paths = 0;
std::set<std::pair<int, int>> sampled_rr_pairs;
std::set<std::pair<RRNodeId, RRNodeId>> sampled_rr_pairs;
for (int iconn = 0; iconn < num_conns; ++iconn) {
//Find the associated pins
int from_pin = find_pin(from_type, from_port.port_name(), from_port.port_low_index() + iconn);
@ -899,9 +907,9 @@ void OverrideDelayModel::compute_override_delay_model(
int to_pin_class = find_pin_class(to_type, to_port.port_name(), to_port.port_low_index() + iconn, RECEIVER);
VTR_ASSERT(to_pin_class != OPEN);
int src_rr = OPEN;
int sink_rr = OPEN;
bool found_sample_points = find_direct_connect_sample_locations(direct, from_type, from_pin, from_pin_class, to_type, to_pin, to_pin_class, &src_rr, &sink_rr);
RRNodeId src_rr = RRNodeId::INVALID();
RRNodeId sink_rr = RRNodeId::INVALID();
bool found_sample_points = find_direct_connect_sample_locations(direct, from_type, from_pin, from_pin_class, to_type, to_pin, to_pin_class, src_rr, sink_rr);
if (!found_sample_points) {
++missing_instances;
@ -912,8 +920,8 @@ void OverrideDelayModel::compute_override_delay_model(
//sampled the associated source/sink pair and don't need to do so again
if (sampled_rr_pairs.count({src_rr, sink_rr})) continue;
VTR_ASSERT(src_rr != OPEN);
VTR_ASSERT(sink_rr != OPEN);
VTR_ASSERT(src_rr != RRNodeId::INVALID());
VTR_ASSERT(sink_rr != RRNodeId::INVALID());
float direct_connect_delay = std::numeric_limits<float>::quiet_NaN();
bool found_routing_path = route_profiler.calculate_delay(src_rr, sink_rr, router_opts2, &direct_connect_delay);
@ -933,29 +941,29 @@ void OverrideDelayModel::compute_override_delay_model(
}
}
bool directconnect_exists(int src_rr_node, int sink_rr_node) {
bool directconnect_exists(RRNodeId src_rr_node, RRNodeId sink_rr_node) {
//Returns true if there is a directconnect between the two RR nodes
//
//This is checked by looking for a SOURCE -> OPIN -> IPIN -> SINK path
//which starts at src_rr_node and ends at sink_rr_node
auto& device_ctx = g_vpr_ctx.device();
auto& rr_nodes = device_ctx.rr_nodes;
auto& rr_graph = device_ctx.rr_graph;
VTR_ASSERT(rr_nodes[src_rr_node].type() == SOURCE && rr_nodes[sink_rr_node].type() == SINK);
VTR_ASSERT(rr_graph.node_type(src_rr_node) == SOURCE && rr_graph.node_type(sink_rr_node) == SINK);
//TODO: This is a constant depth search, but still may be too slow
for (t_edge_size i_src_edge = 0; i_src_edge < rr_nodes[src_rr_node].num_edges(); ++i_src_edge) {
int opin_rr_node = rr_nodes[src_rr_node].edge_sink_node(i_src_edge);
for (const RREdgeId& src_edge : rr_graph.node_out_edges(src_rr_node)) {
RRNodeId opin_rr_node = rr_graph.edge_sink_node(src_edge);
if (rr_nodes[opin_rr_node].type() != OPIN) continue;
if (rr_graph.node_type(opin_rr_node) != OPIN) continue;
for (t_edge_size i_opin_edge = 0; i_opin_edge < rr_nodes[opin_rr_node].num_edges(); ++i_opin_edge) {
int ipin_rr_node = rr_nodes[opin_rr_node].edge_sink_node(i_opin_edge);
for (const RREdgeId& opin_edge : rr_graph.node_out_edges(opin_rr_node)) {
RRNodeId ipin_rr_node = rr_graph.edge_sink_node(opin_edge);
if (rr_nodes[ipin_rr_node].type() != IPIN) continue;
if (rr_graph.node_type(ipin_rr_node) != IPIN) continue;
for (t_edge_size i_ipin_edge = 0; i_ipin_edge < rr_nodes[ipin_rr_node].num_edges(); ++i_ipin_edge) {
if (sink_rr_node == rr_nodes[ipin_rr_node].edge_sink_node(i_ipin_edge)) {
for (const RREdgeId& ipin_edge : rr_graph.node_out_edges(ipin_rr_node)) {
if (sink_rr_node == rr_graph.edge_sink_node(ipin_edge)) {
return true;
}
}

View File

@ -1,6 +1,7 @@
#ifndef TIMING_PLACE_LOOKUP_H
#define TIMING_PLACE_LOOKUP_H
#include "place_delay_model.h"
#include "rr_graph_obj.h"
std::unique_ptr<PlaceDelayModel> compute_place_delay_model(const t_placer_opts& placer_opts,
const t_router_opts& router_opts,
@ -11,6 +12,6 @@ std::unique_ptr<PlaceDelayModel> compute_place_delay_model(const t_placer_opts&
const int num_directs);
std::vector<int> get_best_classes(enum e_pin_type pintype, t_physical_tile_type_ptr type);
bool directconnect_exists(int src_rr_node, int sink_rr_node);
bool directconnect_exists(RRNodeId src_rr_node, RRNodeId sink_rr_node);
#endif

View File

@ -62,7 +62,7 @@ typedef enum {
} e_power_breakdown_entry_type;
/************************* File Scope **********************************/
static t_rr_node_power* rr_node_power;
static vtr::vector<RRNodeId, t_rr_node_power> rr_node_power;
/************************* Function Declarations ********************/
/* Routing */
@ -794,7 +794,7 @@ static void power_usage_routing(t_power_usage* power_usage,
power_ctx.commonly_used->total_cb_buffer_size = 0.;
/* Reset rr graph net indices */
for (size_t rr_node_idx = 0; rr_node_idx < device_ctx.rr_nodes.size(); rr_node_idx++) {
for (const RRNodeId& rr_node_idx : device_ctx.rr_graph.nodes()) {
rr_node_power[rr_node_idx].net_num = ClusterNetId::INVALID();
rr_node_power[rr_node_idx].num_inputs = 0;
rr_node_power[rr_node_idx].selected_input = 0;
@ -815,19 +815,20 @@ static void power_usage_routing(t_power_usage* power_usage,
t_trace* trace;
for (trace = route_ctx.trace[net_id].head; trace != nullptr; trace = trace->next) {
auto node = &device_ctx.rr_nodes[trace->index];
RRNodeId node = trace->index;
const RRGraph& rr_graph = device_ctx.rr_graph;
t_rr_node_power* node_power = &rr_node_power[trace->index];
if (node_power->visited) {
continue;
}
for (t_edge_size edge_idx = 0; edge_idx < node->num_edges(); edge_idx++) {
if (node->edge_sink_node(edge_idx) != OPEN) {
auto next_node = &device_ctx.rr_nodes[node->edge_sink_node(edge_idx)];
t_rr_node_power* next_node_power = &rr_node_power[node->edge_sink_node(edge_idx)];
for (const RREdgeId& edge_idx : rr_graph.node_out_edges(node)) {
if (rr_graph.edge_sink_node(edge_idx) != RRNodeId::INVALID()) {
RRNodeId next_node = rr_graph.edge_sink_node(edge_idx);
t_rr_node_power* next_node_power = &rr_node_power[next_node];
switch (next_node->type()) {
switch (rr_graph.node_type(next_node)) {
case CHANX:
case CHANY:
case IPIN:
@ -837,9 +838,9 @@ static void power_usage_routing(t_power_usage* power_usage,
next_node_power->in_dens[next_node_power->num_inputs] = clb_net_density(node_power->net_num);
next_node_power->in_prob[next_node_power->num_inputs] = clb_net_prob(node_power->net_num);
next_node_power->num_inputs++;
if (next_node_power->num_inputs > next_node->fan_in()) {
if (next_node_power->num_inputs > rr_graph.node_in_edges(next_node).size()) {
VTR_LOG("%d %d\n", next_node_power->num_inputs,
next_node->fan_in());
rr_graph.node_in_edges(next_node).size());
fflush(nullptr);
VTR_ASSERT(0);
}
@ -855,9 +856,9 @@ static void power_usage_routing(t_power_usage* power_usage,
}
/* Calculate power of all routing entities */
for (size_t rr_node_idx = 0; rr_node_idx < device_ctx.rr_nodes.size(); rr_node_idx++) {
for (const RRNodeId& rr_node_idx : device_ctx.rr_graph.nodes()) {
t_power_usage sub_power_usage;
auto node = &device_ctx.rr_nodes[rr_node_idx];
const RRGraph& rr_graph = device_ctx.rr_graph;
t_rr_node_power* node_power = &rr_node_power[rr_node_idx];
float C_wire;
float buffer_size;
@ -866,7 +867,7 @@ static void power_usage_routing(t_power_usage* power_usage,
//float C_per_seg_split;
int wire_length;
switch (node->type()) {
switch (rr_graph.node_type(rr_node_idx)) {
case SOURCE:
case SINK:
case OPIN:
@ -877,13 +878,13 @@ static void power_usage_routing(t_power_usage* power_usage,
* - Driver (accounted for at end of CHANX/Y - see below)
* - Multiplexor */
if (node->fan_in()) {
if (rr_graph.node_in_edges(rr_node_idx).size()) {
VTR_ASSERT(node_power->in_dens);
VTR_ASSERT(node_power->in_prob);
/* Multiplexor */
power_usage_mux_multilevel(&sub_power_usage,
power_get_mux_arch(node->fan_in(),
power_get_mux_arch(rr_graph.node_in_edges(rr_node_idx).size(),
power_ctx.arch->mux_transistor_size),
node_power->in_prob, node_power->in_dens,
node_power->selected_input, true,
@ -904,19 +905,19 @@ static void power_usage_routing(t_power_usage* power_usage,
VTR_ASSERT(node_power->in_prob);
wire_length = 0;
if (node->type() == CHANX) {
wire_length = node->xhigh() - node->xlow() + 1;
} else if (node->type() == CHANY) {
wire_length = node->yhigh() - node->ylow() + 1;
if (rr_graph.node_type(rr_node_idx) == CHANX) {
wire_length = rr_graph.node_xhigh(rr_node_idx) - rr_graph.node_xlow(rr_node_idx) + 1;
} else if (rr_graph.node_type(rr_node_idx) == CHANY) {
wire_length = rr_graph.node_yhigh(rr_node_idx) - rr_graph.node_ylow(rr_node_idx) + 1;
}
C_wire = wire_length
* segment_inf[device_ctx.rr_indexed_data[node->cost_index()].seg_index].Cmetal;
* segment_inf[device_ctx.rr_indexed_data[rr_graph.node_cost_index(rr_node_idx)].seg_index].Cmetal;
//(double)power_ctx.commonly_used->tile_length);
VTR_ASSERT(node_power->selected_input < node->fan_in());
VTR_ASSERT(node_power->selected_input < rr_graph.node_in_edges(rr_node_idx).size());
/* Multiplexor */
power_usage_mux_multilevel(&sub_power_usage,
power_get_mux_arch(node->fan_in(),
power_get_mux_arch(rr_graph.node_in_edges(rr_node_idx).size(),
power_ctx.arch->mux_transistor_size),
node_power->in_prob, node_power->in_dens,
node_power->selected_input, true, power_ctx.solution_inf.T_crit);
@ -979,10 +980,10 @@ static void power_usage_routing(t_power_usage* power_usage,
/* Determine types of switches that this wire drives */
connectionbox_fanout = 0;
switchbox_fanout = 0;
for (t_edge_size iedge = 0; iedge < node->num_edges(); iedge++) {
if (node->edge_switch(iedge) == routing_arch->wire_to_rr_ipin_switch) {
for (const RREdgeId& iedge : rr_graph.node_out_edges(rr_node_idx)) {
if ((short)size_t(rr_graph.edge_switch(iedge)) == routing_arch->wire_to_rr_ipin_switch) {
connectionbox_fanout++;
} else if (node->edge_switch(iedge) == routing_arch->delayless_switch) {
} else if ((short)size_t(rr_graph.edge_switch(iedge)) == routing_arch->delayless_switch) {
/* Do nothing */
} else {
switchbox_fanout++;
@ -1191,9 +1192,8 @@ void power_routing_init(const t_det_routing_arch* routing_arch) {
}
/* Initialize RR Graph Structures */
rr_node_power = (t_rr_node_power*)vtr::calloc(device_ctx.rr_nodes.size(),
sizeof(t_rr_node_power));
for (size_t rr_node_idx = 0; rr_node_idx < device_ctx.rr_nodes.size(); rr_node_idx++) {
rr_node_power.resize(device_ctx.rr_graph.nodes().size());
for (const RRNodeId& rr_node_idx : device_ctx.rr_graph.nodes()) {
rr_node_power[rr_node_idx].driver_switch_type = OPEN;
}
@ -1202,40 +1202,40 @@ void power_routing_init(const t_det_routing_arch* routing_arch) {
max_IPIN_fanin = 0;
max_seg_to_seg_fanout = 0;
max_seg_to_IPIN_fanout = 0;
for (size_t rr_node_idx = 0; rr_node_idx < device_ctx.rr_nodes.size(); rr_node_idx++) {
for (const RRNodeId& rr_node_idx : device_ctx.rr_graph.nodes()) {
int fanout_to_IPIN = 0;
int fanout_to_seg = 0;
auto node = &device_ctx.rr_nodes[rr_node_idx];
const RRGraph& rr_graph = device_ctx.rr_graph;
t_rr_node_power* node_power = &rr_node_power[rr_node_idx];
switch (node->type()) {
switch (rr_graph.node_type(rr_node_idx)) {
case IPIN:
max_IPIN_fanin = std::max(max_IPIN_fanin,
static_cast<int>(node->fan_in()));
max_fanin = std::max(max_fanin, static_cast<int>(node->fan_in()));
static_cast<int>(rr_graph.node_in_edges(rr_node_idx).size()));
max_fanin = std::max(max_fanin, static_cast<int>(rr_graph.node_in_edges(rr_node_idx).size()));
node_power->in_dens = (float*)vtr::calloc(node->fan_in(),
node_power->in_dens = (float*)vtr::calloc(rr_graph.node_in_edges(rr_node_idx).size(),
sizeof(float));
node_power->in_prob = (float*)vtr::calloc(node->fan_in(),
node_power->in_prob = (float*)vtr::calloc(rr_graph.node_in_edges(rr_node_idx).size(),
sizeof(float));
break;
case CHANX:
case CHANY:
for (t_edge_size iedge = 0; iedge < node->num_edges(); iedge++) {
if (node->edge_switch(iedge) == routing_arch->wire_to_rr_ipin_switch) {
for (const RREdgeId& iedge : rr_graph.node_out_edges(rr_node_idx)) {
if ((short)size_t(rr_graph.edge_switch(iedge)) == routing_arch->wire_to_rr_ipin_switch) {
fanout_to_IPIN++;
} else if (node->edge_switch(iedge) != routing_arch->delayless_switch) {
} else if ((short)size_t(rr_graph.edge_switch(iedge)) != routing_arch->delayless_switch) {
fanout_to_seg++;
}
}
max_seg_to_IPIN_fanout = std::max(max_seg_to_IPIN_fanout,
fanout_to_IPIN);
max_seg_to_seg_fanout = std::max(max_seg_to_seg_fanout, fanout_to_seg);
max_fanin = std::max(max_fanin, static_cast<int>(node->fan_in()));
max_fanin = std::max(max_fanin, static_cast<int>(rr_graph.node_in_edges(rr_node_idx).size()));
node_power->in_dens = (float*)vtr::calloc(node->fan_in(),
node_power->in_dens = (float*)vtr::calloc(rr_graph.node_in_edges(rr_node_idx).size(),
sizeof(float));
node_power->in_prob = (float*)vtr::calloc(node->fan_in(),
node_power->in_prob = (float*)vtr::calloc(rr_graph.node_in_edges(rr_node_idx).size(),
sizeof(float));
break;
default:
@ -1253,15 +1253,15 @@ void power_routing_init(const t_det_routing_arch* routing_arch) {
#endif
/* Populate driver switch type */
for (size_t rr_node_idx = 0; rr_node_idx < device_ctx.rr_nodes.size(); rr_node_idx++) {
auto node = &device_ctx.rr_nodes[rr_node_idx];
for (const RRNodeId& rr_node_idx : device_ctx.rr_graph.nodes()) {
const RRGraph& rr_graph = device_ctx.rr_graph;
for (t_edge_size edge_idx = 0; edge_idx < node->num_edges(); edge_idx++) {
if (node->edge_sink_node(edge_idx) != OPEN) {
if (rr_node_power[node->edge_sink_node(edge_idx)].driver_switch_type == OPEN) {
rr_node_power[node->edge_sink_node(edge_idx)].driver_switch_type = node->edge_switch(edge_idx);
for (const RREdgeId& edge_idx : rr_graph.node_out_edges(rr_node_idx)) {
if (rr_graph.edge_sink_node(edge_idx) != RRNodeId::INVALID()) {
if (rr_node_power[rr_graph.edge_sink_node(edge_idx)].driver_switch_type == OPEN) {
rr_node_power[rr_graph.edge_sink_node(edge_idx)].driver_switch_type = (short)size_t(rr_graph.edge_switch(edge_idx));
} else {
VTR_ASSERT(rr_node_power[node->edge_sink_node(edge_idx)].driver_switch_type == node->edge_switch(edge_idx));
VTR_ASSERT(rr_node_power[rr_graph.edge_sink_node(edge_idx)].driver_switch_type == (short)size_t(rr_graph.edge_switch(edge_idx)));
}
}
}
@ -1269,14 +1269,14 @@ void power_routing_init(const t_det_routing_arch* routing_arch) {
/* Find Max Fanout of Routing Buffer */
t_edge_size max_seg_fanout = 0;
for (size_t rr_node_idx = 0; rr_node_idx < device_ctx.rr_nodes.size(); rr_node_idx++) {
auto node = &device_ctx.rr_nodes[rr_node_idx];
for (const RRNodeId& rr_node_idx : device_ctx.rr_graph.nodes()) {
const RRGraph& rr_graph = device_ctx.rr_graph;
switch (node->type()) {
switch (rr_graph.node_type(rr_node_idx)) {
case CHANX:
case CHANY:
if (node->num_edges() > max_seg_fanout) {
max_seg_fanout = node->num_edges();
if (rr_graph.node_out_edges(rr_node_idx).size() > max_seg_fanout) {
max_seg_fanout = rr_graph.node_out_edges(rr_node_idx).size();
}
break;
default:
@ -1357,15 +1357,15 @@ bool power_uninit() {
auto& power_ctx = g_vpr_ctx.power();
bool error = false;
for (size_t rr_node_idx = 0; rr_node_idx < device_ctx.rr_nodes.size(); rr_node_idx++) {
auto node = &device_ctx.rr_nodes[rr_node_idx];
for (const RRNodeId& rr_node_idx : device_ctx.rr_graph.nodes()) {
const RRGraph& rr_graph = device_ctx.rr_graph;
t_rr_node_power* node_power = &rr_node_power[rr_node_idx];
switch (node->type()) {
switch (rr_graph.node_type(rr_node_idx)) {
case CHANX:
case CHANY:
case IPIN:
if (node->fan_in()) {
if (rr_graph.node_in_edges(rr_node_idx).size()) {
free(node_power->in_dens);
free(node_power->in_prob);
}
@ -1375,7 +1375,7 @@ bool power_uninit() {
break;
}
}
free(rr_node_power);
rr_node_power.clear();
/* Free mux architectures */
for (std::map<float, t_power_mux_info*>::iterator it = power_ctx.commonly_used->mux_info.begin();

View File

@ -17,13 +17,13 @@
#include "route_tree_timing.h"
/******************** Subroutines local to this module **********************/
static void check_node_and_range(int inode, enum e_route_type route_type);
static void check_source(int inode, ClusterNetId net_id);
static void check_sink(int inode, ClusterNetId net_id, bool* pin_done);
static void check_node_and_range(const RRNodeId& inode, enum e_route_type route_type);
static void check_source(const RRNodeId& inode, ClusterNetId net_id);
static void check_sink(const RRNodeId& inode, ClusterNetId net_id, bool* pin_done);
static void check_switch(t_trace* tptr, int num_switch);
static bool check_adjacent(int from_node, int to_node);
static int chanx_chany_adjacent(int chanx_node, int chany_node);
static void reset_flags(ClusterNetId inet, bool* connected_to_route);
static bool check_adjacent(const RRNodeId& from_node, const RRNodeId& to_node);
static int chanx_chany_adjacent(const RRNodeId& chanx_node, const RRNodeId& chany_node);
static void reset_flags(ClusterNetId inet, vtr::vector<RRNodeId, bool>& connected_to_route);
static void check_locally_used_clb_opins(const t_clb_opins_used& clb_opins_used_locally,
enum e_route_type route_type);
@ -39,10 +39,11 @@ void check_route(enum e_route_type route_type) {
* oversubscribed (the occupancy of everything is recomputed from *
* scratch). */
int max_pins, inode, prev_node;
int max_pins;
RRNodeId inode, prev_node;
unsigned int ipin;
bool valid, connects;
bool* connected_to_route; /* [0 .. device_ctx.rr_nodes.size()-1] */
vtr::vector<RRNodeId, bool> connected_to_route; /* [0 .. device_ctx.rr_nodes.size()-1] */
t_trace* tptr;
bool* pin_done;
@ -70,7 +71,7 @@ void check_route(enum e_route_type route_type) {
auto non_configurable_rr_sets = identify_non_configurable_rr_sets();
connected_to_route = (bool*)vtr::calloc(device_ctx.rr_nodes.size(), sizeof(bool));
connected_to_route.resize(device_ctx.rr_graph.nodes().size(), false);
max_pins = 0;
for (auto net_id : cluster_ctx.clb_nlist.nets())
@ -115,7 +116,7 @@ void check_route(enum e_route_type route_type) {
if (prev_switch == OPEN) { //Start of a new branch
if (connected_to_route[inode] == false) {
VPR_ERROR(VPR_ERROR_ROUTE,
"in check_route: node %d does not link into existing routing for net %d.\n", inode, size_t(net_id));
"in check_route: node %d does not link into existing routing for net %d.\n", size_t(inode), size_t(net_id));
}
} else { //Continuing along existing branch
connects = check_adjacent(prev_node, inode);
@ -131,7 +132,7 @@ void check_route(enum e_route_type route_type) {
connected_to_route[inode] = true; /* Mark as in path. */
if (device_ctx.rr_nodes[inode].type() == SINK) {
if (device_ctx.rr_graph.node_type(inode) == SINK) {
check_sink(inode, net_id, pin_done);
num_sinks += 1;
}
@ -165,24 +166,24 @@ void check_route(enum e_route_type route_type) {
} /* End for each net */
free(pin_done);
free(connected_to_route);
connected_to_route.clear();
VTR_LOG("Completed routing consistency check successfully.\n");
VTR_LOG("\n");
}
/* Checks that this SINK node is one of the terminals of inet, and marks *
* the appropriate pin as being reached. */
static void check_sink(int inode, ClusterNetId net_id, bool* pin_done) {
static void check_sink(const RRNodeId& inode, ClusterNetId net_id, bool* pin_done) {
auto& device_ctx = g_vpr_ctx.device();
auto& cluster_ctx = g_vpr_ctx.clustering();
auto& place_ctx = g_vpr_ctx.placement();
VTR_ASSERT(device_ctx.rr_nodes[inode].type() == SINK);
int i = device_ctx.rr_nodes[inode].xlow();
int j = device_ctx.rr_nodes[inode].ylow();
VTR_ASSERT(device_ctx.rr_graph.node_type(inode) == SINK);
int i = device_ctx.rr_graph.node_xlow(inode);
int j = device_ctx.rr_graph.node_ylow(inode);
auto type = device_ctx.grid[i][j].type;
/* For sinks, ptc_num is the class */
int ptc_num = device_ctx.rr_nodes[inode].ptc_num();
int ptc_num = device_ctx.rr_graph.node_ptc_num(inode);
int ifound = 0;
for (int iblk = 0; iblk < type->capacity; iblk++) {
@ -215,26 +216,26 @@ static void check_sink(int inode, ClusterNetId net_id, bool* pin_done) {
"in check_sink: node %d does not connect to any terminal of net %s #%lu.\n"
"This error is usually caused by incorrectly specified logical equivalence in your architecture file.\n"
"You should try to respecify what pins are equivalent or turn logical equivalence off.\n",
inode, cluster_ctx.clb_nlist.net_name(net_id).c_str(), size_t(net_id));
size_t(inode), cluster_ctx.clb_nlist.net_name(net_id).c_str(), size_t(net_id));
}
}
/* Checks that the node passed in is a valid source for this net. */
static void check_source(int inode, ClusterNetId net_id) {
static void check_source(const RRNodeId& inode, ClusterNetId net_id) {
auto& device_ctx = g_vpr_ctx.device();
auto& cluster_ctx = g_vpr_ctx.clustering();
auto& place_ctx = g_vpr_ctx.placement();
t_rr_type rr_type = device_ctx.rr_nodes[inode].type();
t_rr_type rr_type = device_ctx.rr_graph.node_type(inode);
if (rr_type != SOURCE) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_source: net %d begins with a node of type %d.\n", size_t(net_id), rr_type);
}
int i = device_ctx.rr_nodes[inode].xlow();
int j = device_ctx.rr_nodes[inode].ylow();
int i = device_ctx.rr_graph.node_xlow(inode);
int j = device_ctx.rr_graph.node_ylow(inode);
/* for sinks and sources, ptc_num is class */
int ptc_num = device_ctx.rr_nodes[inode].ptc_num();
int ptc_num = device_ctx.rr_graph.node_ptc_num(inode);
/* First node_block for net is the source */
ClusterBlockId blk_id = cluster_ctx.clb_nlist.net_driver_block(net_id);
auto type = device_ctx.grid[i][j].type;
@ -259,7 +260,7 @@ static void check_switch(t_trace* tptr, int num_switch) {
/* Checks that the switch leading from this traceback element to the next *
* one is a legal switch type. */
int inode;
RRNodeId inode;
short switch_type;
auto& device_ctx = g_vpr_ctx.device();
@ -267,12 +268,12 @@ static void check_switch(t_trace* tptr, int num_switch) {
inode = tptr->index;
switch_type = tptr->iswitch;
if (device_ctx.rr_nodes[inode].type() != SINK) {
if (device_ctx.rr_graph.node_type(inode) != SINK) {
if (switch_type >= num_switch) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_switch: rr_node %d left via switch type %d.\n"
"\tSwitch type is out of range.\n",
inode, switch_type);
size_t(inode), switch_type);
}
}
@ -283,19 +284,19 @@ static void check_switch(t_trace* tptr, int num_switch) {
if (switch_type != OPEN) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_switch: rr_node %d is a SINK, but attempts to use a switch of type %d.\n", inode, switch_type);
"in check_switch: rr_node %d is a SINK, but attempts to use a switch of type %d.\n", size_t(inode), switch_type);
}
}
}
static void reset_flags(ClusterNetId inet, bool* connected_to_route) {
static void reset_flags(ClusterNetId inet, vtr::vector<RRNodeId, bool>& connected_to_route) {
/* This routine resets the flags of all the channel segments contained *
* in the traceback of net inet to 0. This allows us to check the *
* next net for connectivity (and the default state of the flags *
* should always be zero after they have been used). */
t_trace* tptr;
int inode;
RRNodeId inode;
auto& route_ctx = g_vpr_ctx.routing();
@ -308,7 +309,7 @@ static void reset_flags(ClusterNetId inet, bool* connected_to_route) {
}
}
static bool check_adjacent(int from_node, int to_node) {
static bool check_adjacent(const RRNodeId& from_node, const RRNodeId& to_node) {
/* This routine checks if the rr_node to_node is reachable from from_node. *
* It returns true if is reachable and false if it is not. Check_node has *
* already been used to verify that both nodes are valid rr_nodes, so only *
@ -327,8 +328,8 @@ static bool check_adjacent(int from_node, int to_node) {
reached = false;
for (t_edge_size iconn = 0; iconn < device_ctx.rr_nodes[from_node].num_edges(); iconn++) {
if (device_ctx.rr_nodes[from_node].edge_sink_node(iconn) == to_node) {
for (const RREdgeId& iconn : device_ctx.rr_graph.node_out_edges(from_node)) {
if (device_ctx.rr_graph.edge_sink_node(iconn) == to_node) {
reached = true;
break;
}
@ -343,18 +344,18 @@ static bool check_adjacent(int from_node, int to_node) {
num_adj = 0;
from_type = device_ctx.rr_nodes[from_node].type();
from_xlow = device_ctx.rr_nodes[from_node].xlow();
from_ylow = device_ctx.rr_nodes[from_node].ylow();
from_xhigh = device_ctx.rr_nodes[from_node].xhigh();
from_yhigh = device_ctx.rr_nodes[from_node].yhigh();
from_ptc = device_ctx.rr_nodes[from_node].ptc_num();
to_type = device_ctx.rr_nodes[to_node].type();
to_xlow = device_ctx.rr_nodes[to_node].xlow();
to_ylow = device_ctx.rr_nodes[to_node].ylow();
to_xhigh = device_ctx.rr_nodes[to_node].xhigh();
to_yhigh = device_ctx.rr_nodes[to_node].yhigh();
to_ptc = device_ctx.rr_nodes[to_node].ptc_num();
from_type = device_ctx.rr_graph.node_type(from_node);
from_xlow = device_ctx.rr_graph.node_xlow(from_node);
from_ylow = device_ctx.rr_graph.node_ylow(from_node);
from_xhigh = device_ctx.rr_graph.node_xhigh(from_node);
from_yhigh = device_ctx.rr_graph.node_yhigh(from_node);
from_ptc = device_ctx.rr_graph.node_ptc_num(from_node);
to_type = device_ctx.rr_graph.node_type(to_node);
to_xlow = device_ctx.rr_graph.node_xlow(to_node);
to_ylow = device_ctx.rr_graph.node_ylow(to_node);
to_xhigh = device_ctx.rr_graph.node_xhigh(to_node);
to_yhigh = device_ctx.rr_graph.node_yhigh(to_node);
to_ptc = device_ctx.rr_graph.node_ptc_num(to_node);
switch (from_type) {
case SOURCE:
@ -411,8 +412,8 @@ static bool check_adjacent(int from_node, int to_node) {
if (to_type == IPIN) {
num_adj += 1; //adjacent
} else if (to_type == CHANX) {
from_xhigh = device_ctx.rr_nodes[from_node].xhigh();
to_xhigh = device_ctx.rr_nodes[to_node].xhigh();
from_xhigh = device_ctx.rr_graph.node_xhigh(from_node);
to_xhigh = device_ctx.rr_graph.node_xhigh(to_node);
if (from_ylow == to_ylow) {
/* UDSD Modification by WMF Begin */
/*For Fs > 3, can connect to overlapping wire segment */
@ -436,7 +437,7 @@ static bool check_adjacent(int from_node, int to_node) {
num_adj += chanx_chany_adjacent(from_node, to_node);
} else {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_adjacent: %d and %d are not adjacent", from_node, to_node);
"in check_adjacent: %d and %d are not adjacent", size_t(from_node), size_t(to_node));
}
break;
@ -444,8 +445,8 @@ static bool check_adjacent(int from_node, int to_node) {
if (to_type == IPIN) {
num_adj += 1; //adjacent
} else if (to_type == CHANY) {
from_yhigh = device_ctx.rr_nodes[from_node].yhigh();
to_yhigh = device_ctx.rr_nodes[to_node].yhigh();
from_yhigh = device_ctx.rr_graph.node_yhigh(from_node);
to_yhigh = device_ctx.rr_graph.node_yhigh(to_node);
if (from_xlow == to_xlow) {
/* UDSD Modification by WMF Begin */
if (to_yhigh == from_ylow - 1 || from_yhigh == to_ylow - 1) {
@ -468,7 +469,7 @@ static bool check_adjacent(int from_node, int to_node) {
num_adj += chanx_chany_adjacent(to_node, from_node);
} else {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_adjacent: %d and %d are not adjacent", from_node, to_node);
"in check_adjacent: %d and %d are not adjacent", size_t(from_node), size_t(to_node));
}
break;
@ -486,7 +487,7 @@ static bool check_adjacent(int from_node, int to_node) {
return false; //Should not reach here once thrown
}
static int chanx_chany_adjacent(int chanx_node, int chany_node) {
static int chanx_chany_adjacent(const RRNodeId& chanx_node, const RRNodeId& chany_node) {
/* Returns 1 if the specified CHANX and CHANY nodes are adjacent, 0 *
* otherwise. */
@ -495,13 +496,13 @@ static int chanx_chany_adjacent(int chanx_node, int chany_node) {
auto& device_ctx = g_vpr_ctx.device();
chanx_y = device_ctx.rr_nodes[chanx_node].ylow();
chanx_xlow = device_ctx.rr_nodes[chanx_node].xlow();
chanx_xhigh = device_ctx.rr_nodes[chanx_node].xhigh();
chanx_y = device_ctx.rr_graph.node_ylow(chanx_node);
chanx_xlow = device_ctx.rr_graph.node_xlow(chanx_node);
chanx_xhigh = device_ctx.rr_graph.node_xhigh(chanx_node);
chany_x = device_ctx.rr_nodes[chany_node].xlow();
chany_ylow = device_ctx.rr_nodes[chany_node].ylow();
chany_yhigh = device_ctx.rr_nodes[chany_node].yhigh();
chany_x = device_ctx.rr_graph.node_xlow(chany_node);
chany_ylow = device_ctx.rr_graph.node_ylow(chany_node);
chany_yhigh = device_ctx.rr_graph.node_yhigh(chany_node);
if (chany_ylow > chanx_y + 1 || chany_yhigh < chanx_y)
return (0);
@ -519,7 +520,8 @@ void recompute_occupancy_from_scratch() {
* brute force recompute from scratch that is useful for sanity checking.
*/
int inode, iclass, ipin, num_local_opins;
int iclass, ipin, num_local_opins;
RRNodeId inode;
t_trace* tptr;
auto& route_ctx = g_vpr_ctx.mutable_routing();
@ -528,8 +530,9 @@ void recompute_occupancy_from_scratch() {
/* First set the occupancy of everything to zero. */
for (size_t inode_idx = 0; inode_idx < device_ctx.rr_nodes.size(); inode_idx++)
for (const RRNodeId& inode_idx : device_ctx.rr_graph.nodes()) {
route_ctx.rr_node_route_inf[inode_idx].set_occ(0);
}
/* Now go through each net and count the tracks and pins used everywhere */
@ -575,7 +578,8 @@ static void check_locally_used_clb_opins(const t_clb_opins_used& clb_opins_used_
/* Checks that enough OPINs on CLBs have been set aside (used up) to make a *
* legal routing if subblocks connect to OPINs directly. */
int iclass, num_local_opins, inode, ipin;
int iclass, num_local_opins, ipin;
RRNodeId inode;
t_rr_type rr_type;
auto& cluster_ctx = g_vpr_ctx.clustering();
@ -592,35 +596,35 @@ static void check_locally_used_clb_opins(const t_clb_opins_used& clb_opins_used_
/* Now check that node is an OPIN of the right type. */
rr_type = device_ctx.rr_nodes[inode].type();
rr_type = device_ctx.rr_graph.node_type(inode);
if (rr_type != OPIN) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_locally_used_opins: block #%lu (%s)\n"
"\tClass %d local OPIN is wrong rr_type -- rr_node #%d of type %d.\n",
size_t(blk_id), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), iclass, inode, rr_type);
size_t(blk_id), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), iclass, size_t(inode), rr_type);
}
ipin = device_ctx.rr_nodes[inode].ptc_num();
ipin = device_ctx.rr_graph.node_ptc_num(inode);
if (physical_tile_type(blk_id)->pin_class[ipin] != iclass) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_locally_used_opins: block #%lu (%s):\n"
"\tExpected class %d local OPIN has class %d -- rr_node #: %d.\n",
size_t(blk_id), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), iclass, physical_tile_type(blk_id)->pin_class[ipin], inode);
size_t(blk_id), cluster_ctx.clb_nlist.block_name(blk_id).c_str(), iclass, physical_tile_type(blk_id)->pin_class[ipin], size_t(inode));
}
}
}
}
}
static void check_node_and_range(int inode, enum e_route_type route_type) {
static void check_node_and_range(const RRNodeId& inode, enum e_route_type route_type) {
/* Checks that inode is within the legal range, then calls check_node to *
* check that everything else about the node is OK. */
auto& device_ctx = g_vpr_ctx.device();
if (inode < 0 || inode >= (int)device_ctx.rr_nodes.size()) {
if (false == device_ctx.rr_graph.valid_node_id(inode)) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_node_and_range: rr_node #%d is out of legal, range (0 to %d).\n", inode, device_ctx.rr_nodes.size() - 1);
"in check_node_and_range: rr_node #%d is out of legal, range (0 to %d).\n", size_t(inode), device_ctx.rr_graph.nodes().size() - 1);
}
check_rr_node(inode, route_type, device_ctx);
}
@ -637,16 +641,16 @@ static bool check_non_configurable_edges(ClusterNetId net, const t_non_configura
//Collect all the edges used by this net's routing
std::set<t_node_edge> routing_edges;
std::set<int> routing_nodes;
std::set<RRNodeId> routing_nodes;
for (t_trace* trace = head; trace != nullptr; trace = trace->next) {
int inode = trace->index;
RRNodeId inode = trace->index;
routing_nodes.insert(inode);
if (trace->iswitch == OPEN) {
continue; //End of branch
} else if (trace->next) {
int inode_next = trace->next->index;
RRNodeId inode_next = trace->next->index;
t_node_edge edge = {inode, inode_next};
@ -668,7 +672,7 @@ static bool check_non_configurable_edges(ClusterNetId net, const t_non_configura
//within a set is used by the routing
for (const auto& rr_nodes : non_configurable_rr_sets.node_sets) {
//Compute the intersection of the routing and current non-configurable nodes set
std::vector<int> intersection;
std::vector<RRNodeId> intersection;
std::set_intersection(routing_nodes.begin(), routing_nodes.end(),
rr_nodes.begin(), rr_nodes.end(),
std::back_inserter(intersection));
@ -683,7 +687,7 @@ static bool check_non_configurable_edges(ClusterNetId net, const t_non_configura
//Compute the difference to identify the missing nodes
//for detailed error reporting -- the nodes
//which are in rr_nodes but not in routing_nodes.
std::vector<int> difference;
std::vector<RRNodeId> difference;
std::set_difference(rr_nodes.begin(), rr_nodes.end(),
routing_nodes.begin(), routing_nodes.end(),
std::back_inserter(difference));
@ -762,7 +766,7 @@ static bool check_non_configurable_edges(ClusterNetId net, const t_non_configura
for (t_node_edge missing_edge : dedupped_difference) {
msg += vtr::string_fmt(" Expected RR Node: %d and RR Node: %d to be non-configurably connected, but edge missing from routing:\n",
missing_edge.from_node, missing_edge.to_node);
size_t(missing_edge.from_node), size_t(missing_edge.to_node));
msg += vtr::string_fmt(" %s\n", describe_rr_node(missing_edge.from_node).c_str());
msg += vtr::string_fmt(" %s\n", describe_rr_node(missing_edge.to_node).c_str());
}
@ -786,7 +790,7 @@ class StubFinder {
bool CheckNet(ClusterNetId net);
// Returns set of stub nodes.
const std::set<int>& stub_nodes() {
const std::set<RRNodeId>& stub_nodes() {
return stub_nodes_;
}
@ -796,7 +800,7 @@ class StubFinder {
// Set of stub nodes
// Note this is an ordered set so that node output is sorted by node
// id.
std::set<int> stub_nodes_;
std::set<RRNodeId> stub_nodes_;
};
//Cheks for stubs in a net's routing.
@ -813,7 +817,7 @@ void check_net_for_stubs(ClusterNetId net) {
auto& cluster_ctx = g_vpr_ctx.clustering();
std::string msg = vtr::string_fmt("Route tree for net '%s' (#%zu) contains stub branches rooted at:\n",
cluster_ctx.clb_nlist.net_name(net).c_str(), size_t(net));
for (int inode : stub_finder.stub_nodes()) {
for (const RRNodeId& inode : stub_finder.stub_nodes()) {
msg += vtr::string_fmt(" %s\n", describe_rr_node(inode).c_str());
}
@ -837,7 +841,7 @@ bool StubFinder::RecurseTree(t_rt_node* rt_root) {
if (rt_root->u.child_list == nullptr) {
//If a leaf of the route tree is not a SINK, then it is a stub
if (device_ctx.rr_nodes[rt_root->inode].type() != SINK) {
if (device_ctx.rr_graph.node_type(rt_root->inode) != SINK) {
return true; //It is the current root of this stub
} else {
return false;

View File

@ -10,13 +10,13 @@
/*********************** Subroutines local to this module *******************/
static bool rr_node_is_global_clb_ipin(int inode);
static bool rr_node_is_global_clb_ipin(const RRNodeId& inode);
static void check_unbuffered_edges(int from_node);
static void check_unbuffered_edges(const RRNodeId& from_node);
static bool has_adjacent_channel(const t_rr_node& node, const DeviceGrid& grid);
static bool has_adjacent_channel(const RRGraph& rr_graph, const RRNodeId& node, const DeviceGrid& grid);
static void check_rr_edge(int from_node, int from_edge, int to_node);
static void check_rr_edge(const RREdgeId& from_edge, const RRNodeId& to_node);
/************************ Subroutine definitions ****************************/
@ -30,69 +30,67 @@ void check_rr_graph(const t_graph_type graph_type,
auto& device_ctx = g_vpr_ctx.device();
auto total_edges_to_node = std::vector<int>(device_ctx.rr_nodes.size());
auto switch_types_from_current_to_node = std::vector<unsigned char>(device_ctx.rr_nodes.size());
auto total_edges_to_node = vtr::vector<RRNodeId, int>(device_ctx.rr_graph.nodes().size());
auto switch_types_from_current_to_node = vtr::vector<RRNodeId, unsigned char>(device_ctx.rr_graph.nodes().size());
const int num_rr_switches = device_ctx.rr_switch_inf.size();
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
device_ctx.rr_nodes[inode].validate();
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
/* Ignore any uninitialized rr_graph nodes */
if ((device_ctx.rr_nodes[inode].type() == SOURCE)
&& (device_ctx.rr_nodes[inode].xlow() == 0) && (device_ctx.rr_nodes[inode].ylow() == 0)
&& (device_ctx.rr_nodes[inode].xhigh() == 0) && (device_ctx.rr_nodes[inode].yhigh() == 0)) {
if ((device_ctx.rr_graph.node_type(inode) == SOURCE)
&& (device_ctx.rr_graph.node_xlow(inode) == 0) && (device_ctx.rr_graph.node_ylow(inode) == 0)
&& (device_ctx.rr_graph.node_xhigh(inode) == 0) && (device_ctx.rr_graph.node_yhigh(inode) == 0)) {
continue;
}
t_rr_type rr_type = device_ctx.rr_nodes[inode].type();
int num_edges = device_ctx.rr_nodes[inode].num_edges();
t_rr_type rr_type = device_ctx.rr_graph.node_type(inode);
check_rr_node(inode, route_type, device_ctx);
/* Check all the connectivity (edges, etc.) information. */
std::map<int, std::vector<int>> edges_from_current_to_node;
for (int iedge = 0; iedge < num_edges; iedge++) {
int to_node = device_ctx.rr_nodes[inode].edge_sink_node(iedge);
std::map<RRNodeId, std::vector<RREdgeId>> edges_from_current_to_node;
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(inode)) {
RRNodeId to_node = device_ctx.rr_graph.edge_sink_node(iedge);
if (to_node < 0 || to_node >= (int)device_ctx.rr_nodes.size()) {
if (false == device_ctx.rr_graph.valid_node_id(to_node)) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_rr_graph: node %d has an edge %d.\n"
"\tEdge is out of range.\n",
inode, to_node);
size_t(inode), size_t(to_node));
}
check_rr_edge(inode, iedge, to_node);
check_rr_edge(iedge, to_node);
edges_from_current_to_node[to_node].push_back(iedge);
total_edges_to_node[to_node]++;
auto switch_type = device_ctx.rr_nodes[inode].edge_switch(iedge);
auto switch_type = size_t(device_ctx.rr_graph.edge_switch(iedge));
if (switch_type < 0 || switch_type >= num_rr_switches) {
if (switch_type >= (size_t)num_rr_switches) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_rr_graph: node %d has a switch type %d.\n"
"\tSwitch type is out of range.\n",
inode, switch_type);
size_t(inode), switch_type);
}
} /* End for all edges of node. */
//Check that multiple edges between the same from/to nodes make sense
for (int iedge = 0; iedge < num_edges; iedge++) {
int to_node = device_ctx.rr_nodes[inode].edge_sink_node(iedge);
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(inode)) {
RRNodeId to_node = device_ctx.rr_graph.edge_sink_node(iedge);
if (edges_from_current_to_node[to_node].size() == 1) continue; //Single edges are always OK
VTR_ASSERT_MSG(edges_from_current_to_node[to_node].size() > 1, "Expect multiple edges");
t_rr_type to_rr_type = device_ctx.rr_nodes[to_node].type();
t_rr_type to_rr_type = device_ctx.rr_graph.node_type(to_node);
//Only expect chan <-> chan connections to have multiple edges
if ((to_rr_type != CHANX && to_rr_type != CHANY)
|| (rr_type != CHANX && rr_type != CHANY)) {
VPR_ERROR(VPR_ERROR_ROUTE,
"in check_rr_graph: node %d (%s) connects to node %d (%s) %zu times - multi-connections only expected for CHAN->CHAN.\n",
inode, rr_node_typename[rr_type], to_node, rr_node_typename[to_rr_type], edges_from_current_to_node[to_node].size());
size_t(inode), rr_node_typename[rr_type], size_t(to_node), rr_node_typename[to_rr_type], edges_from_current_to_node[to_node].size());
}
//Between two wire segments
@ -105,7 +103,7 @@ void check_rr_graph(const t_graph_type graph_type,
//Identify any such edges with identical switches
std::map<short, int> switch_counts;
for (auto edge : edges_from_current_to_node[to_node]) {
auto edge_switch = device_ctx.rr_nodes[inode].edge_switch(edge);
auto edge_switch = size_t(device_ctx.rr_graph.edge_switch(edge));
switch_counts[edge_switch]++;
}
@ -117,7 +115,7 @@ void check_rr_graph(const t_graph_type graph_type,
auto switch_type = device_ctx.rr_switch_inf[kv.first].type();
VPR_ERROR(VPR_ERROR_ROUTE, "in check_rr_graph: node %d has %d redundant connections to node %d of switch type %d (%s)",
inode, kv.second, to_node, kv.first, SWITCH_TYPE_STRINGS[size_t(switch_type)]);
size_t(inode), kv.second, size_t(to_node), kv.first, SWITCH_TYPE_STRINGS[size_t(switch_type)]);
}
}
@ -125,17 +123,17 @@ void check_rr_graph(const t_graph_type graph_type,
check_unbuffered_edges(inode);
//Check that all config/non-config edges are appropriately organized
for (auto edge : device_ctx.rr_nodes[inode].configurable_edges()) {
if (!device_ctx.rr_nodes[inode].edge_is_configurable(edge)) {
for (auto edge : device_ctx.rr_graph.node_configurable_out_edges(inode)) {
if (!device_ctx.rr_graph.edge_is_configurable(edge)) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "in check_rr_graph: node %d edge %d is non-configurable, but in configurable edges",
inode, edge);
size_t(inode), size_t(edge));
}
}
for (auto edge : device_ctx.rr_nodes[inode].non_configurable_edges()) {
if (device_ctx.rr_nodes[inode].edge_is_configurable(edge)) {
for (auto edge : device_ctx.rr_graph.node_non_configurable_out_edges(inode)) {
if (device_ctx.rr_graph.edge_is_configurable(edge)) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "in check_rr_graph: node %d edge %d is configurable, but in non-configurable edges",
inode, edge);
size_t(inode), size_t(edge));
}
}
@ -145,8 +143,8 @@ void check_rr_graph(const t_graph_type graph_type,
* now I check that everything is reachable. */
bool is_fringe_warning_sent = false;
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
t_rr_type rr_type = device_ctx.rr_nodes[inode].type();
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
t_rr_type rr_type = device_ctx.rr_graph.node_type(inode);
if (rr_type != SOURCE) {
if (total_edges_to_node[inode] < 1 && !rr_node_is_global_clb_ipin(inode)) {
@ -156,7 +154,7 @@ void check_rr_graph(const t_graph_type graph_type,
*/
bool is_chain = false;
if (rr_type == IPIN) {
t_physical_tile_type_ptr type = device_ctx.grid[device_ctx.rr_nodes[inode].xlow()][device_ctx.rr_nodes[inode].ylow()].type;
t_physical_tile_type_ptr type = device_ctx.grid[device_ctx.rr_graph.node_xlow(inode)][device_ctx.rr_graph.node_ylow(inode)].type;
for (const t_fc_specification& fc_spec : types[type->index].fc_specs) {
if (fc_spec.fc_value == 0 && fc_spec.seg_index == 0) {
is_chain = true;
@ -164,45 +162,43 @@ void check_rr_graph(const t_graph_type graph_type,
}
}
const auto& node = device_ctx.rr_nodes[inode];
bool is_fringe = ((device_ctx.rr_nodes[inode].xlow() == 1)
|| (device_ctx.rr_nodes[inode].ylow() == 1)
|| (device_ctx.rr_nodes[inode].xhigh() == int(grid.width()) - 2)
|| (device_ctx.rr_nodes[inode].yhigh() == int(grid.height()) - 2));
bool is_wire = (device_ctx.rr_nodes[inode].type() == CHANX
|| device_ctx.rr_nodes[inode].type() == CHANY);
bool is_fringe = ((device_ctx.rr_graph.node_xlow(inode) == 1)
|| (device_ctx.rr_graph.node_ylow(inode) == 1)
|| (device_ctx.rr_graph.node_xhigh(inode) == int(grid.width()) - 2)
|| (device_ctx.rr_graph.node_yhigh(inode) == int(grid.height()) - 2));
bool is_wire = (device_ctx.rr_graph.node_type(inode) == CHANX
|| device_ctx.rr_graph.node_type(inode) == CHANY);
if (!is_chain && !is_fringe && !is_wire) {
if (node.type() == IPIN || node.type() == OPIN) {
if (has_adjacent_channel(node, device_ctx.grid)) {
auto block_type = device_ctx.grid[node.xlow()][node.ylow()].type;
std::string pin_name = block_type_pin_index_to_name(block_type, node.pin_num());
if (device_ctx.rr_graph.node_type(inode) == IPIN || device_ctx.rr_graph.node_type(inode) == OPIN) {
if (has_adjacent_channel(device_ctx.rr_graph, inode, device_ctx.grid)) {
auto block_type = device_ctx.grid[device_ctx.rr_graph.node_xlow(inode)][device_ctx.rr_graph.node_ylow(inode)].type;
std::string pin_name = block_type_pin_index_to_name(block_type, device_ctx.rr_graph.node_pin_num(inode));
VTR_LOG_ERROR("in check_rr_graph: node %d (%s) at (%d,%d) block=%s side=%s pin=%s has no fanin.\n",
inode, node.type_string(), node.xlow(), node.ylow(), block_type->name, node.side_string(), pin_name.c_str());
size_t(inode), rr_node_typename[device_ctx.rr_graph.node_type(inode)], device_ctx.rr_graph.node_xlow(inode), device_ctx.rr_graph.node_ylow(inode), block_type->name, SIDE_STRING[device_ctx.rr_graph.node_side(inode)], pin_name.c_str());
}
} else {
VTR_LOG_ERROR("in check_rr_graph: node %d (%s) has no fanin.\n",
inode, device_ctx.rr_nodes[inode].type_string());
size_t(inode), rr_node_typename[device_ctx.rr_graph.node_type(inode)]);
}
} else if (!is_chain && !is_fringe_warning_sent) {
VTR_LOG_WARN(
"in check_rr_graph: fringe node %d %s at (%d,%d) has no fanin.\n"
"\t This is possible on a fringe node based on low Fc_out, N, and certain lengths.\n",
inode, device_ctx.rr_nodes[inode].type_string(), device_ctx.rr_nodes[inode].xlow(), device_ctx.rr_nodes[inode].ylow());
size_t(inode), rr_node_typename[device_ctx.rr_graph.node_type(inode)], device_ctx.rr_graph.node_xlow(inode), device_ctx.rr_graph.node_ylow(inode));
is_fringe_warning_sent = true;
}
}
} else { /* SOURCE. No fanin for now; change if feedthroughs allowed. */
if (total_edges_to_node[inode] != 0) {
VTR_LOG_ERROR("in check_rr_graph: SOURCE node %d has a fanin of %d, expected 0.\n",
inode, total_edges_to_node[inode]);
size_t(inode), total_edges_to_node[inode]);
}
}
}
}
static bool rr_node_is_global_clb_ipin(int inode) {
static bool rr_node_is_global_clb_ipin(const RRNodeId& inode) {
/* Returns true if inode refers to a global CLB input pin node. */
int ipin;
@ -210,17 +206,17 @@ static bool rr_node_is_global_clb_ipin(int inode) {
auto& device_ctx = g_vpr_ctx.device();
type = device_ctx.grid[device_ctx.rr_nodes[inode].xlow()][device_ctx.rr_nodes[inode].ylow()].type;
type = device_ctx.grid[device_ctx.rr_graph.node_xlow(inode)][device_ctx.rr_graph.node_ylow(inode)].type;
if (device_ctx.rr_nodes[inode].type() != IPIN)
if (device_ctx.rr_graph.node_type(inode) != IPIN)
return (false);
ipin = device_ctx.rr_nodes[inode].ptc_num();
ipin = device_ctx.rr_graph.node_ptc_num(inode);
return type->is_ignored_pin[ipin];
}
void check_rr_node(int inode, enum e_route_type route_type, const DeviceContext& device_ctx) {
void check_rr_node(const RRNodeId& inode, enum e_route_type route_type, const DeviceContext& device_ctx) {
/* This routine checks that the rr_node is inside the grid and has a valid
* pin number, etc.
*/
@ -231,14 +227,14 @@ void check_rr_node(int inode, enum e_route_type route_type, const DeviceContext&
int nodes_per_chan, tracks_per_node, num_edges, cost_index;
float C, R;
rr_type = device_ctx.rr_nodes[inode].type();
xlow = device_ctx.rr_nodes[inode].xlow();
xhigh = device_ctx.rr_nodes[inode].xhigh();
ylow = device_ctx.rr_nodes[inode].ylow();
yhigh = device_ctx.rr_nodes[inode].yhigh();
ptc_num = device_ctx.rr_nodes[inode].ptc_num();
capacity = device_ctx.rr_nodes[inode].capacity();
cost_index = device_ctx.rr_nodes[inode].cost_index();
rr_type = device_ctx.rr_graph.node_type(inode);
xlow = device_ctx.rr_graph.node_xlow(inode);
xhigh = device_ctx.rr_graph.node_xhigh(inode);
ylow = device_ctx.rr_graph.node_ylow(inode);
yhigh = device_ctx.rr_graph.node_yhigh(inode);
ptc_num = device_ctx.rr_graph.node_ptc_num(inode);
capacity = device_ctx.rr_graph.node_capacity(inode);
cost_index = device_ctx.rr_graph.node_cost_index(inode);
type = nullptr;
const auto& grid = device_ctx.grid;
@ -413,7 +409,7 @@ void check_rr_node(int inode, enum e_route_type route_type, const DeviceContext&
}
/* Check that the number of (out) edges is reasonable. */
num_edges = device_ctx.rr_nodes[inode].num_edges();
num_edges = device_ctx.rr_graph.node_out_edges(inode).size();
if (rr_type != SINK && rr_type != IPIN) {
if (num_edges <= 0) {
@ -424,7 +420,7 @@ void check_rr_node(int inode, enum e_route_type route_type, const DeviceContext&
//Don't worry about disconnect PINs which have no adjacent channels (i.e. on the device perimeter)
bool check_for_out_edges = true;
if (rr_type == IPIN || rr_type == OPIN) {
if (!has_adjacent_channel(device_ctx.rr_nodes[inode], device_ctx.grid)) {
if (!has_adjacent_channel(device_ctx.rr_graph, inode, device_ctx.grid)) {
check_for_out_edges = false;
}
}
@ -437,52 +433,49 @@ void check_rr_node(int inode, enum e_route_type route_type, const DeviceContext&
} else if (rr_type == SINK) { /* SINK -- remove this check if feedthroughs allowed */
if (num_edges != 0) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in check_rr_node: node %d is a sink, but has %d edges.\n", inode, num_edges);
"in check_rr_node: node %d is a sink, but has %d edges.\n", size_t(inode), num_edges);
}
}
/* Check that the capacitance and resistance are reasonable. */
C = device_ctx.rr_nodes[inode].C();
R = device_ctx.rr_nodes[inode].R();
C = device_ctx.rr_graph.node_C(inode);
R = device_ctx.rr_graph.node_R(inode);
if (rr_type == CHANX || rr_type == CHANY) {
if (C < 0. || R < 0.) {
VPR_ERROR(VPR_ERROR_ROUTE,
"in check_rr_node: node %d of type %d has R = %g and C = %g.\n", inode, rr_type, R, C);
"in check_rr_node: node %d of type %d has R = %g and C = %g.\n", size_t(inode), rr_type, R, C);
}
} else {
if (C != 0. || R != 0.) {
VPR_ERROR(VPR_ERROR_ROUTE,
"in check_rr_node: node %d of type %d has R = %g and C = %g.\n", inode, rr_type, R, C);
"in check_rr_node: node %d of type %d has R = %g and C = %g.\n", size_t(inode), rr_type, R, C);
}
}
}
static void check_unbuffered_edges(int from_node) {
static void check_unbuffered_edges(const RRNodeId& from_node) {
/* This routine checks that all pass transistors in the routing truly are *
* bidirectional. It may be a slow check, so don't use it all the time. */
int from_edge, to_node, to_edge, from_num_edges, to_num_edges;
t_rr_type from_rr_type, to_rr_type;
short from_switch_type;
bool trans_matched;
auto& device_ctx = g_vpr_ctx.device();
from_rr_type = device_ctx.rr_nodes[from_node].type();
from_rr_type = device_ctx.rr_graph.node_type(from_node);
if (from_rr_type != CHANX && from_rr_type != CHANY)
return;
from_num_edges = device_ctx.rr_nodes[from_node].num_edges();
for (from_edge = 0; from_edge < from_num_edges; from_edge++) {
to_node = device_ctx.rr_nodes[from_node].edge_sink_node(from_edge);
to_rr_type = device_ctx.rr_nodes[to_node].type();
for (const RREdgeId& from_edge : device_ctx.rr_graph.node_out_edges(from_node)) {
RRNodeId to_node = device_ctx.rr_graph.edge_sink_node(from_edge);
to_rr_type = device_ctx.rr_graph.node_type(to_node);
if (to_rr_type != CHANX && to_rr_type != CHANY)
continue;
from_switch_type = device_ctx.rr_nodes[from_node].edge_switch(from_edge);
from_switch_type = size_t(device_ctx.rr_graph.edge_switch(from_edge));
if (device_ctx.rr_switch_inf[from_switch_type].buffered())
continue;
@ -491,12 +484,11 @@ static void check_unbuffered_edges(int from_node) {
* check that there is a corresponding edge from to_node back to *
* from_node. */
to_num_edges = device_ctx.rr_nodes[to_node].num_edges();
trans_matched = false;
for (to_edge = 0; to_edge < to_num_edges; to_edge++) {
if (device_ctx.rr_nodes[to_node].edge_sink_node(to_edge) == from_node
&& device_ctx.rr_nodes[to_node].edge_switch(to_edge) == from_switch_type) {
for (const RREdgeId& to_edge : device_ctx.rr_graph.node_out_edges(to_node)) {
if (device_ctx.rr_graph.edge_sink_node(to_edge) == from_node
&& (short)size_t(device_ctx.rr_graph.edge_switch(to_edge)) == from_switch_type) {
trans_matched = true;
break;
}
@ -507,33 +499,33 @@ static void check_unbuffered_edges(int from_node) {
"in check_unbuffered_edges:\n"
"connection from node %d to node %d uses an unbuffered switch (switch type %d '%s')\n"
"but there is no corresponding unbuffered switch edge in the other direction.\n",
from_node, to_node, from_switch_type, device_ctx.rr_switch_inf[from_switch_type].name);
size_t(from_node), size_t(to_node), from_switch_type, device_ctx.rr_switch_inf[from_switch_type].name);
}
} /* End for all from_node edges */
}
static bool has_adjacent_channel(const t_rr_node& node, const DeviceGrid& grid) {
VTR_ASSERT(node.type() == IPIN || node.type() == OPIN);
static bool has_adjacent_channel(const RRGraph& rr_graph, const RRNodeId& node, const DeviceGrid& grid) {
VTR_ASSERT(rr_graph.node_type(node) == IPIN || rr_graph.node_type(node) == OPIN);
if ((node.xlow() == 0 && node.side() != RIGHT) //left device edge connects only along block's right side
|| (node.ylow() == int(grid.height() - 1) && node.side() != BOTTOM) //top device edge connects only along block's bottom side
|| (node.xlow() == int(grid.width() - 1) && node.side() != LEFT) //right deivce edge connects only along block's left side
|| (node.ylow() == 0 && node.side() != TOP) //bottom deivce edge connects only along block's top side
if ((rr_graph.node_xlow(node) == 0 && rr_graph.node_side(node) != RIGHT) //left device edge connects only along block's right side
|| (rr_graph.node_ylow(node) == int(grid.height() - 1) && rr_graph.node_side(node) != BOTTOM) //top device edge connects only along block's bottom side
|| (rr_graph.node_xlow(node) == int(grid.width() - 1) && rr_graph.node_side(node) != LEFT) //right deivce edge connects only along block's left side
|| (rr_graph.node_ylow(node) == 0 && rr_graph.node_side(node) != TOP) //bottom deivce edge connects only along block's top side
) {
return false;
}
return true; //All other blocks will be surrounded on all sides by channels
}
static void check_rr_edge(int from_node, int iedge, int to_node) {
static void check_rr_edge(const RREdgeId& iedge, const RRNodeId& to_node) {
auto& device_ctx = g_vpr_ctx.device();
//Check that to to_node's fan-in is correct, given the switch type
int iswitch = device_ctx.rr_nodes[from_node].edge_switch(iedge);
int iswitch = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
auto switch_type = device_ctx.rr_switch_inf[iswitch].type();
int to_fanin = device_ctx.rr_nodes[to_node].fan_in();
int to_fanin = device_ctx.rr_graph.node_in_edges(to_node).size();
switch (switch_type) {
case SwitchType::BUFFER:
//Buffer switches are non-configurable, and uni-directional -- they must have only one driver

View File

@ -6,6 +6,6 @@ void check_rr_graph(const t_graph_type graph_type,
const DeviceGrid& grid,
const std::vector<t_physical_tile_type>& types);
void check_rr_node(int inode, enum e_route_type route_type, const DeviceContext& device_ctx);
void check_rr_node(const RRNodeId& inode, enum e_route_type route_type, const DeviceContext& device_ctx);
#endif

View File

@ -2,6 +2,7 @@
#include "globals.h"
#include "rr_graph2.h"
#include "rr_graph_obj_util.h"
#include "vtr_assert.h"
#include "vtr_log.h"
@ -46,17 +47,16 @@ void RoutingToClockConnection::create_switches(const ClockRRGraphBuilder& clock_
std::srand(seed);
auto& device_ctx = g_vpr_ctx.mutable_device();
auto& rr_nodes = device_ctx.rr_nodes;
auto& rr_node_indices = device_ctx.rr_node_indices;
auto& rr_graph = device_ctx.rr_graph;
int virtual_clock_network_root_idx = create_virtual_clock_network_sink_node(switch_location.x, switch_location.y);
RRNodeId virtual_clock_network_root_idx = create_virtual_clock_network_sink_node(switch_location.x, switch_location.y);
device_ctx.virtual_clock_network_root_idx = virtual_clock_network_root_idx;
// rr_node indices for x and y channel routing wires and clock wires to connect to
auto x_wire_indices = get_rr_node_chan_wires_at_location(
rr_node_indices, CHANX, switch_location.x, switch_location.y);
auto y_wire_indices = get_rr_node_chan_wires_at_location(
rr_node_indices, CHANY, switch_location.x, switch_location.y);
auto x_wire_indices = find_rr_graph_chan_nodes(
rr_graph, switch_location.x, switch_location.y, CHANX);
auto y_wire_indices = find_rr_graph_chan_nodes(
rr_graph, switch_location.x, switch_location.y, CHANY);
auto clock_indices = clock_graph.get_rr_node_indices_at_switch_location(
clock_to_connect_to, switch_point_name, switch_location.x, switch_location.y);
@ -68,36 +68,40 @@ void RoutingToClockConnection::create_switches(const ClockRRGraphBuilder& clock_
// Connect to x-channel wires
unsigned num_wires_x = x_wire_indices.size() * fc;
for (size_t i = 0; i < num_wires_x; i++) {
rr_nodes[x_wire_indices[i]].add_edge(clock_index, rr_switch_idx);
rr_graph.create_edge(x_wire_indices[i], clock_index, RRSwitchId(rr_switch_idx), true);
}
// Connect to y-channel wires
unsigned num_wires_y = y_wire_indices.size() * fc;
for (size_t i = 0; i < num_wires_y; i++) {
rr_nodes[y_wire_indices[i]].add_edge(clock_index, rr_switch_idx);
rr_graph.create_edge(y_wire_indices[i], clock_index, RRSwitchId(rr_switch_idx), true);
}
// Connect to virtual clock sink node
// used by the two stage router
rr_nodes[clock_index].add_edge(virtual_clock_network_root_idx, rr_switch_idx);
rr_graph.create_edge(clock_index, virtual_clock_network_root_idx, RRSwitchId(rr_switch_idx), true);
}
}
int RoutingToClockConnection::create_virtual_clock_network_sink_node(
RRNodeId RoutingToClockConnection::create_virtual_clock_network_sink_node(
int x,
int y) {
auto& device_ctx = g_vpr_ctx.mutable_device();
auto& rr_nodes = device_ctx.rr_nodes;
rr_nodes.emplace_back();
auto node_index = rr_nodes.size() - 1;
auto& rr_graph = device_ctx.rr_graph;
rr_nodes[node_index].set_coordinates(x, y, x, y);
rr_nodes[node_index].set_capacity(1);
rr_nodes[node_index].set_cost_index(SINK_COST_INDEX);
rr_nodes[node_index].set_type(SINK);
RRNodeId node_index = rr_graph.create_node(SINK);
rr_graph.set_node_bounding_box(node_index, vtr::Rect<short>(x, y, x, y));
rr_graph.set_node_capacity(node_index, 1);
rr_graph.set_node_cost_index(node_index, SINK_COST_INDEX);
float R = 0.;
float C = 0.;
rr_nodes[node_index].set_rc_index(find_create_rr_rc_data(R, C));
rr_graph.set_node_rc_data_index(node_index, find_create_rr_rc_data(R, C));
/* Set a ptc_num here as RRGraph object does need it to build fast look-up
* Legacy codes fail to do this!
*/
rr_graph.set_node_ptc_num(node_index, device_ctx.grid[x][y].type->num_class);
return node_index;
}
@ -137,7 +141,7 @@ void ClockToClockConneciton::set_fc_val(float fc_val) {
void ClockToClockConneciton::create_switches(const ClockRRGraphBuilder& clock_graph) {
auto& device_ctx = g_vpr_ctx.mutable_device();
auto& grid = device_ctx.grid;
auto& rr_nodes = device_ctx.rr_nodes;
auto& rr_graph = device_ctx.rr_graph;
auto to_locations = clock_graph.get_switch_locations(to_clock, to_switch);
@ -179,7 +183,7 @@ void ClockToClockConneciton::create_switches(const ClockRRGraphBuilder& clock_gr
if (from_itter == from_rr_node_indices.end()) {
from_itter = from_rr_node_indices.begin();
}
rr_nodes[*from_itter].add_edge(to_index, rr_switch_idx);
rr_graph.create_edge(*from_itter, to_index, RRSwitchId(rr_switch_idx), true);
from_itter++;
}
}
@ -213,8 +217,7 @@ void ClockToPinsConnection::set_fc_val(float fc_val) {
void ClockToPinsConnection::create_switches(const ClockRRGraphBuilder& clock_graph) {
auto& device_ctx = g_vpr_ctx.mutable_device();
auto& rr_nodes = device_ctx.rr_nodes;
auto& rr_node_indices = device_ctx.rr_node_indices;
auto& rr_graph = device_ctx.rr_graph;
auto& grid = device_ctx.grid;
for (size_t x = 0; x < grid.width(); x++) {
@ -274,8 +277,7 @@ void ClockToPinsConnection::create_switches(const ClockRRGraphBuilder& clock_gra
clock_y_offset = -1; // pick the chanx below the block
}
auto clock_pin_node_idx = get_rr_node_index(
rr_node_indices,
auto clock_pin_node_idx = rr_graph.find_node(
x,
y,
IPIN,
@ -290,7 +292,7 @@ void ClockToPinsConnection::create_switches(const ClockRRGraphBuilder& clock_gra
//Create edges depending on Fc
for (size_t i = 0; i < clock_network_indices.size() * fc; i++) {
rr_nodes[clock_network_indices[i]].add_edge(clock_pin_node_idx, rr_switch_idx);
rr_graph.create_edge(clock_network_indices[i], clock_pin_node_idx, RRSwitchId(rr_switch_idx), true);
}
}
}

View File

@ -54,7 +54,7 @@ class RoutingToClockConnection : public ClockConnection {
*/
/* Connects the inter-block routing to the clock source at the specified coordinates */
void create_switches(const ClockRRGraphBuilder& clock_graph);
int create_virtual_clock_network_sink_node(int x, int y);
RRNodeId create_virtual_clock_network_sink_node(int x, int y);
};
class ClockToClockConneciton : public ClockConnection {

View File

@ -180,7 +180,7 @@ void ClockRib::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGraphB
(void)num_segments;
auto& device_ctx = g_vpr_ctx.mutable_device();
auto& rr_nodes = device_ctx.rr_nodes;
auto& rr_graph = device_ctx.rr_graph;
auto& grid = device_ctx.grid;
int ptc_num = clock_graph.get_and_increment_chanx_ptc_num(); // used for drawing
@ -231,7 +231,7 @@ void ClockRib::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGraphB
y,
ptc_num,
BI_DIRECTION,
rr_nodes);
rr_graph);
clock_graph.add_switch_location(get_name(), drive.name, drive_x, y, drive_node_idx);
// create rib wire to the right and left of the drive point
@ -240,13 +240,13 @@ void ClockRib::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGraphB
y,
ptc_num,
DEC_DIRECTION,
rr_nodes);
rr_graph);
auto right_node_idx = create_chanx_wire(drive_x + 1,
x_end,
y,
ptc_num,
INC_DIRECTION,
rr_nodes);
rr_graph);
record_tap_locations(x_start + x_offset,
x_end,
y,
@ -255,28 +255,26 @@ void ClockRib::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGraphB
clock_graph);
// connect drive point to each half rib using a directed switch
rr_nodes[drive_node_idx].add_edge(left_node_idx, drive.switch_idx);
rr_nodes[drive_node_idx].add_edge(right_node_idx, drive.switch_idx);
rr_graph.create_edge(drive_node_idx, left_node_idx, RRSwitchId(drive.switch_idx), true);
rr_graph.create_edge(drive_node_idx, right_node_idx, RRSwitchId(drive.switch_idx), true);
}
}
}
int ClockRib::create_chanx_wire(int x_start,
int x_end,
int y,
int ptc_num,
e_direction direction,
std::vector<t_rr_node>& rr_nodes) {
rr_nodes.emplace_back();
auto node_index = rr_nodes.size() - 1;
RRNodeId ClockRib::create_chanx_wire(int x_start,
int x_end,
int y,
int ptc_num,
e_direction direction,
RRGraph& rr_graph) {
RRNodeId node_index = rr_graph.create_node(CHANX);
rr_nodes[node_index].set_coordinates(x_start, y, x_end, y);
rr_nodes[node_index].set_type(CHANX);
rr_nodes[node_index].set_capacity(1);
rr_nodes[node_index].set_track_num(ptc_num);
rr_nodes[node_index].set_rc_index(find_create_rr_rc_data(
rr_graph.set_node_bounding_box(node_index, vtr::Rect<short>(x_start, y, x_end, y));
rr_graph.set_node_capacity(node_index, 1);
rr_graph.set_node_track_num(node_index, ptc_num);
rr_graph.set_node_rc_data_index(node_index, find_create_rr_rc_data(
x_chan_wire.layer.r_metal, x_chan_wire.layer.c_metal));
rr_nodes[node_index].set_direction(direction);
rr_graph.set_node_direction(node_index, direction);
short seg_index = 0;
switch (direction) {
@ -293,7 +291,7 @@ int ClockRib::create_chanx_wire(int x_start,
VTR_ASSERT_MSG(false, "Unidentified direction type for clock rib");
break;
}
rr_nodes[node_index].set_cost_index(CHANX_COST_INDEX_START + seg_index); // Actual value set later
rr_graph.set_node_cost_index(node_index, CHANX_COST_INDEX_START + seg_index); // Actual value set later
return node_index;
}
@ -301,8 +299,8 @@ int ClockRib::create_chanx_wire(int x_start,
void ClockRib::record_tap_locations(unsigned x_start,
unsigned x_end,
unsigned y,
int left_rr_node_idx,
int right_rr_node_idx,
const RRNodeId& left_rr_node_idx,
const RRNodeId& right_rr_node_idx,
ClockRRGraphBuilder& clock_graph) {
for (unsigned x = x_start + tap.offset; x <= x_end; x += tap.increment) {
if (x < (x_start + drive.offset - 1)) {
@ -422,7 +420,7 @@ void ClockSpine::create_segments(std::vector<t_segment_inf>& segment_inf) {
void ClockSpine::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGraphBuilder& clock_graph,
int num_segments) {
auto& device_ctx = g_vpr_ctx.mutable_device();
auto& rr_nodes = device_ctx.rr_nodes;
auto& rr_graph = device_ctx.rr_graph;
auto& grid = device_ctx.grid;
int ptc_num = clock_graph.get_and_increment_chany_ptc_num(); // used for drawing
@ -473,7 +471,7 @@ void ClockSpine::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGrap
x,
ptc_num,
BI_DIRECTION,
rr_nodes,
rr_graph,
num_segments);
clock_graph.add_switch_location(get_name(), drive.name, x, drive_y, drive_node_idx);
@ -483,14 +481,14 @@ void ClockSpine::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGrap
x,
ptc_num,
DEC_DIRECTION,
rr_nodes,
rr_graph,
num_segments);
auto right_node_idx = create_chany_wire(drive_y + 1,
y_end,
x,
ptc_num,
INC_DIRECTION,
rr_nodes,
rr_graph,
num_segments);
// Keep a record of the rr_node idx that we will use to connects switches to at
@ -503,29 +501,27 @@ void ClockSpine::create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGrap
clock_graph);
// connect drive point to each half spine using a directed switch
rr_nodes[drive_node_idx].add_edge(left_node_idx, drive.switch_idx);
rr_nodes[drive_node_idx].add_edge(right_node_idx, drive.switch_idx);
rr_graph.create_edge(drive_node_idx, left_node_idx, RRSwitchId(drive.switch_idx), true);
rr_graph.create_edge(drive_node_idx, right_node_idx, RRSwitchId(drive.switch_idx), true);
}
}
}
int ClockSpine::create_chany_wire(int y_start,
int y_end,
int x,
int ptc_num,
e_direction direction,
std::vector<t_rr_node>& rr_nodes,
int num_segments) {
rr_nodes.emplace_back();
auto node_index = rr_nodes.size() - 1;
RRNodeId ClockSpine::create_chany_wire(int y_start,
int y_end,
int x,
int ptc_num,
e_direction direction,
RRGraph& rr_graph,
int num_segments) {
RRNodeId node_index = rr_graph.create_node(CHANY);
rr_nodes[node_index].set_coordinates(x, y_start, x, y_end);
rr_nodes[node_index].set_type(CHANY);
rr_nodes[node_index].set_capacity(1);
rr_nodes[node_index].set_track_num(ptc_num);
rr_nodes[node_index].set_rc_index(find_create_rr_rc_data(
rr_graph.set_node_bounding_box(node_index, vtr::Rect<short>(x, y_start, x, y_end));
rr_graph.set_node_capacity(node_index, 1);
rr_graph.set_node_track_num(node_index, ptc_num);
rr_graph.set_node_rc_data_index(node_index, find_create_rr_rc_data(
y_chan_wire.layer.r_metal, y_chan_wire.layer.c_metal));
rr_nodes[node_index].set_direction(direction);
rr_graph.set_node_direction(node_index, direction);
short seg_index = 0;
switch (direction) {
@ -542,7 +538,7 @@ int ClockSpine::create_chany_wire(int y_start,
VTR_ASSERT_MSG(false, "Unidentified direction type for clock rib");
break;
}
rr_nodes[node_index].set_cost_index(CHANX_COST_INDEX_START + num_segments + seg_index);
rr_graph.set_node_cost_index(node_index, CHANX_COST_INDEX_START + num_segments + seg_index);
return node_index;
}
@ -550,8 +546,8 @@ int ClockSpine::create_chany_wire(int y_start,
void ClockSpine::record_tap_locations(unsigned y_start,
unsigned y_end,
unsigned x,
int left_node_idx,
int right_node_idx,
const RRNodeId& left_node_idx,
const RRNodeId& right_node_idx,
ClockRRGraphBuilder& clock_graph) {
for (unsigned y = y_start + tap.offset; y <= y_end; y += tap.increment) {
if (y < (y_start + drive.offset - 1)) {

View File

@ -155,17 +155,17 @@ class ClockRib : public ClockNetwork {
void create_segments(std::vector<t_segment_inf>& segment_inf);
void create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGraphBuilder& clock_graph,
int num_segments);
int create_chanx_wire(int x_start,
int x_end,
int y,
int ptc_num,
e_direction direction,
std::vector<t_rr_node>& rr_nodes);
RRNodeId create_chanx_wire(int x_start,
int x_end,
int y,
int ptc_num,
e_direction direction,
RRGraph& rr_graph);
void record_tap_locations(unsigned x_start,
unsigned x_end,
unsigned y,
int left_rr_node_idx,
int right_rr_node_idx,
const RRNodeId& left_rr_node_idx,
const RRNodeId& right_rr_node_idx,
ClockRRGraphBuilder& clock_graph);
};
@ -211,18 +211,18 @@ class ClockSpine : public ClockNetwork {
void create_segments(std::vector<t_segment_inf>& segment_inf);
void create_rr_nodes_and_internal_edges_for_one_instance(ClockRRGraphBuilder& clock_graph,
int num_segments);
int create_chany_wire(int y_start,
int y_end,
int x,
int ptc_num,
e_direction direction,
std::vector<t_rr_node>& rr_nodes,
int num_segments);
RRNodeId create_chany_wire(int y_start,
int y_end,
int x,
int ptc_num,
e_direction direction,
RRGraph& rr_graph,
int num_segments);
void record_tap_locations(unsigned y_start,
unsigned y_end,
unsigned x,
int left_node_idx,
int right_node_idx,
const RRNodeId& left_node_idx,
const RRNodeId& right_node_idx,
ClockRRGraphBuilder& clock_graph);
};

View File

@ -34,7 +34,7 @@ class Connection_based_routing_resources {
Connection_based_routing_resources();
// adding to the resources when they are reached during pruning
// mark rr sink node as something that still needs to be reached
void toreach_rr_sink(int rr_sink_node) { remaining_targets.push_back(rr_sink_node); }
void toreach_rr_sink(const int& rr_sink_node) { remaining_targets.push_back(rr_sink_node); }
// mark rt sink node as something that has been legally reached
void reached_rt_sink(t_rt_node* rt_sink) { reached_rt_sinks.push_back(rt_sink); }

View File

@ -17,13 +17,13 @@ static bool breadth_first_route_net(ClusterNetId net_id, float bend_cost);
static void breadth_first_expand_trace_segment(t_trace* start_ptr,
int remaining_connections_to_sink,
std::vector<int>& modified_rr_node_inf);
std::vector<RRNodeId>& modified_rr_node_inf);
static void breadth_first_expand_neighbours(int inode, float pcost, ClusterNetId net_id, float bend_cost);
static void breadth_first_expand_neighbours(const RRNodeId& inode, float pcost, ClusterNetId net_id, float bend_cost);
static void breadth_first_add_to_heap(const float path_cost, const float bend_cost, const int from_node, const int to_node, const int iconn);
static void breadth_first_add_to_heap(const float path_cost, const float bend_cost, const RRNodeId& from_node, const RRNodeId& to_node, const RREdgeId& iconn);
static float evaluate_node_cost(const float prev_path_cost, const float bend_cost, const int from_node, const int to_node);
static float evaluate_node_cost(const float prev_path_cost, const float bend_cost, const RRNodeId& from_node, const RRNodeId& to_node);
static void breadth_first_add_source_to_heap(ClusterNetId net_id);
@ -156,7 +156,8 @@ static bool breadth_first_route_net(ClusterNetId net_id, float bend_cost) {
* lack of potential paths, rather than congestion), it returns false, as *
* routing is impossible on this architecture. Otherwise it returns true. */
int inode, remaining_connections_to_sink;
int remaining_connections_to_sink;
RRNodeId inode;
float pcost, new_pcost;
t_heap* current;
t_trace* tptr;
@ -178,7 +179,7 @@ static bool breadth_first_route_net(ClusterNetId net_id, float bend_cost) {
auto src_pin_id = cluster_ctx.clb_nlist.net_driver(net_id);
std::vector<int> modified_rr_node_inf; //RR node indicies with modified rr_node_route_inf
std::vector<RRNodeId> modified_rr_node_inf; //RR node indicies with modified rr_node_route_inf
for (auto pin_id : cluster_ctx.clb_nlist.net_sinks(net_id)) { /* Need n-1 wires to connect n pins */
@ -197,7 +198,7 @@ static bool breadth_first_route_net(ClusterNetId net_id, float bend_cost) {
inode = current->index;
#ifdef ROUTER_DEBUG
VTR_LOG(" Popped node %d\n", inode);
VTR_LOG(" Popped node %d\n", size_t(inode));
#endif
while (route_ctx.rr_node_route_inf[inode].target_flag == 0) {
@ -269,7 +270,7 @@ static bool breadth_first_route_net(ClusterNetId net_id, float bend_cost) {
static void breadth_first_expand_trace_segment(t_trace* start_ptr,
int remaining_connections_to_sink,
std::vector<int>& modified_rr_node_inf) {
std::vector<RRNodeId>& modified_rr_node_inf) {
/* Adds all the rr_nodes in the traceback segment starting at tptr (and *
* continuing to the end of the traceback) to the heap with a cost of zero. *
* This allows expansion to begin from the existing wiring. The *
@ -287,13 +288,13 @@ static void breadth_first_expand_trace_segment(t_trace* start_ptr,
* this means two connections to the same SINK. */
t_trace *tptr, *next_ptr;
int inode, sink_node, last_ipin_node;
RRNodeId inode, sink_node, last_ipin_node;
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.mutable_routing();
tptr = start_ptr;
if (tptr != nullptr && device_ctx.rr_nodes[tptr->index].type() == SINK) {
if (tptr != nullptr && device_ctx.rr_graph.node_type(tptr->index) == SINK) {
/* During logical equivalence case, only use one opin */
tptr = tptr->next;
}
@ -301,9 +302,9 @@ static void breadth_first_expand_trace_segment(t_trace* start_ptr,
if (remaining_connections_to_sink == 0) { /* Usual case. */
while (tptr != nullptr) {
#ifdef ROUTER_DEBUG
VTR_LOG(" Adding previous routing node %d to heap\n", tptr->index);
VTR_LOG(" Adding previous routing node %d to heap\n", size_t(tptr->index));
#endif
node_to_heap(tptr->index, 0., NO_PREVIOUS, NO_PREVIOUS, OPEN, OPEN);
node_to_heap(tptr->index, 0., RRNodeId::INVALID(), RREdgeId::INVALID(), OPEN, OPEN);
tptr = tptr->next;
}
} else { /* This case never executes for most logic blocks. */
@ -318,7 +319,7 @@ static void breadth_first_expand_trace_segment(t_trace* start_ptr,
return; /* No route yet */
next_ptr = tptr->next;
last_ipin_node = OPEN; /* Stops compiler from complaining. */
last_ipin_node = RRNodeId::INVALID(); /* Stops compiler from complaining. */
/* Can't put last SINK on heap with NO_PREVIOUS, etc, since that won't let *
* us reach it again. Instead, leave the last traceback element (SINK) off *
@ -327,17 +328,17 @@ static void breadth_first_expand_trace_segment(t_trace* start_ptr,
while (next_ptr != nullptr) {
inode = tptr->index;
#ifdef ROUTER_DEBUG
VTR_LOG(" Adding previous routing node %d to heap*\n", tptr->index);
VTR_LOG(" Adding previous routing node %d to heap*\n", size_t(tptr->index));
#endif
node_to_heap(inode, 0., NO_PREVIOUS, NO_PREVIOUS, OPEN, OPEN);
node_to_heap(inode, 0., RRNodeId::INVALID(), RREdgeId::INVALID(), OPEN, OPEN);
if (device_ctx.rr_nodes[inode].type() == IPIN)
if (device_ctx.rr_graph.node_type(inode) == IPIN)
last_ipin_node = inode;
tptr = next_ptr;
next_ptr = tptr->next;
}
VTR_ASSERT(last_ipin_node >= 0);
VTR_ASSERT(true == device_ctx.rr_graph.valid_node_id(last_ipin_node));
/* This will stop the IPIN node used to get to this SINK from being *
* reexpanded for the remainder of this net's routing. This will make us *
@ -363,24 +364,21 @@ static void breadth_first_expand_trace_segment(t_trace* start_ptr,
}
}
static void breadth_first_expand_neighbours(int inode, float pcost, ClusterNetId net_id, float bend_cost) {
static void breadth_first_expand_neighbours(const RRNodeId& inode, float pcost, ClusterNetId net_id, float bend_cost) {
/* Puts all the rr_nodes adjacent to inode on the heap. rr_nodes outside *
* the expanded bounding box specified in route_bb are not added to the *
* heap. pcost is the path_cost to get to inode. */
int iconn, to_node, num_edges;
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
num_edges = device_ctx.rr_nodes[inode].num_edges();
for (iconn = 0; iconn < num_edges; iconn++) {
to_node = device_ctx.rr_nodes[inode].edge_sink_node(iconn);
for (const RREdgeId& iconn : device_ctx.rr_graph.node_out_edges(inode)) {
const RRNodeId& to_node = device_ctx.rr_graph.edge_sink_node(iconn);
if (device_ctx.rr_nodes[to_node].xhigh() < route_ctx.route_bb[net_id].xmin
|| device_ctx.rr_nodes[to_node].xlow() > route_ctx.route_bb[net_id].xmax
|| device_ctx.rr_nodes[to_node].yhigh() < route_ctx.route_bb[net_id].ymin
|| device_ctx.rr_nodes[to_node].ylow() > route_ctx.route_bb[net_id].ymax)
if (device_ctx.rr_graph.node_xhigh(to_node) < route_ctx.route_bb[net_id].xmin
|| device_ctx.rr_graph.node_xlow(to_node) > route_ctx.route_bb[net_id].xmax
|| device_ctx.rr_graph.node_yhigh(to_node) < route_ctx.route_bb[net_id].ymin
|| device_ctx.rr_graph.node_ylow(to_node) > route_ctx.route_bb[net_id].ymax)
continue; /* Node is outside (expanded) bounding box. */
breadth_first_add_to_heap(pcost, bend_cost, inode, to_node, iconn);
@ -388,9 +386,9 @@ static void breadth_first_expand_neighbours(int inode, float pcost, ClusterNetId
}
//Add to_node to the heap, and also add any nodes which are connected by non-configurable edges
static void breadth_first_add_to_heap(const float path_cost, const float bend_cost, const int from_node, const int to_node, const int iconn) {
static void breadth_first_add_to_heap(const float path_cost, const float bend_cost, const RRNodeId& from_node, const RRNodeId& to_node, const RREdgeId& iconn) {
#ifdef ROUTER_DEBUG
VTR_LOG(" Expanding node %d\n", to_node);
VTR_LOG(" Expanding node %d\n", size_t(to_node));
#endif
//Create a heap element to represent this node (and any non-configurably connected nodes)
@ -413,14 +411,14 @@ static void breadth_first_add_to_heap(const float path_cost, const float bend_co
add_to_heap(next);
}
static float evaluate_node_cost(const float prev_path_cost, const float bend_cost, const int from_node, const int to_node) {
static float evaluate_node_cost(const float prev_path_cost, const float bend_cost, const RRNodeId& from_node, const RRNodeId& to_node) {
auto& device_ctx = g_vpr_ctx.device();
float tot_cost = prev_path_cost + get_rr_cong_cost(to_node);
if (bend_cost != 0.) {
t_rr_type from_type = device_ctx.rr_nodes[from_node].type();
t_rr_type to_type = device_ctx.rr_nodes[to_node].type();
t_rr_type from_type = device_ctx.rr_graph.node_type(from_node);
t_rr_type to_type = device_ctx.rr_graph.node_type(to_node);
if ((from_type == CHANX && to_type == CHANY)
|| (from_type == CHANY && to_type == CHANX))
tot_cost += bend_cost;
@ -432,7 +430,7 @@ static float evaluate_node_cost(const float prev_path_cost, const float bend_cos
static void breadth_first_add_source_to_heap(ClusterNetId net_id) {
/* Adds the SOURCE of this net to the heap. Used to start a net's routing. */
int inode;
RRNodeId inode;
float cost;
auto& route_ctx = g_vpr_ctx.routing();
@ -444,5 +442,5 @@ static void breadth_first_add_source_to_heap(ClusterNetId net_id) {
VTR_LOG(" Adding Source node %d to heap\n", inode);
#endif
node_to_heap(inode, cost, NO_PREVIOUS, NO_PREVIOUS, OPEN, OPEN);
node_to_heap(inode, cost, RRNodeId::INVALID(), RREdgeId::INVALID(), OPEN, OPEN);
}

View File

@ -92,19 +92,19 @@ static int num_linked_f_pointer_allocated = 0;
* */
/******************** Subroutines local to route_common.c *******************/
static t_trace_branch traceback_branch(int node, std::unordered_set<int>& main_branch_visited);
static std::pair<t_trace*, t_trace*> add_trace_non_configurable(t_trace* head, t_trace* tail, int node, std::unordered_set<int>& visited);
static std::pair<t_trace*, t_trace*> add_trace_non_configurable_recurr(int node, std::unordered_set<int>& visited, int depth = 0);
static t_trace_branch traceback_branch(const RRNodeId& node, std::unordered_set<RRNodeId>& main_branch_visited);
static std::pair<t_trace*, t_trace*> add_trace_non_configurable(t_trace* head, t_trace* tail, const RRNodeId& node, std::unordered_set<RRNodeId>& visited);
static std::pair<t_trace*, t_trace*> add_trace_non_configurable_recurr(const RRNodeId& node, std::unordered_set<RRNodeId>& visited, int depth = 0);
static vtr::vector<ClusterNetId, std::vector<int>> load_net_rr_terminals(const t_rr_node_indices& L_rr_node_indices);
static vtr::vector<ClusterBlockId, std::vector<int>> load_rr_clb_sources(const t_rr_node_indices& L_rr_node_indices);
static vtr::vector<ClusterNetId, std::vector<RRNodeId>> load_net_rr_terminals(const RRGraph& rr_graph);
static vtr::vector<ClusterBlockId, std::vector<RRNodeId>> load_rr_clb_sources(const RRGraph& rr_graph);
static t_clb_opins_used alloc_and_load_clb_opins_used_locally();
static void adjust_one_rr_occ_and_apcost(int inode, int add_or_sub, float pres_fac, float acc_fac);
static void adjust_one_rr_occ_and_apcost(const RRNodeId& inode, int add_or_sub, float pres_fac, float acc_fac);
bool validate_traceback_recurr(t_trace* trace, std::set<int>& seen_rr_nodes);
static bool validate_trace_nodes(t_trace* head, const std::unordered_set<int>& trace_nodes);
static float get_single_rr_cong_cost(int inode);
bool validate_traceback_recurr(t_trace* trace, std::set<RRNodeId>& seen_rr_nodes);
static bool validate_trace_nodes(t_trace* head, const std::unordered_set<RRNodeId>& trace_nodes);
static float get_single_rr_cong_cost(const RRNodeId& inode);
/************************** Subroutine definitions ***************************/
@ -179,7 +179,8 @@ void restore_routing(vtr::vector<ClusterNetId, t_trace*>& best_routing,
* Use this number as a routing serial number to ensure that programming *
* changes do not break the router. */
void get_serial_num() {
int serial_num, inode;
int serial_num;
RRNodeId inode;
t_trace* tptr;
auto& cluster_ctx = g_vpr_ctx.clustering();
@ -196,11 +197,11 @@ void get_serial_num() {
while (tptr != nullptr) {
inode = tptr->index;
serial_num += (size_t(net_id) + 1)
* (device_ctx.rr_nodes[inode].xlow() * (device_ctx.grid.width()) - device_ctx.rr_nodes[inode].yhigh());
* (device_ctx.rr_graph.node_xlow(inode) * (device_ctx.grid.width()) - device_ctx.rr_graph.node_yhigh(inode));
serial_num -= device_ctx.rr_nodes[inode].ptc_num() * (size_t(net_id) + 1) * 10;
serial_num -= device_ctx.rr_graph.node_ptc_num(inode) * (size_t(net_id) + 1) * 10;
serial_num -= device_ctx.rr_nodes[inode].type() * (size_t(net_id) + 1) * 100;
serial_num -= device_ctx.rr_graph.node_type(inode) * (size_t(net_id) + 1) * 100;
serial_num %= 2000000000; /* Prevent overflow */
tptr = tptr->next;
}
@ -336,8 +337,8 @@ bool feasible_routing() {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_nodes[inode].capacity()) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_graph.node_capacity(inode)) {
return (false);
}
}
@ -346,14 +347,14 @@ bool feasible_routing() {
}
//Returns all RR nodes in the current routing which are congested
std::vector<int> collect_congested_rr_nodes() {
std::vector<RRNodeId> collect_congested_rr_nodes() {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
std::vector<int> congested_rr_nodes;
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
std::vector<RRNodeId> congested_rr_nodes;
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
short occ = route_ctx.rr_node_route_inf[inode].occ();
short capacity = device_ctx.rr_nodes[inode].capacity();
short capacity = device_ctx.rr_graph.node_capacity(inode);
if (occ > capacity) {
congested_rr_nodes.push_back(inode);
@ -362,18 +363,18 @@ std::vector<int> collect_congested_rr_nodes() {
return congested_rr_nodes;
}
/* Returns a vector from [0..device_ctx.rr_nodes.size()-1] containing the set
/* Returns a vector from [0..device_ctx.rr_graph.nodes().size()-1] containing the set
* of nets using each RR node */
std::vector<std::set<ClusterNetId>> collect_rr_node_nets() {
vtr::vector<RRNodeId, std::set<ClusterNetId>> collect_rr_node_nets() {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
auto& cluster_ctx = g_vpr_ctx.clustering();
std::vector<std::set<ClusterNetId>> rr_node_nets(device_ctx.rr_nodes.size());
vtr::vector<RRNodeId, std::set<ClusterNetId>> rr_node_nets(device_ctx.rr_graph.nodes().size());
for (ClusterNetId inet : cluster_ctx.clb_nlist.nets()) {
t_trace* trace_elem = route_ctx.trace[inet].head;
while (trace_elem) {
int rr_node = trace_elem->index;
const RRNodeId& rr_node = trace_elem->index;
rr_node_nets[rr_node].insert(inet);
@ -414,7 +415,7 @@ void pathfinder_update_path_cost(t_trace* route_segment_start,
} /* End while loop -- did an entire traceback. */
}
void pathfinder_update_single_node_cost(int inode, int add_or_sub, float pres_fac) {
void pathfinder_update_single_node_cost(const RRNodeId& inode, int add_or_sub, float pres_fac) {
/* Updates pathfinder's congestion cost by either adding or removing the
* usage of a resource node. pres_cost is Pn in the Pathfinder paper.
* pres_cost is set according to the overuse that would result from having
@ -428,7 +429,7 @@ void pathfinder_update_single_node_cost(int inode, int add_or_sub, float pres_fa
// can't have negative occupancy
VTR_ASSERT(occ >= 0);
int capacity = device_ctx.rr_nodes[inode].capacity();
int capacity = device_ctx.rr_graph.node_capacity(inode);
if (occ < capacity) {
route_ctx.rr_node_route_inf[inode].pres_cost = 1.0;
} else {
@ -449,9 +450,9 @@ void pathfinder_update_cost(float pres_fac, float acc_fac) {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.mutable_routing();
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
occ = route_ctx.rr_node_route_inf[inode].occ();
capacity = device_ctx.rr_nodes[inode].capacity();
capacity = device_ctx.rr_graph.node_capacity(inode);
if (occ > capacity) {
route_ctx.rr_node_route_inf[inode].acc_cost += (occ - capacity) * acc_fac;
@ -496,9 +497,9 @@ void init_route_structs(int bb_factor) {
init_heap(device_ctx.grid);
//Various look-ups
route_ctx.net_rr_terminals = load_net_rr_terminals(device_ctx.rr_node_indices);
route_ctx.net_rr_terminals = load_net_rr_terminals(device_ctx.rr_graph);
route_ctx.route_bb = load_route_bb(bb_factor);
route_ctx.rr_blk_source = load_rr_clb_sources(device_ctx.rr_node_indices);
route_ctx.rr_blk_source = load_rr_clb_sources(device_ctx.rr_graph);
route_ctx.clb_opins_used_locally = alloc_and_load_clb_opins_used_locally();
route_ctx.net_status.resize(cluster_ctx.clb_nlist.nets().size());
@ -550,14 +551,15 @@ update_traceback(t_heap* hptr, ClusterNetId net_id) {
//Traces back a new routing branch starting from the specified 'node' and working backwards to any existing routing.
//Returns the new branch, and also updates trace_nodes for any new nodes which are included in the branches traceback.
static t_trace_branch traceback_branch(int node, std::unordered_set<int>& trace_nodes) {
static t_trace_branch traceback_branch(const RRNodeId& node, std::unordered_set<RRNodeId>& trace_nodes) {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
auto rr_type = device_ctx.rr_nodes[node].type();
auto rr_type = device_ctx.rr_graph.node_type(node);
if (rr_type != SINK) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in traceback_branch: Expected type = SINK (%d).\n");
"in traceback_branch: Expected type = SINK (%d).\n",
size_t(node));
}
//We construct the main traceback by walking from the given node back to the source,
@ -570,16 +572,16 @@ static t_trace_branch traceback_branch(int node, std::unordered_set<int>& trace_
trace_nodes.insert(node);
std::vector<int> new_nodes_added_to_traceback = {node};
std::vector<RRNodeId> new_nodes_added_to_traceback = {node};
auto iedge = route_ctx.rr_node_route_inf[node].prev_edge;
int inode = route_ctx.rr_node_route_inf[node].prev_node;
RREdgeId iedge = route_ctx.rr_node_route_inf[node].prev_edge;
RRNodeId inode = route_ctx.rr_node_route_inf[node].prev_node;
while (inode != NO_PREVIOUS) {
while (inode != RRNodeId::INVALID()) {
//Add the current node to the head of traceback
t_trace* prev_ptr = alloc_trace_data();
prev_ptr->index = inode;
prev_ptr->iswitch = device_ctx.rr_nodes[inode].edge_switch(iedge);
prev_ptr->iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));
prev_ptr->next = branch_head;
branch_head = prev_ptr;
@ -597,7 +599,7 @@ static t_trace_branch traceback_branch(int node, std::unordered_set<int>& trace_
//We next re-expand all the main-branch nodes to add any non-configurably connected side branches
// We are careful to do this *after* the main branch is constructed to ensure nodes which are both
// non-configurably connected *and* part of the main branch are only added to the traceback once.
for (int new_node : new_nodes_added_to_traceback) {
for (const RRNodeId& new_node : new_nodes_added_to_traceback) {
//Expand each main branch node
std::tie(branch_head, branch_tail) = add_trace_non_configurable(branch_head, branch_tail, new_node, trace_nodes);
}
@ -608,7 +610,7 @@ static t_trace_branch traceback_branch(int node, std::unordered_set<int>& trace_
//Traces any non-configurable subtrees from branch_head, returning the new branch_head and updating trace_nodes
//
//This effectively does a depth-first traversal
static std::pair<t_trace*, t_trace*> add_trace_non_configurable(t_trace* head, t_trace* tail, int node, std::unordered_set<int>& trace_nodes) {
static std::pair<t_trace*, t_trace*> add_trace_non_configurable(t_trace* head, t_trace* tail, const RRNodeId& node, std::unordered_set<RRNodeId>& trace_nodes) {
//Trace any non-configurable subtrees
t_trace* subtree_head = nullptr;
t_trace* subtree_tail = nullptr;
@ -632,17 +634,17 @@ static std::pair<t_trace*, t_trace*> add_trace_non_configurable(t_trace* head, t
}
//Recursive helper function for add_trace_non_configurable()
static std::pair<t_trace*, t_trace*> add_trace_non_configurable_recurr(int node, std::unordered_set<int>& trace_nodes, int depth) {
static std::pair<t_trace*, t_trace*> add_trace_non_configurable_recurr(const RRNodeId& node, std::unordered_set<RRNodeId>& trace_nodes, int depth) {
t_trace* head = nullptr;
t_trace* tail = nullptr;
//Record the non-configurable out-going edges
std::vector<t_edge_size> unvisited_non_configurable_edges;
std::vector<RREdgeId> unvisited_non_configurable_edges;
auto& device_ctx = g_vpr_ctx.device();
for (auto iedge : device_ctx.rr_nodes[node].non_configurable_edges()) {
VTR_ASSERT_SAFE(!device_ctx.rr_nodes[node].edge_is_configurable(iedge));
for (auto iedge : device_ctx.rr_graph.node_non_configurable_out_edges(node)) {
VTR_ASSERT_SAFE(!device_ctx.rr_graph.edge_is_configurable(iedge));
int to_node = device_ctx.rr_nodes[node].edge_sink_node(iedge);
const RRNodeId& to_node = device_ctx.rr_graph.edge_sink_node(iedge);
if (!trace_nodes.count(to_node)) {
unvisited_non_configurable_edges.push_back(iedge);
@ -665,8 +667,8 @@ static std::pair<t_trace*, t_trace*> add_trace_non_configurable_recurr(int node,
} else {
//Recursive case: intermediate node with non-configurable edges
for (auto iedge : unvisited_non_configurable_edges) {
int to_node = device_ctx.rr_nodes[node].edge_sink_node(iedge);
int iswitch = device_ctx.rr_nodes[node].edge_switch(iedge);
const RRNodeId& to_node = device_ctx.rr_graph.edge_sink_node(iedge);
int iswitch = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
VTR_ASSERT(!trace_nodes.count(to_node));
trace_nodes.insert(node);
@ -706,26 +708,26 @@ static std::pair<t_trace*, t_trace*> add_trace_non_configurable_recurr(int node,
/* The routine sets the path_cost to HUGE_POSITIVE_FLOAT for *
* all channel segments touched by previous routing phases. */
void reset_path_costs(const std::vector<int>& visited_rr_nodes) {
void reset_path_costs(const std::vector<RRNodeId>& visited_rr_nodes) {
auto& route_ctx = g_vpr_ctx.mutable_routing();
for (auto node : visited_rr_nodes) {
route_ctx.rr_node_route_inf[node].path_cost = std::numeric_limits<float>::infinity();
route_ctx.rr_node_route_inf[node].backward_path_cost = std::numeric_limits<float>::infinity();
route_ctx.rr_node_route_inf[node].prev_node = NO_PREVIOUS;
route_ctx.rr_node_route_inf[node].prev_edge = NO_PREVIOUS;
route_ctx.rr_node_route_inf[node].prev_node = RRNodeId::INVALID();
route_ctx.rr_node_route_inf[node].prev_edge = RREdgeId::INVALID();
}
}
/* Returns the *congestion* cost of using this rr_node. */
float get_rr_cong_cost(int inode) {
float get_rr_cong_cost(const RRNodeId& inode) {
auto& device_ctx = g_vpr_ctx.device();
float cost = get_single_rr_cong_cost(inode);
auto itr = device_ctx.rr_node_to_non_config_node_set.find(inode);
if (itr != device_ctx.rr_node_to_non_config_node_set.end()) {
for (int node : device_ctx.rr_non_config_node_sets[itr->second]) {
for (RRNodeId node : device_ctx.rr_non_config_node_sets[itr->second]) {
if (node == inode) {
continue; //Already included above
}
@ -738,11 +740,11 @@ float get_rr_cong_cost(int inode) {
/* Returns the congestion cost of using this rr_node, *ignoring*
* non-configurable edges */
static float get_single_rr_cong_cost(int inode) {
static float get_single_rr_cong_cost(const RRNodeId& inode) {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
auto cost_index = device_ctx.rr_nodes[inode].cost_index();
auto cost_index = device_ctx.rr_graph.node_cost_index(inode);
float cost = device_ctx.rr_indexed_data[cost_index].base_cost
* route_ctx.rr_node_route_inf[inode].acc_cost
* route_ctx.rr_node_route_inf[inode].pres_cost;
@ -755,14 +757,12 @@ static float get_single_rr_cong_cost(int inode) {
* the same net to two inputs of an and-gate (and-gate inputs are logically *
* equivalent, so both will connect to the same SINK). */
void mark_ends(ClusterNetId net_id) {
unsigned int ipin;
int inode;
auto& cluster_ctx = g_vpr_ctx.clustering();
auto& route_ctx = g_vpr_ctx.mutable_routing();
for (ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ipin++) {
inode = route_ctx.net_rr_terminals[net_id][ipin];
for (unsigned int ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ipin++) {
const RRNodeId& inode = route_ctx.net_rr_terminals[net_id][ipin];
route_ctx.rr_node_route_inf[inode].target_flag++;
}
}
@ -770,11 +770,11 @@ void mark_ends(ClusterNetId net_id) {
void mark_remaining_ends(const std::vector<int>& remaining_sinks) {
// like mark_ends, but only performs it for the remaining sinks of a net
auto& route_ctx = g_vpr_ctx.mutable_routing();
for (int sink_node : remaining_sinks)
++route_ctx.rr_node_route_inf[sink_node].target_flag;
for (const int& sink_node : remaining_sinks)
++route_ctx.rr_node_route_inf[RRNodeId(sink_node)].target_flag;
}
void node_to_heap(int inode, float total_cost, int prev_node, int prev_edge, float backward_path_cost, float R_upstream) {
void node_to_heap(const RRNodeId& inode, float total_cost, const RRNodeId& prev_node, const RREdgeId& prev_edge, float backward_path_cost, float R_upstream) {
/* Puts an rr_node on the heap, if the new cost given is lower than the *
* current path_cost to this channel segment. The index of its predecessor *
* is stored to make traceback easy. The index of the edge used to get *
@ -790,8 +790,8 @@ void node_to_heap(int inode, float total_cost, int prev_node, int prev_edge, flo
t_heap* hptr = alloc_heap_data();
hptr->index = inode;
hptr->cost = total_cost;
VTR_ASSERT_SAFE(hptr->u.prev.node == NO_PREVIOUS);
VTR_ASSERT_SAFE(hptr->u.prev.edge == NO_PREVIOUS);
VTR_ASSERT_SAFE(hptr->u.prev.node == RRNodeId::INVALID());
VTR_ASSERT_SAFE(hptr->u.prev.edge == RREdgeId::INVALID());
hptr->u.prev.node = prev_node;
hptr->u.prev.edge = prev_edge;
hptr->backward_path_cost = backward_path_cost;
@ -1003,7 +1003,7 @@ void alloc_and_load_rr_node_route_structs() {
auto& route_ctx = g_vpr_ctx.mutable_routing();
auto& device_ctx = g_vpr_ctx.device();
route_ctx.rr_node_route_inf.resize(device_ctx.rr_nodes.size());
route_ctx.rr_node_route_inf.resize(device_ctx.rr_graph.nodes().size());
reset_rr_node_route_structs();
}
@ -1014,11 +1014,11 @@ void reset_rr_node_route_structs() {
auto& route_ctx = g_vpr_ctx.mutable_routing();
auto& device_ctx = g_vpr_ctx.device();
VTR_ASSERT(route_ctx.rr_node_route_inf.size() == size_t(device_ctx.rr_nodes.size()));
VTR_ASSERT(route_ctx.rr_node_route_inf.size() == size_t(device_ctx.rr_graph.nodes().size()));
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
route_ctx.rr_node_route_inf[inode].prev_node = NO_PREVIOUS;
route_ctx.rr_node_route_inf[inode].prev_edge = NO_PREVIOUS;
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
route_ctx.rr_node_route_inf[inode].prev_node = RRNodeId::INVALID();
route_ctx.rr_node_route_inf[inode].prev_edge = RREdgeId::INVALID();
route_ctx.rr_node_route_inf[inode].pres_cost = 1.0;
route_ctx.rr_node_route_inf[inode].acc_cost = 1.0;
route_ctx.rr_node_route_inf[inode].path_cost = std::numeric_limits<float>::infinity();
@ -1031,8 +1031,8 @@ void reset_rr_node_route_structs() {
/* Allocates and loads the route_ctx.net_rr_terminals data structure. For each net it stores the rr_node *
* index of the SOURCE of the net and all the SINKs of the net [clb_nlist.nets()][clb_nlist.net_pins()]. *
* Entry [inet][pnum] stores the rr index corresponding to the SOURCE (opin) or SINK (ipin) of the pin. */
static vtr::vector<ClusterNetId, std::vector<int>> load_net_rr_terminals(const t_rr_node_indices& L_rr_node_indices) {
vtr::vector<ClusterNetId, std::vector<int>> net_rr_terminals;
static vtr::vector<ClusterNetId, std::vector<RRNodeId>> load_net_rr_terminals(const RRGraph& rr_graph) {
vtr::vector<ClusterNetId, std::vector<RRNodeId>> net_rr_terminals;
auto& cluster_ctx = g_vpr_ctx.clustering();
auto& place_ctx = g_vpr_ctx.placement();
@ -1058,8 +1058,8 @@ static vtr::vector<ClusterNetId, std::vector<int>> load_net_rr_terminals(const t
int iclass = type->pin_class[phys_pin];
int inode = get_rr_node_index(L_rr_node_indices, i, j, (pin_count == 0 ? SOURCE : SINK), /* First pin is driver */
iclass);
RRNodeId inode = rr_graph.find_node(i, j, (pin_count == 0 ? SOURCE : SINK), /* First pin is driver */
iclass);
net_rr_terminals[net_id][pin_count] = inode;
pin_count++;
}
@ -1073,10 +1073,9 @@ static vtr::vector<ClusterNetId, std::vector<int>> load_net_rr_terminals(const t
* they are used only to reserve pins for locally used OPINs in the router. *
* [0..cluster_ctx.clb_nlist.blocks().size()-1][0..num_class-1]. *
* The values for blocks that are padsare NOT valid. */
static vtr::vector<ClusterBlockId, std::vector<int>> load_rr_clb_sources(const t_rr_node_indices& L_rr_node_indices) {
vtr::vector<ClusterBlockId, std::vector<int>> rr_blk_source;
static vtr::vector<ClusterBlockId, std::vector<RRNodeId>> load_rr_clb_sources(const RRGraph& rr_graph) {
vtr::vector<ClusterBlockId, std::vector<RRNodeId>> rr_blk_source;
int i, j, iclass, inode;
int class_low, class_high;
t_rr_type rr_type;
@ -1089,20 +1088,20 @@ static vtr::vector<ClusterBlockId, std::vector<int>> load_rr_clb_sources(const t
auto type = physical_tile_type(blk_id);
get_class_range_for_block(blk_id, &class_low, &class_high);
rr_blk_source[blk_id].resize(type->num_class);
for (iclass = 0; iclass < type->num_class; iclass++) {
for (int iclass = 0; iclass < type->num_class; iclass++) {
if (iclass >= class_low && iclass <= class_high) {
i = place_ctx.block_locs[blk_id].loc.x;
j = place_ctx.block_locs[blk_id].loc.y;
int i = place_ctx.block_locs[blk_id].loc.x;
int j = place_ctx.block_locs[blk_id].loc.y;
if (type->class_inf[iclass].type == DRIVER)
rr_type = SOURCE;
else
rr_type = SINK;
inode = get_rr_node_index(L_rr_node_indices, i, j, rr_type, iclass);
const RRNodeId& inode = rr_graph.find_node(i, j, rr_type, iclass);
rr_blk_source[blk_id][iclass] = inode;
} else {
rr_blk_source[blk_id][iclass] = OPEN;
rr_blk_source[blk_id][iclass] = RRNodeId::INVALID();
}
}
}
@ -1148,31 +1147,29 @@ t_bb load_net_route_bb(ClusterNetId net_id, int bb_factor) {
int max_dim = std::max<int>(device_ctx.grid.width() - 1, device_ctx.grid.height() - 1);
bb_factor = std::min(bb_factor, max_dim);
int driver_rr = route_ctx.net_rr_terminals[net_id][0];
const t_rr_node& source_node = device_ctx.rr_nodes[driver_rr];
VTR_ASSERT(source_node.type() == SOURCE);
const RRNodeId& driver_rr = route_ctx.net_rr_terminals[net_id][0];
VTR_ASSERT(device_ctx.rr_graph.node_type(driver_rr) == SOURCE);
VTR_ASSERT(source_node.xlow() <= source_node.xhigh());
VTR_ASSERT(source_node.ylow() <= source_node.yhigh());
VTR_ASSERT(device_ctx.rr_graph.node_xlow(driver_rr) <= device_ctx.rr_graph.node_xhigh(driver_rr));
VTR_ASSERT(device_ctx.rr_graph.node_ylow(driver_rr) <= device_ctx.rr_graph.node_yhigh(driver_rr));
int xmin = source_node.xlow();
int ymin = source_node.ylow();
int xmax = source_node.xhigh();
int ymax = source_node.yhigh();
int xmin = device_ctx.rr_graph.node_xlow(driver_rr);
int ymin = device_ctx.rr_graph.node_ylow(driver_rr);
int xmax = device_ctx.rr_graph.node_xhigh(driver_rr);
int ymax = device_ctx.rr_graph.node_yhigh(driver_rr);
auto net_sinks = cluster_ctx.clb_nlist.net_sinks(net_id);
for (size_t ipin = 1; ipin < net_sinks.size() + 1; ++ipin) { //Start at 1 since looping through sinks
int sink_rr = route_ctx.net_rr_terminals[net_id][ipin];
const t_rr_node& sink_node = device_ctx.rr_nodes[sink_rr];
VTR_ASSERT(sink_node.type() == SINK);
const RRNodeId& sink_rr = route_ctx.net_rr_terminals[net_id][ipin];
VTR_ASSERT(device_ctx.rr_graph.node_type(sink_rr) == SINK);
VTR_ASSERT(sink_node.xlow() <= sink_node.xhigh());
VTR_ASSERT(sink_node.ylow() <= sink_node.yhigh());
VTR_ASSERT(device_ctx.rr_graph.node_xlow(sink_rr) <= device_ctx.rr_graph.node_xhigh(sink_rr));
VTR_ASSERT(device_ctx.rr_graph.node_ylow(sink_rr) <= device_ctx.rr_graph.node_yhigh(sink_rr));
xmin = std::min<int>(xmin, sink_node.xlow());
xmax = std::max<int>(xmax, sink_node.xhigh());
ymin = std::min<int>(ymin, sink_node.ylow());
ymax = std::max<int>(ymax, sink_node.yhigh());
xmin = std::min<int>(xmin, device_ctx.rr_graph.node_xlow(sink_rr));
xmax = std::max<int>(xmax, device_ctx.rr_graph.node_xhigh(sink_rr));
ymin = std::min<int>(ymin, device_ctx.rr_graph.node_ylow(sink_rr));
ymax = std::max<int>(ymax, device_ctx.rr_graph.node_yhigh(sink_rr));
}
/* Want the channels on all 4 sides to be usuable, even if bb_factor = 0. */
@ -1192,7 +1189,7 @@ t_bb load_net_route_bb(ClusterNetId net_id, int bb_factor) {
return bb;
}
void add_to_mod_list(int inode, std::vector<int>& modified_rr_node_inf) {
void add_to_mod_list(const RRNodeId& inode, std::vector<RRNodeId>& modified_rr_node_inf) {
auto& route_ctx = g_vpr_ctx.routing();
if (std::isinf(route_ctx.rr_node_route_inf[inode].path_cost)) {
@ -1264,7 +1261,7 @@ void push_back(t_heap* const hptr) {
++heap_tail;
}
void push_back_node(int inode, float total_cost, int prev_node, int prev_edge, float backward_path_cost, float R_upstream) {
void push_back_node(const RRNodeId& inode, float total_cost, const RRNodeId& prev_node, const RREdgeId& prev_edge, float backward_path_cost, float R_upstream) {
/* Puts an rr_node on the heap with the same condition as node_to_heap,
* but do not fix heap property yet as that is more efficiently done from
* bottom up with build_heap */
@ -1316,7 +1313,7 @@ void verify_extract_top() {
std::cout << "starting to compare top elements\n";
size_t i = 0;
while (!is_empty_heap()) {
while (heap_copy[i]->index == OPEN)
while (heap_copy[i]->index == RRNodeId::INVALID())
++i; // skip the ones that won't be extracted
auto top = get_heap_head();
if (abs(top->cost - heap_copy[i]->cost) > float_epsilon)
@ -1371,7 +1368,7 @@ get_heap_head() {
}
heap_::sift_up(hole, heap[heap_tail]);
} while (cheapest->index == OPEN); /* Get another one if invalid entry. */
} while (cheapest->index == RRNodeId::INVALID()); /* Get another one if invalid entry. */
return (cheapest);
}
@ -1400,9 +1397,9 @@ alloc_heap_data() {
temp_ptr->cost = 0.;
temp_ptr->backward_path_cost = 0.;
temp_ptr->R_upstream = 0.;
temp_ptr->index = OPEN;
temp_ptr->u.prev.node = NO_PREVIOUS;
temp_ptr->u.prev.edge = NO_PREVIOUS;
temp_ptr->index = RRNodeId::INVALID();
temp_ptr->u.prev.node = RRNodeId::INVALID();
temp_ptr->u.prev.edge = RREdgeId::INVALID();
return (temp_ptr);
}
@ -1412,7 +1409,7 @@ void free_heap_data(t_heap* hptr) {
num_heap_allocated--;
}
void invalidate_heap_entries(int sink_node, int ipin_node) {
void invalidate_heap_entries(const RRNodeId& sink_node, const RRNodeId& ipin_node) {
/* Marks all the heap entries consisting of sink_node, where it was reached *
* via ipin_node, as invalid (OPEN). Used only by the breadth_first router *
* and even then only in rare circumstances. */
@ -1420,7 +1417,7 @@ void invalidate_heap_entries(int sink_node, int ipin_node) {
for (int i = 1; i < heap_tail; i++) {
if (heap[i]->index == sink_node) {
if (heap[i]->u.prev.node == ipin_node) {
heap[i]->index = OPEN; /* Invalid. */
heap[i]->index = RRNodeId::INVALID(); /* Invalid. */
break;
}
}
@ -1466,18 +1463,18 @@ void print_route(FILE* fp, const vtr::vector<ClusterNetId, t_traceback>& traceba
t_trace* tptr = route_ctx.trace[net_id].head;
while (tptr != nullptr) {
int inode = tptr->index;
t_rr_type rr_type = device_ctx.rr_nodes[inode].type();
int ilow = device_ctx.rr_nodes[inode].xlow();
int jlow = device_ctx.rr_nodes[inode].ylow();
RRNodeId inode = tptr->index;
t_rr_type rr_type = device_ctx.rr_graph.node_type(inode);
int ilow = device_ctx.rr_graph.node_xlow(inode);
int jlow = device_ctx.rr_graph.node_ylow(inode);
fprintf(fp, "Node:\t%d\t%6s (%d,%d) ", inode,
device_ctx.rr_nodes[inode].type_string(), ilow, jlow);
fprintf(fp, "Node:\t%ld\t%6s (%d,%d) ", size_t(inode),
rr_node_typename[device_ctx.rr_graph.node_type(inode)], ilow, jlow);
if ((ilow != device_ctx.rr_nodes[inode].xhigh())
|| (jlow != device_ctx.rr_nodes[inode].yhigh()))
fprintf(fp, "to (%d,%d) ", device_ctx.rr_nodes[inode].xhigh(),
device_ctx.rr_nodes[inode].yhigh());
if ((ilow != device_ctx.rr_graph.node_xhigh(inode))
|| (jlow != device_ctx.rr_graph.node_yhigh(inode)))
fprintf(fp, "to (%d,%d) ", device_ctx.rr_graph.node_xhigh(inode),
device_ctx.rr_graph.node_yhigh(inode));
switch (rr_type) {
case IPIN:
@ -1506,14 +1503,14 @@ void print_route(FILE* fp, const vtr::vector<ClusterNetId, t_traceback>& traceba
default:
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in print_route: Unexpected traceback element type: %d (%s).\n",
rr_type, device_ctx.rr_nodes[inode].type_string());
rr_type, rr_node_typename[device_ctx.rr_graph.node_type(inode)]);
break;
}
fprintf(fp, "%d ", device_ctx.rr_nodes[inode].ptc_num());
fprintf(fp, "%d ", device_ctx.rr_graph.node_ptc_num(inode));
if (!is_io_type(device_ctx.grid[ilow][jlow].type) && (rr_type == IPIN || rr_type == OPIN)) {
int pin_num = device_ctx.rr_nodes[inode].ptc_num();
int pin_num = device_ctx.rr_graph.node_ptc_num(inode);
int xoffset = device_ctx.grid[ilow][jlow].width_offset;
int yoffset = device_ctx.grid[ilow][jlow].height_offset;
ClusterBlockId iblock = place_ctx.grid_blocks[ilow - xoffset][jlow - yoffset].blocks[0];
@ -1599,8 +1596,7 @@ void print_route(const char* placement_file, const char* route_file) {
// To model this we 'reserve' these locally used outputs, ensuring that the router will not use them (as if it did
// this would equate to duplicating a BLE into an already in-use BLE instance, which is clearly incorrect).
void reserve_locally_used_opins(float pres_fac, float acc_fac, bool rip_up_local_opins) {
int num_local_opin, inode, from_node, iconn, num_edges, to_node;
int iclass, ipin;
int num_local_opin;
float cost;
t_heap* heap_head_ptr;
t_physical_tile_type_ptr type;
@ -1612,15 +1608,15 @@ void reserve_locally_used_opins(float pres_fac, float acc_fac, bool rip_up_local
if (rip_up_local_opins) {
for (auto blk_id : cluster_ctx.clb_nlist.blocks()) {
type = physical_tile_type(blk_id);
for (iclass = 0; iclass < type->num_class; iclass++) {
for (int iclass = 0; iclass < type->num_class; iclass++) {
num_local_opin = route_ctx.clb_opins_used_locally[blk_id][iclass].size();
if (num_local_opin == 0) continue;
VTR_ASSERT(type->class_inf[iclass].equivalence == PortEquivalence::INSTANCE);
/* Always 0 for pads and for RECEIVER (IPIN) classes */
for (ipin = 0; ipin < num_local_opin; ipin++) {
inode = route_ctx.clb_opins_used_locally[blk_id][iclass][ipin];
for (int ipin = 0; ipin < num_local_opin; ipin++) {
const RRNodeId& inode = route_ctx.clb_opins_used_locally[blk_id][iclass][ipin];
adjust_one_rr_occ_and_apcost(inode, -1, pres_fac, acc_fac);
}
}
@ -1629,7 +1625,7 @@ void reserve_locally_used_opins(float pres_fac, float acc_fac, bool rip_up_local
for (auto blk_id : cluster_ctx.clb_nlist.blocks()) {
type = physical_tile_type(blk_id);
for (iclass = 0; iclass < type->num_class; iclass++) {
for (int iclass = 0; iclass < type->num_class; iclass++) {
num_local_opin = route_ctx.clb_opins_used_locally[blk_id][iclass].size();
if (num_local_opin == 0) continue;
@ -1641,25 +1637,24 @@ void reserve_locally_used_opins(float pres_fac, float acc_fac, bool rip_up_local
//congestion cost are popped-off/reserved first. (Intuitively, we want
//the reserved OPINs to move out of the way of congestion, by preferring
//to reserve OPINs with lower congestion costs).
from_node = route_ctx.rr_blk_source[blk_id][iclass];
num_edges = device_ctx.rr_nodes[from_node].num_edges();
for (iconn = 0; iconn < num_edges; iconn++) {
to_node = device_ctx.rr_nodes[from_node].edge_sink_node(iconn);
const RRNodeId& from_node = route_ctx.rr_blk_source[blk_id][iclass];
for (const RREdgeId& iconn : device_ctx.rr_graph.node_out_edges(from_node)) {
const RRNodeId& to_node = device_ctx.rr_graph.edge_sink_node(iconn);
VTR_ASSERT(device_ctx.rr_nodes[to_node].type() == OPIN);
VTR_ASSERT(device_ctx.rr_graph.node_type(to_node) == OPIN);
//Add the OPIN to the heap according to it's congestion cost
cost = get_rr_cong_cost(to_node);
node_to_heap(to_node, cost, OPEN, OPEN, 0., 0.);
node_to_heap(to_node, cost, RRNodeId::INVALID(), RREdgeId::INVALID(), 0., 0.);
}
for (ipin = 0; ipin < num_local_opin; ipin++) {
for (int ipin = 0; ipin < num_local_opin; ipin++) {
//Pop the nodes off the heap. We get them from the heap so we
//reserve those pins with lowest congestion cost first.
heap_head_ptr = get_heap_head();
inode = heap_head_ptr->index;
const RRNodeId& inode = heap_head_ptr->index;
VTR_ASSERT(device_ctx.rr_nodes[inode].type() == OPIN);
VTR_ASSERT(device_ctx.rr_graph.node_type(inode) == OPIN);
adjust_one_rr_occ_and_apcost(inode, 1, pres_fac, acc_fac);
route_ctx.clb_opins_used_locally[blk_id][iclass][ipin] = inode;
@ -1671,7 +1666,7 @@ void reserve_locally_used_opins(float pres_fac, float acc_fac, bool rip_up_local
}
}
static void adjust_one_rr_occ_and_apcost(int inode, int add_or_sub, float pres_fac, float acc_fac) {
static void adjust_one_rr_occ_and_apcost(const RRNodeId& inode, int add_or_sub, float pres_fac, float acc_fac) {
/* Increments or decrements (depending on add_or_sub) the occupancy of *
* one rr_node, and adjusts the present cost of that node appropriately. */
@ -1679,7 +1674,7 @@ static void adjust_one_rr_occ_and_apcost(int inode, int add_or_sub, float pres_f
auto& device_ctx = g_vpr_ctx.device();
int new_occ = route_ctx.rr_node_route_inf[inode].occ() + add_or_sub;
int capacity = device_ctx.rr_nodes[inode].capacity();
int capacity = device_ctx.rr_graph.node_capacity(inode);
route_ctx.rr_node_route_inf[inode].set_occ(new_occ);
if (new_occ < capacity) {
@ -1710,11 +1705,11 @@ void print_traceback(ClusterNetId net_id) {
VTR_LOG("traceback %zu: ", size_t(net_id));
t_trace* head = route_ctx.trace[net_id].head;
while (head) {
int inode{head->index};
if (device_ctx.rr_nodes[inode].type() == SINK)
VTR_LOG("%d(sink)(%d)->", inode, route_ctx.rr_node_route_inf[inode].occ());
RRNodeId inode{head->index};
if (device_ctx.rr_graph.node_type(inode) == SINK)
VTR_LOG("%ld(sink)(%d)->", size_t(inode), route_ctx.rr_node_route_inf[inode].occ());
else
VTR_LOG("%d(%d)->", inode, route_ctx.rr_node_route_inf[inode].occ());
VTR_LOG("%ld(%d)->", size_t(inode), route_ctx.rr_node_route_inf[inode].occ());
head = head->next;
}
VTR_LOG("\n");
@ -1725,8 +1720,8 @@ void print_traceback(const t_trace* trace) {
auto& route_ctx = g_vpr_ctx.routing();
const t_trace* prev = nullptr;
while (trace) {
int inode = trace->index;
VTR_LOG("%d (%s)", inode, rr_node_typename[device_ctx.rr_nodes[inode].type()]);
RRNodeId inode = trace->index;
VTR_LOG("%ld (%s)", size_t(inode), rr_node_typename[device_ctx.rr_graph.node_type(inode)]);
if (trace->iswitch == OPEN) {
VTR_LOG(" !"); //End of branch
@ -1736,7 +1731,7 @@ void print_traceback(const t_trace* trace) {
VTR_LOG("*"); //Reached non-configurably
}
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_nodes[inode].capacity()) {
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_graph.node_capacity(inode)) {
VTR_LOG(" x"); //Overused
}
VTR_LOG("\n");
@ -1747,12 +1742,12 @@ void print_traceback(const t_trace* trace) {
}
bool validate_traceback(t_trace* trace) {
std::set<int> seen_rr_nodes;
std::set<RRNodeId> seen_rr_nodes;
return validate_traceback_recurr(trace, seen_rr_nodes);
}
bool validate_traceback_recurr(t_trace* trace, std::set<int>& seen_rr_nodes) {
bool validate_traceback_recurr(t_trace* trace, std::set<RRNodeId>& seen_rr_nodes) {
if (!trace) {
return true;
}
@ -1766,7 +1761,7 @@ bool validate_traceback_recurr(t_trace* trace, std::set<int>& seen_rr_nodes) {
//Verify that the next element (branch point) has been already seen in the traceback so far
if (!seen_rr_nodes.count(next->index)) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Traceback branch point %d not found", next->index);
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Traceback branch point %ld not found", size_t(next->index));
} else {
//Recurse along the new branch
return validate_traceback_recurr(next, seen_rr_nodes);
@ -1778,25 +1773,25 @@ bool validate_traceback_recurr(t_trace* trace, std::set<int>& seen_rr_nodes) {
auto& device_ctx = g_vpr_ctx.device();
bool found = false;
for (t_edge_size iedge = 0; iedge < device_ctx.rr_nodes[trace->index].num_edges(); ++iedge) {
int to_node = device_ctx.rr_nodes[trace->index].edge_sink_node(iedge);
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(trace->index)) {
RRNodeId to_node = device_ctx.rr_graph.edge_sink_node(iedge);
if (to_node == next->index) {
found = true;
//Verify that the switch matches
int rr_iswitch = device_ctx.rr_nodes[trace->index].edge_switch(iedge);
int rr_iswitch = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
if (trace->iswitch != rr_iswitch) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Traceback mismatched switch type: traceback %d rr_graph %d (RR nodes %d -> %d)\n",
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Traceback mismatched switch type: traceback %d rr_graph %d (RR nodes %ld -> %ld)\n",
trace->iswitch, rr_iswitch,
trace->index, to_node);
size_t(trace->index), size_t(to_node));
}
break;
}
}
if (!found) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Traceback no RR edge between RR nodes %d -> %d\n", trace->index, next->index);
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Traceback no RR edge between RR nodes %ld -> %ld\n", size_t(trace->index), size_t(next->index));
}
//Recurse
@ -1815,7 +1810,7 @@ void print_invalid_routing_info() {
auto& route_ctx = g_vpr_ctx.routing();
//Build a look-up of nets using each RR node
std::multimap<int, ClusterNetId> rr_node_nets;
std::multimap<RRNodeId, ClusterNetId> rr_node_nets;
for (auto net_id : cluster_ctx.clb_nlist.nets()) {
t_trace* tptr = route_ctx.trace[net_id].head;
@ -1826,9 +1821,9 @@ void print_invalid_routing_info() {
}
}
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
int occ = route_ctx.rr_node_route_inf[inode].occ();
int cap = device_ctx.rr_nodes[inode].capacity();
int cap = device_ctx.rr_graph.node_capacity(inode);
if (occ > cap) {
VTR_LOG(" %s is overused (occ=%d capacity=%d)\n", describe_rr_node(inode).c_str(), occ, cap);
@ -1844,14 +1839,14 @@ void print_invalid_routing_info() {
void print_rr_node_route_inf() {
auto& route_ctx = g_vpr_ctx.routing();
auto& device_ctx = g_vpr_ctx.device();
for (size_t inode = 0; inode < route_ctx.rr_node_route_inf.size(); ++inode) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (!std::isinf(route_ctx.rr_node_route_inf[inode].path_cost)) {
int prev_node = route_ctx.rr_node_route_inf[inode].prev_node;
int prev_edge = route_ctx.rr_node_route_inf[inode].prev_edge;
RRNodeId prev_node = route_ctx.rr_node_route_inf[inode].prev_node;
RREdgeId prev_edge = route_ctx.rr_node_route_inf[inode].prev_edge;
VTR_LOG("rr_node: %d prev_node: %d prev_edge: %d",
inode, prev_node, prev_edge);
size_t(inode), size_t(prev_node), size_t(prev_edge));
if (prev_node != OPEN && prev_edge != OPEN && !device_ctx.rr_nodes[prev_node].edge_is_configurable(prev_edge)) {
if (prev_node != RRNodeId::INVALID() && prev_edge != RREdgeId::INVALID() && !device_ctx.rr_graph.edge_is_configurable(prev_edge)) {
VTR_LOG("*");
}
@ -1867,23 +1862,24 @@ void print_rr_node_route_inf_dot() {
VTR_LOG("digraph G {\n");
VTR_LOG("\tnode[shape=record]\n");
for (size_t inode = 0; inode < route_ctx.rr_node_route_inf.size(); ++inode) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (!std::isinf(route_ctx.rr_node_route_inf[inode].path_cost)) {
VTR_LOG("\tnode%zu[label=\"{%zu (%s)", inode, inode, device_ctx.rr_nodes[inode].type_string());
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_nodes[inode].capacity()) {
VTR_LOG("\tnode%zu[label=\"{%zu (%s)", size_t(inode), size_t(inode), rr_node_typename[device_ctx.rr_graph.node_type(inode)]);
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_graph.node_capacity(inode)) {
VTR_LOG(" x");
}
VTR_LOG("}\"]\n");
}
}
for (size_t inode = 0; inode < route_ctx.rr_node_route_inf.size(); ++inode) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (!std::isinf(route_ctx.rr_node_route_inf[inode].path_cost)) {
int prev_node = route_ctx.rr_node_route_inf[inode].prev_node;
int prev_edge = route_ctx.rr_node_route_inf[inode].prev_edge;
RRNodeId prev_node = route_ctx.rr_node_route_inf[inode].prev_node;
RREdgeId prev_edge = route_ctx.rr_node_route_inf[inode].prev_edge;
if (prev_node != OPEN && prev_edge != OPEN) {
VTR_LOG("\tnode%d -> node%zu [", prev_node, inode);
if (prev_node != OPEN && prev_edge != OPEN && !device_ctx.rr_nodes[prev_node].edge_is_configurable(prev_edge)) {
if (prev_node != RRNodeId::INVALID() && prev_edge != RREdgeId::INVALID()) {
VTR_LOG("\tnode%ld -> node%ld [", size_t(prev_node), size_t(inode));
if (prev_node != RRNodeId::INVALID() && prev_edge != RREdgeId::INVALID() && !device_ctx.rr_graph.edge_is_configurable(prev_edge)) {
VTR_LOG("label=\"*\"");
}
@ -1895,26 +1891,35 @@ void print_rr_node_route_inf_dot() {
VTR_LOG("}\n");
}
static bool validate_trace_nodes(t_trace* head, const std::unordered_set<int>& trace_nodes) {
static bool validate_trace_nodes(t_trace* head, const std::unordered_set<RRNodeId>& trace_nodes) {
//Verifies that all nodes in the traceback 'head' are conatined in 'trace_nodes'
if (!head) {
return true;
}
std::vector<int> missing_from_trace_nodes;
std::vector<RRNodeId> missing_from_trace_nodes;
for (t_trace* tptr = head; tptr != nullptr; tptr = tptr->next) {
if (!trace_nodes.count(tptr->index)) {
missing_from_trace_nodes.push_back(tptr->index);
}
}
if (!missing_from_trace_nodes.empty()) {
std::string missing_from_trace_nodes_str;
for (const RRNodeId& node : missing_from_trace_nodes) {
if (&node != &missing_from_trace_nodes[0]) {
missing_from_trace_nodes_str += std::string(", ");
}
missing_from_trace_nodes_str += std::to_string(size_t(node));
}
std::string msg = vtr::string_fmt(
"The following %zu nodes were found in traceback"
" but were missing from trace_nodes: %s\n",
missing_from_trace_nodes.size(),
vtr::join(missing_from_trace_nodes, ", ").c_str());
missing_from_trace_nodes_str.c_str());
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, msg.c_str());
return false;

View File

@ -3,6 +3,7 @@
#include <vector>
#include "clustered_netlist.h"
#include "vtr_vector.h"
#include "rr_graph_obj.h"
/* Used by the heap as its fundamental data structure.
* Each heap element represents a partial route.
@ -37,14 +38,21 @@ struct t_heap {
float backward_path_cost = 0.;
float R_upstream = 0.;
int index = OPEN;
RRNodeId index = RRNodeId::INVALID();
struct t_prev {
int node;
int edge;
RRNodeId node;
RREdgeId edge;
};
union {
/* Xifan Tang - type union was used here,
* but it causes an error in vtr_memory.h
* when allocating the data structure.
* I change to struct here.
* TODO: investigate the source of errors
* and see if this will cause memory overhead
*/
struct {
t_heap* next;
t_prev prev;
} u;
@ -59,22 +67,22 @@ t_bb load_net_route_bb(ClusterNetId net_id, int bb_factor);
void pathfinder_update_path_cost(t_trace* route_segment_start,
int add_or_sub,
float pres_fac);
void pathfinder_update_single_node_cost(int inode, int add_or_sub, float pres_fac);
void pathfinder_update_single_node_cost(const RRNodeId& inode, int add_or_sub, float pres_fac);
void pathfinder_update_cost(float pres_fac, float acc_fac);
t_trace* update_traceback(t_heap* hptr, ClusterNetId net_id);
void reset_path_costs(const std::vector<int>& visited_rr_nodes);
void reset_path_costs(const std::vector<RRNodeId>& visited_rr_nodes);
float get_rr_cong_cost(int inode);
float get_rr_cong_cost(const RRNodeId& inode);
void mark_ends(ClusterNetId net_id);
void mark_remaining_ends(const std::vector<int>& remaining_sinks);
void add_to_heap(t_heap* hptr);
t_heap* alloc_heap_data();
void node_to_heap(int inode, float cost, int prev_node, int prev_edge, float backward_path_cost, float R_upstream);
void node_to_heap(const RRNodeId& inode, float cost, const RRNodeId& prev_node, const RREdgeId& prev_edge, float backward_path_cost, float R_upstream);
bool is_empty_heap();
@ -82,14 +90,14 @@ void free_traceback(ClusterNetId net_id);
void drop_traceback_tail(ClusterNetId net_id);
void free_traceback(t_trace* tptr);
void add_to_mod_list(int inode, std::vector<int>& modified_rr_node_inf);
void add_to_mod_list(const RRNodeId& inode, std::vector<RRNodeId>& modified_rr_node_inf);
namespace heap_ {
void build_heap();
void sift_down(size_t hole);
void sift_up(size_t tail, t_heap* const hptr);
void push_back(t_heap* const hptr);
void push_back_node(int inode, float total_cost, int prev_node, int prev_edge, float backward_path_cost, float R_upstream);
void push_back_node(const RRNodeId& inode, float total_cost, const RRNodeId& prev_node, const RREdgeId& prev_edge, float backward_path_cost, float R_upstream);
bool is_valid();
void pop_heap();
void print_heap();
@ -102,7 +110,7 @@ void empty_heap();
void free_heap_data(t_heap* hptr);
void invalidate_heap_entries(int sink_node, int ipin_node);
void invalidate_heap_entries(const RRNodeId& sink_node, const RRNodeId& ipin_node);
void init_route_structs(int bb_factor);

View File

@ -29,9 +29,9 @@ bool try_route(int width_fac,
bool feasible_routing();
std::vector<int> collect_congested_rr_nodes();
std::vector<RRNodeId> collect_congested_rr_nodes();
std::vector<std::set<ClusterNetId>> collect_rr_node_nets();
vtr::vector<RRNodeId, std::set<ClusterNetId>> collect_rr_node_nets();
t_clb_opins_used alloc_route_structs();

View File

@ -148,7 +148,7 @@ static bool timing_driven_route_sink(ClusterNetId net_id,
static bool timing_driven_pre_route_to_clock_root(
ClusterNetId net_id,
int sink_node,
const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
float pres_fac,
int high_fanout_threshold,
@ -158,45 +158,45 @@ static bool timing_driven_pre_route_to_clock_root(
RouterStats& router_stats);
static t_heap* timing_driven_route_connection_from_route_tree_high_fanout(t_rt_node* rt_root,
int sink_node,
const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
const SpatialRouteTreeLookup& spatial_rt_lookup,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats);
static t_heap* timing_driven_route_connection_from_heap(int sink_node,
static t_heap* timing_driven_route_connection_from_heap(const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats);
static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const t_conn_cost_params cost_params,
static vtr::vector<RRNodeId, t_heap> timing_driven_find_all_shortest_paths_from_heap(const t_conn_cost_params cost_params,
t_bb bounding_box,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats);
void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* node);
static void timing_driven_expand_cheapest(t_heap* cheapest,
int target_node,
const RRNodeId& target_node,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats);
static t_rt_node* setup_routing_resources(int itry, ClusterNetId net_id, unsigned num_sinks, float pres_fac, int min_incremental_reroute_fanout, CBRR& incremental_rerouting_res, t_rt_node** rt_node_of_sink);
static void add_route_tree_to_heap(t_rt_node* rt_node,
int target_node,
const RRNodeId& target_node,
const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
RouterStats& router_stats);
static t_bb add_high_fanout_route_tree_to_heap(t_rt_node* rt_root,
int target_node,
const RRNodeId& target_node,
const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
const SpatialRouteTreeLookup& spatial_route_tree_lookup,
@ -206,7 +206,7 @@ static t_bb add_high_fanout_route_tree_to_heap(t_rt_node* rt_root,
static t_bb adjust_highfanout_bounding_box(t_bb highfanout_bb);
static void add_route_tree_node_to_heap(t_rt_node* rt_node,
int target_node,
const RRNodeId& target_node,
const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
RouterStats& router_stats);
@ -215,44 +215,44 @@ static void timing_driven_expand_neighbours(t_heap* current,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
int target_node,
const RRNodeId& target_node,
RouterStats& router_stats);
static void timing_driven_expand_neighbour(t_heap* current,
const int from_node,
const t_edge_size from_edge,
const int to_node,
const RRNodeId& from_node,
const RREdgeId& from_edge,
const RRNodeId& to_node,
const t_conn_cost_params cost_params,
const t_bb bounding_box,
const RouterLookahead& router_lookahead,
int target_node,
const RRNodeId& target_node,
const t_bb target_bb,
RouterStats& router_stats);
static void timing_driven_add_to_heap(const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
const t_heap* current,
const int from_node,
const int to_node,
const int iconn,
const int target_node,
const RRNodeId& from_node,
const RRNodeId& to_node,
const RREdgeId& iconn,
const RRNodeId& target_node,
RouterStats& router_stats);
static void timing_driven_expand_node(const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
t_heap* current,
const int from_node,
const int to_node,
const int iconn,
const int target_node);
const RRNodeId& from_node,
const RRNodeId& to_node,
const RREdgeId& iconn,
const RRNodeId& target_node);
static void evaluate_timing_driven_node_costs(t_heap* from,
static void evaluate_timing_driven_node_costs(t_heap* to,
const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
const int from_node,
const int to_node,
const int iconn,
const int target_node);
const RRNodeId& from_node,
const RRNodeId& to_node,
const RREdgeId& iconn,
const RRNodeId& target_node);
static bool timing_driven_check_net_delays(vtr::vector<ClusterNetId, float*>& net_delay);
@ -286,7 +286,7 @@ static void print_route_status(int itry,
std::shared_ptr<const SetupHoldTimingInfo> timing_info,
float est_success_iteration);
static std::string describe_unrouteable_connection(const int source_node, const int sink_node);
static std::string describe_unrouteable_connection(const RRNodeId& source_node, const RRNodeId& sink_node);
static bool is_high_fanout(int fanout, int fanout_threshold);
@ -310,7 +310,7 @@ static void generate_route_timing_reports(const t_router_opts& router_opts,
static void prune_unused_non_configurable_nets(CBRR& connections_inf);
static bool same_non_config_node_set(int from_node, int to_node);
static bool same_non_config_node_set(const RRNodeId& from_node, const RRNodeId& to_node);
/************************ Subroutine definitions *****************************/
bool try_timing_driven_route(const t_router_opts& router_opts,
@ -1008,7 +1008,7 @@ bool timing_driven_route_net(ClusterNetId net_id,
// Pre-route to clock source for clock nets (marked as global nets)
if (cluster_ctx.clb_nlist.net_is_global(net_id) && router_opts.two_stage_clock_routing) {
VTR_ASSERT(router_opts.clock_modeling == DEDICATED_NETWORK);
int sink_node = device_ctx.virtual_clock_network_root_idx;
RRNodeId sink_node = RRNodeId(device_ctx.virtual_clock_network_root_idx);
enable_router_debug(router_opts, net_id, sink_node);
// Set to the max timing criticality which should intern minimize clock insertion
// delay by selecting a direct route from the clock source to the virtual sink
@ -1030,7 +1030,7 @@ bool timing_driven_route_net(ClusterNetId net_id,
for (unsigned itarget = 0; itarget < remaining_targets.size(); ++itarget) {
int target_pin = remaining_targets[itarget];
int sink_rr = route_ctx.net_rr_terminals[net_id][target_pin];
RRNodeId sink_rr = route_ctx.net_rr_terminals[net_id][target_pin];
enable_router_debug(router_opts, net_id, sink_rr);
@ -1069,12 +1069,12 @@ bool timing_driven_route_net(ClusterNetId net_id,
if (!cluster_ctx.clb_nlist.net_is_ignored(net_id)) {
for (unsigned ipin = 1; ipin < cluster_ctx.clb_nlist.net_pins(net_id).size(); ++ipin) {
if (net_delay[ipin] == 0) { // should be SOURCE->OPIN->IPIN->SINK
VTR_ASSERT(device_ctx.rr_nodes[rt_node_of_sink[ipin]->parent_node->parent_node->inode].type() == OPIN);
VTR_ASSERT(device_ctx.rr_graph.node_type(RRNodeId(rt_node_of_sink[ipin]->parent_node->parent_node->inode)) == OPIN);
}
}
}
VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_nodes[rt_root->inode].capacity(), "SOURCE should never be congested");
VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_graph.node_capacity(rt_root->inode), "SOURCE should never be congested");
// route tree is not kept persistent since building it from the traceback the next iteration takes almost 0 time
VTR_LOGV_DEBUG(f_router_debug, "Routed Net %zu (%zu sinks)\n", size_t(net_id), num_sinks);
@ -1084,7 +1084,7 @@ bool timing_driven_route_net(ClusterNetId net_id,
static bool timing_driven_pre_route_to_clock_root(
ClusterNetId net_id,
int sink_node,
const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
float pres_fac,
int high_fanout_threshold,
@ -1100,7 +1100,7 @@ static bool timing_driven_pre_route_to_clock_root(
VTR_LOGV_DEBUG(f_router_debug, "Net %zu pre-route to (%s)\n", size_t(net_id), describe_rr_node(sink_node).c_str());
std::vector<int> modified_rr_node_inf;
std::vector<RRNodeId> modified_rr_node_inf;
profiling::sink_criticality_start();
@ -1198,13 +1198,13 @@ static bool timing_driven_route_sink(ClusterNetId net_id,
profiling::sink_criticality_start();
int sink_node = route_ctx.net_rr_terminals[net_id][target_pin];
RRNodeId sink_node = route_ctx.net_rr_terminals[net_id][target_pin];
VTR_LOGV_DEBUG(f_router_debug, "Net %zu Target %d (%s)\n", size_t(net_id), itarget, describe_rr_node(sink_node).c_str());
VTR_ASSERT_DEBUG(verify_traceback_route_tree_equivalent(route_ctx.trace[net_id].head, rt_root));
std::vector<int> modified_rr_node_inf;
std::vector<RRNodeId> modified_rr_node_inf;
t_heap* cheapest = nullptr;
t_bb bounding_box = route_ctx.route_bb[net_id];
@ -1265,7 +1265,7 @@ static bool timing_driven_route_sink(ClusterNetId net_id,
* lets me reuse all the routines written for breadth-first routing, which *
* all take a traceback structure as input. */
int inode = cheapest->index;
RRNodeId inode = cheapest->index;
route_ctx.rr_node_route_inf[inode].target_flag--; /* Connected to this SINK. */
t_trace* new_route_start_tptr = update_traceback(cheapest, net_id);
VTR_ASSERT_DEBUG(validate_traceback(route_ctx.trace[net_id].head));
@ -1296,18 +1296,18 @@ static bool timing_driven_route_sink(ClusterNetId net_id,
//
//Returns either the last element of the path, or nullptr if no path is found
t_heap* timing_driven_route_connection_from_route_tree(t_rt_node* rt_root,
int sink_node,
const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats) {
// re-explore route tree from root to add any new nodes (buildheap afterwards)
// route tree needs to be repushed onto the heap since each node's cost is target specific
add_route_tree_to_heap(rt_root, sink_node, cost_params, router_lookahead, router_stats);
heap_::build_heap(); // via sifting down everything
int source_node = rt_root->inode;
RRNodeId source_node = rt_root->inode;
if (is_empty_heap()) {
VTR_LOG("No source in route tree: %s\n", describe_unrouteable_connection(source_node, sink_node).c_str());
@ -1388,19 +1388,19 @@ t_heap* timing_driven_route_connection_from_route_tree(t_rt_node* rt_root,
//Unlike timing_driven_route_connection_from_route_tree(), only part of the route tree
//which is spatially close to the sink is added to the heap.
static t_heap* timing_driven_route_connection_from_route_tree_high_fanout(t_rt_node* rt_root,
int sink_node,
const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
t_bb net_bounding_box,
const RouterLookahead& router_lookahead,
const SpatialRouteTreeLookup& spatial_rt_lookup,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats) {
// re-explore route tree from root to add any new nodes (buildheap afterwards)
// route tree needs to be repushed onto the heap since each node's cost is target specific
t_bb high_fanout_bb = add_high_fanout_route_tree_to_heap(rt_root, sink_node, cost_params, router_lookahead, spatial_rt_lookup, net_bounding_box, router_stats);
heap_::build_heap(); // via sifting down everything
int source_node = rt_root->inode;
RRNodeId source_node = rt_root->inode;
if (is_empty_heap()) {
VTR_LOG("No source in route tree: %s\n", describe_unrouteable_connection(source_node, sink_node).c_str());
@ -1409,7 +1409,8 @@ static t_heap* timing_driven_route_connection_from_route_tree_high_fanout(t_rt_n
return nullptr;
}
VTR_LOGV_DEBUG(f_router_debug, " Routing to %d as high fanout net (BB: %d,%d x %d,%d)\n", sink_node,
VTR_LOGV_DEBUG(f_router_debug, " Routing to %ld as high fanout net (BB: %d,%d x %d,%d)\n",
size_t(sink_node),
high_fanout_bb.xmin, high_fanout_bb.ymin,
high_fanout_bb.xmax, high_fanout_bb.ymax);
@ -1423,7 +1424,7 @@ static t_heap* timing_driven_route_connection_from_route_tree_high_fanout(t_rt_n
if (cheapest == nullptr) {
//Found no path, that may be due to an unlucky choice of existing route tree sub-set,
//try again with the full route tree to be sure this is not an artifact of high-fanout routing
VTR_LOG_WARN("No routing path found in high-fanout mode for net connection (to sink_rr %d), retrying with full route tree\n", sink_node);
VTR_LOG_WARN("No routing path found in high-fanout mode for net connection (to sink_rr %ld), retrying with full route tree\n", size_t(sink_node));
//Reset any previously recorded node costs so timing_driven_route_connection()
//starts over from scratch.
@ -1454,11 +1455,11 @@ static t_heap* timing_driven_route_connection_from_route_tree_high_fanout(t_rt_n
//This is the core maze routing routine.
//
//Returns either the last element of the path, or nullptr if no path is found
static t_heap* timing_driven_route_connection_from_heap(int sink_node,
static t_heap* timing_driven_route_connection_from_heap(const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats) {
VTR_ASSERT_SAFE(heap_::is_valid());
@ -1472,13 +1473,13 @@ static t_heap* timing_driven_route_connection_from_heap(int sink_node,
cheapest = get_heap_head();
++router_stats.heap_pops;
int inode = cheapest->index;
RRNodeId inode = cheapest->index;
VTR_LOGV_DEBUG(f_router_debug, " Popping node %d (cost: %g)\n",
inode, cheapest->cost);
size_t(inode), cheapest->cost);
//Have we found the target?
if (inode == sink_node) {
VTR_LOGV_DEBUG(f_router_debug, " Found target %8d (%s)\n", inode, describe_rr_node(inode).c_str());
VTR_LOGV_DEBUG(f_router_debug, " Found target %8d (%s)\n", size_t(inode), describe_rr_node(inode).c_str());
break;
}
@ -1504,13 +1505,13 @@ static t_heap* timing_driven_route_connection_from_heap(int sink_node,
}
//Find shortest paths from specified route tree to all nodes in the RR graph
std::vector<t_heap> timing_driven_find_all_shortest_paths_from_route_tree(t_rt_node* rt_root,
vtr::vector<RRNodeId, t_heap> timing_driven_find_all_shortest_paths_from_route_tree(t_rt_node* rt_root,
const t_conn_cost_params cost_params,
t_bb bounding_box,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats) {
//Add the route tree to the heap with no specific target node
int target_node = OPEN;
RRNodeId target_node = RRNodeId::INVALID();
auto router_lookahead = make_router_lookahead(e_router_lookahead::NO_OP,
/*write_lookahead=*/"", /*read_lookahead=*/"",
/*segment_inf=*/{});
@ -1529,16 +1530,16 @@ std::vector<t_heap> timing_driven_find_all_shortest_paths_from_route_tree(t_rt_n
//
//Note that to re-use code used for the regular A*-based router we use a
//no-operation lookahead which always returns zero.
static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const t_conn_cost_params cost_params,
static vtr::vector<RRNodeId, t_heap> timing_driven_find_all_shortest_paths_from_heap(const t_conn_cost_params cost_params,
t_bb bounding_box,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats) {
auto router_lookahead = make_router_lookahead(e_router_lookahead::NO_OP,
/*write_lookahead=*/"", /*read_lookahead=*/"",
/*segment_inf=*/{});
auto& device_ctx = g_vpr_ctx.device();
std::vector<t_heap> cheapest_paths(device_ctx.rr_nodes.size());
vtr::vector<RRNodeId, t_heap> cheapest_paths(device_ctx.rr_graph.nodes().size());
VTR_ASSERT_SAFE(heap_::is_valid());
@ -1551,16 +1552,16 @@ static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const
t_heap* cheapest = get_heap_head();
++router_stats.heap_pops;
int inode = cheapest->index;
VTR_LOGV_DEBUG(f_router_debug, " Popping node %d (cost: %g)\n",
inode, cheapest->cost);
RRNodeId inode = cheapest->index;
VTR_LOGV_DEBUG(f_router_debug, " Popping node %ld (cost: %g)\n",
size_t(inode), cheapest->cost);
//Since we want to find shortest paths to all nodes in the graph
//we do not specify a target node.
//
//By setting the target_node to OPEN in combination with the NoOp router
//lookahead we can re-use the node exploration code from the regular router
int target_node = OPEN;
RRNodeId target_node = RRNodeId::INVALID();
timing_driven_expand_cheapest(cheapest,
target_node,
@ -1570,11 +1571,11 @@ static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const
modified_rr_node_inf,
router_stats);
if (cheapest_paths[inode].index == OPEN || cheapest_paths[inode].cost >= cheapest->cost) {
VTR_LOGV_DEBUG(f_router_debug, " Better cost to node %d: %g (was %g)\n", inode, cheapest->cost, cheapest_paths[inode].cost);
if (cheapest_paths[inode].index == RRNodeId::INVALID() || cheapest_paths[inode].cost >= cheapest->cost) {
VTR_LOGV_DEBUG(f_router_debug, " Better cost to node %ld: %g (was %g)\n", size_t(inode), cheapest->cost, cheapest_paths[inode].cost);
cheapest_paths[inode] = *cheapest;
} else {
VTR_LOGV_DEBUG(f_router_debug, " Worse cost to node %d: %g (better %g)\n", inode, cheapest->cost, cheapest_paths[inode].cost);
VTR_LOGV_DEBUG(f_router_debug, " Worse cost to node %ld: %g (better %g)\n", size_t(inode), cheapest->cost, cheapest_paths[inode].cost);
}
free_heap_data(cheapest);
@ -1584,15 +1585,15 @@ static std::vector<t_heap> timing_driven_find_all_shortest_paths_from_heap(const
}
static void timing_driven_expand_cheapest(t_heap* cheapest,
int target_node,
const RRNodeId& target_node,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats) {
auto& route_ctx = g_vpr_ctx.mutable_routing();
int inode = cheapest->index;
RRNodeId inode = cheapest->index;
float best_total_cost = route_ctx.rr_node_route_inf[inode].path_cost;
float best_back_cost = route_ctx.rr_node_route_inf[inode].backward_path_cost;
@ -1611,7 +1612,7 @@ static void timing_driven_expand_cheapest(t_heap* cheapest,
if (best_total_cost > new_total_cost && best_back_cost > new_back_cost) {
//Explore from this node, since the current/new partial path has the best cost
//found so far
VTR_LOGV_DEBUG(f_router_debug, " Better cost to %d\n", inode);
VTR_LOGV_DEBUG(f_router_debug, " Better cost to %d\n", size_t(inode));
VTR_LOGV_DEBUG(f_router_debug, " New total cost: %g\n", new_total_cost);
VTR_LOGV_DEBUG(f_router_debug, " New back cost: %g\n", new_back_cost);
VTR_LOGV_DEBUG(f_router_debug, " Setting path costs for assicated node %d (from %d edge %d)\n", cheapest->index, cheapest->u.prev.node, cheapest->u.prev.edge);
@ -1630,7 +1631,7 @@ static void timing_driven_expand_cheapest(t_heap* cheapest,
} else {
//Post-heap prune, do not re-explore from the current/new partial path as it
//has worse cost than the best partial path to this node found so far
VTR_LOGV_DEBUG(f_router_debug, " Worse cost to %d\n", inode);
VTR_LOGV_DEBUG(f_router_debug, " Worse cost to %d\n", size_t(inode));
VTR_LOGV_DEBUG(f_router_debug, " Old total cost: %g\n", best_total_cost);
VTR_LOGV_DEBUG(f_router_debug, " Old back cost: %g\n", best_back_cost);
VTR_LOGV_DEBUG(f_router_debug, " New total cost: %g\n", new_total_cost);
@ -1779,9 +1780,9 @@ void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* rt_node)
while (linked_rt_edge != nullptr) {
child_node = linked_rt_edge->child;
if (device_ctx.rr_nodes[child_node->inode].type() == SINK) {
if (device_ctx.rr_graph.node_type(child_node->inode) == SINK) {
VTR_LOGV_DEBUG(f_router_debug,
"Removing sink %d from route tree\n", child_node->inode);
"Removing sink %ld from route tree\n", size_t(child_node->inode));
rt_node->u.child_list = nullptr;
rt_node->u.next = nullptr;
free(child_node);
@ -1789,7 +1790,7 @@ void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* rt_node)
} else {
rt_node->re_expand = false;
VTR_LOGV_DEBUG(f_router_debug,
"unexpanding: %d in route tree\n", rt_node->inode);
"unexpanding: %ld in route tree\n", size_t(rt_node->inode));
}
disable_expansion_and_remove_sink_from_route_tree_nodes(child_node);
linked_rt_edge = linked_rt_edge->next;
@ -1797,7 +1798,7 @@ void disable_expansion_and_remove_sink_from_route_tree_nodes(t_rt_node* rt_node)
}
static void add_route_tree_to_heap(t_rt_node* rt_node,
int target_node,
const RRNodeId& target_node,
const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
RouterStats& router_stats) {
@ -1830,7 +1831,7 @@ static void add_route_tree_to_heap(t_rt_node* rt_node,
}
}
static t_bb add_high_fanout_route_tree_to_heap(t_rt_node* rt_root, int target_node, const t_conn_cost_params cost_params, const RouterLookahead& router_lookahead, const SpatialRouteTreeLookup& spatial_rt_lookup, t_bb net_bounding_box, RouterStats& router_stats) {
static t_bb add_high_fanout_route_tree_to_heap(t_rt_node* rt_root, const RRNodeId& target_node, const t_conn_cost_params cost_params, const RouterLookahead& router_lookahead, const SpatialRouteTreeLookup& spatial_rt_lookup, t_bb net_bounding_box, RouterStats& router_stats) {
//For high fanout nets we only add those route tree nodes which are spatially close
//to the sink.
//
@ -1844,18 +1845,16 @@ static t_bb add_high_fanout_route_tree_to_heap(t_rt_node* rt_root, int target_no
auto& device_ctx = g_vpr_ctx.device();
//Determine which bin the target node is located in
auto& target_rr_node = device_ctx.rr_nodes[target_node];
int target_bin_x = grid_to_bin_x(target_rr_node.xlow(), spatial_rt_lookup);
int target_bin_y = grid_to_bin_y(target_rr_node.ylow(), spatial_rt_lookup);
int target_bin_x = grid_to_bin_x(device_ctx.rr_graph.node_xlow(target_node), spatial_rt_lookup);
int target_bin_y = grid_to_bin_y(device_ctx.rr_graph.node_ylow(target_node), spatial_rt_lookup);
int nodes_added = 0;
t_bb highfanout_bb;
highfanout_bb.xmin = target_rr_node.xlow();
highfanout_bb.xmax = target_rr_node.xhigh();
highfanout_bb.ymin = target_rr_node.ylow();
highfanout_bb.ymax = target_rr_node.yhigh();
highfanout_bb.xmin = device_ctx.rr_graph.node_xlow(target_node);
highfanout_bb.xmax = device_ctx.rr_graph.node_xhigh(target_node);
highfanout_bb.ymin = device_ctx.rr_graph.node_ylow(target_node);
highfanout_bb.ymax = device_ctx.rr_graph.node_yhigh(target_node);
//Add existing routing starting from the target bin.
//If the target's bin has insufficient existing routing add from the surrounding bins
@ -1877,11 +1876,11 @@ static t_bb add_high_fanout_route_tree_to_heap(t_rt_node* rt_root, int target_no
add_route_tree_node_to_heap(rt_node, target_node, cost_params, router_lookahead, router_stats);
//Update Bounding Box
auto& rr_node = device_ctx.rr_nodes[rt_node->inode];
highfanout_bb.xmin = std::min<int>(highfanout_bb.xmin, rr_node.xlow());
highfanout_bb.ymin = std::min<int>(highfanout_bb.ymin, rr_node.ylow());
highfanout_bb.xmax = std::max<int>(highfanout_bb.xmax, rr_node.xhigh());
highfanout_bb.ymax = std::max<int>(highfanout_bb.ymax, rr_node.yhigh());
auto& rr_node = rt_node->inode;
highfanout_bb.xmin = std::min<int>(highfanout_bb.xmin, device_ctx.rr_graph.node_xlow(rr_node));
highfanout_bb.ymin = std::min<int>(highfanout_bb.ymin, device_ctx.rr_graph.node_ylow(rr_node));
highfanout_bb.xmax = std::max<int>(highfanout_bb.xmax, device_ctx.rr_graph.node_xhigh(rr_node));
highfanout_bb.ymax = std::max<int>(highfanout_bb.ymax, device_ctx.rr_graph.node_yhigh(rr_node));
++nodes_added;
}
@ -1929,11 +1928,11 @@ static t_bb adjust_highfanout_bounding_box(t_bb highfanout_bb) {
//Note that if you want to respect rt_node->re_expand that is the caller's
//responsibility.
static void add_route_tree_node_to_heap(t_rt_node* rt_node,
int target_node,
const RRNodeId& target_node,
const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
RouterStats& router_stats) {
int inode = rt_node->inode;
const RRNodeId& inode = rt_node->inode;
float backward_path_cost = cost_params.criticality * rt_node->Tdel;
float R_upstream = rt_node->R_upstream;
@ -1955,7 +1954,7 @@ static void add_route_tree_node_to_heap(t_rt_node* rt_node,
VTR_LOGV_DEBUG(f_router_debug, " Adding node %8d to heap from init route tree with cost %g (%s)\n", inode, tot_cost, describe_rr_node(inode).c_str());
heap_::push_back_node(inode, tot_cost, NO_PREVIOUS, NO_PREVIOUS,
heap_::push_back_node(inode, tot_cost, RRNodeId::INVALID(), RREdgeId::INVALID(),
backward_path_cost, R_upstream);
++router_stats.heap_pushes;
@ -1965,7 +1964,7 @@ static void timing_driven_expand_neighbours(t_heap* current,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
int target_node,
const RRNodeId& target_node,
RouterStats& router_stats) {
/* Puts all the rr_nodes adjacent to current on the heap.
*/
@ -1973,19 +1972,18 @@ static void timing_driven_expand_neighbours(t_heap* current,
auto& device_ctx = g_vpr_ctx.device();
t_bb target_bb;
if (target_node != OPEN) {
target_bb.xmin = device_ctx.rr_nodes[target_node].xlow();
target_bb.ymin = device_ctx.rr_nodes[target_node].ylow();
target_bb.xmax = device_ctx.rr_nodes[target_node].xhigh();
target_bb.ymax = device_ctx.rr_nodes[target_node].yhigh();
if (target_node != RRNodeId::INVALID()) {
target_bb.xmin = device_ctx.rr_graph.node_xlow(target_node);
target_bb.ymin = device_ctx.rr_graph.node_ylow(target_node);
target_bb.xmax = device_ctx.rr_graph.node_xhigh(target_node);
target_bb.ymax = device_ctx.rr_graph.node_yhigh(target_node);
}
//For each node associated with the current heap element, expand all of it's neighbours
int num_edges = device_ctx.rr_nodes[current->index].num_edges();
for (int iconn = 0; iconn < num_edges; iconn++) {
int to_node = device_ctx.rr_nodes[current->index].edge_sink_node(iconn);
for (const RREdgeId& edge : device_ctx.rr_graph.node_out_edges(current->index)) {
const RRNodeId& to_node = device_ctx.rr_graph.edge_sink_node(edge);
timing_driven_expand_neighbour(current,
current->index, iconn, to_node,
current->index, edge, to_node,
cost_params,
bounding_box,
router_lookahead,
@ -1999,31 +1997,31 @@ static void timing_driven_expand_neighbours(t_heap* current,
//RR nodes outside the expanded bounding box specified in bounding_box are not added
//to the heap.
static void timing_driven_expand_neighbour(t_heap* current,
const int from_node,
const t_edge_size from_edge,
const int to_node,
const RRNodeId& from_node,
const RREdgeId& from_edge,
const RRNodeId& to_node,
const t_conn_cost_params cost_params,
const t_bb bounding_box,
const RouterLookahead& router_lookahead,
int target_node,
const RRNodeId& target_node,
const t_bb target_bb,
RouterStats& router_stats) {
auto& device_ctx = g_vpr_ctx.device();
int to_xlow = device_ctx.rr_nodes[to_node].xlow();
int to_ylow = device_ctx.rr_nodes[to_node].ylow();
int to_xhigh = device_ctx.rr_nodes[to_node].xhigh();
int to_yhigh = device_ctx.rr_nodes[to_node].yhigh();
int to_xlow = device_ctx.rr_graph.node_xlow(to_node);
int to_ylow = device_ctx.rr_graph.node_ylow(to_node);
int to_xhigh = device_ctx.rr_graph.node_xhigh(to_node);
int to_yhigh = device_ctx.rr_graph.node_yhigh(to_node);
if (to_xhigh < bounding_box.xmin //Strictly left of BB left-edge
|| to_xlow > bounding_box.xmax //Strictly right of BB right-edge
|| to_yhigh < bounding_box.ymin //Strictly below BB bottom-edge
|| to_ylow > bounding_box.ymax) { //Strictly above BB top-edge
VTR_LOGV_DEBUG(f_router_debug,
" Pruned expansion of node %d edge %d -> %d"
" Pruned expansion of node %ld edge %ld -> %ld"
" (to node location %d,%dx%d,%d outside of expanded"
" net bounding box %d,%dx%d,%d)\n",
from_node, from_edge, to_node,
size_t(from_node), size_t(from_edge), size_t(to_node),
to_xlow, to_ylow, to_xhigh, to_yhigh,
bounding_box.xmin, bounding_box.ymin, bounding_box.xmax, bounding_box.ymax);
return; /* Node is outside (expanded) bounding box. */
@ -2033,8 +2031,8 @@ static void timing_driven_expand_neighbour(t_heap* current,
* the issue of how to cost them properly so they don't get expanded before *
* more promising routes, but makes route-throughs (via CLBs) impossible. *
* Change this if you want to investigate route-throughs. */
if (target_node != OPEN) {
t_rr_type to_type = device_ctx.rr_nodes[to_node].type();
if (target_node != RRNodeId::INVALID()) {
t_rr_type to_type = device_ctx.rr_graph.node_type(to_node);
if (to_type == IPIN) {
//Check if this IPIN leads to the target block
// IPIN's of the target block should be contained within it's bounding box
@ -2043,10 +2041,10 @@ static void timing_driven_expand_neighbour(t_heap* current,
|| to_xhigh > target_bb.xmax
|| to_yhigh > target_bb.ymax) {
VTR_LOGV_DEBUG(f_router_debug,
" Pruned expansion of node %d edge %d -> %d"
" Pruned expansion of node %ld edge %ld -> %ld"
" (to node is IPIN at %d,%dx%d,%d which does not"
" lead to target block %d,%dx%d,%d)\n",
from_node, from_edge, to_node,
size_t(from_node), size_t(from_edge), size_t(to_node),
to_xlow, to_ylow, to_xhigh, to_yhigh,
target_bb.xmin, target_bb.ymin, target_bb.xmax, target_bb.ymax);
return;
@ -2054,8 +2052,8 @@ static void timing_driven_expand_neighbour(t_heap* current,
}
}
VTR_LOGV_DEBUG(f_router_debug, " Expanding node %d edge %d -> %d\n",
from_node, from_edge, to_node);
VTR_LOGV_DEBUG(f_router_debug, " Expanding node %ld edge %ld -> %ld\n",
size_t(from_node), size_t(from_edge), size_t(to_node));
timing_driven_add_to_heap(cost_params,
router_lookahead,
@ -2066,10 +2064,10 @@ static void timing_driven_expand_neighbour(t_heap* current,
static void timing_driven_add_to_heap(const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
const t_heap* current,
const int from_node,
const int to_node,
const int iconn,
const int target_node,
const RRNodeId& from_node,
const RRNodeId& to_node,
const RREdgeId& iconn,
const RRNodeId& target_node,
RouterStats& router_stats) {
t_heap* next = alloc_heap_data();
next->index = to_node;
@ -2110,11 +2108,11 @@ static void timing_driven_add_to_heap(const t_conn_cost_params cost_params,
static void timing_driven_expand_node(const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
t_heap* current,
const int from_node,
const int to_node,
const int iconn,
const int target_node) {
VTR_LOGV_DEBUG(f_router_debug, " Expanding to node %d (%s)\n", to_node, describe_rr_node(to_node).c_str());
const RRNodeId& from_node,
const RRNodeId& to_node,
const RREdgeId& iconn,
const RRNodeId& target_node) {
VTR_LOGV_DEBUG(f_router_debug, " Expanding to node %ld (%s)\n", size_t(to_node), describe_rr_node(to_node).c_str());
evaluate_timing_driven_node_costs(current,
cost_params,
@ -2131,10 +2129,10 @@ static void timing_driven_expand_node(const t_conn_cost_params cost_params,
static void evaluate_timing_driven_node_costs(t_heap* to,
const t_conn_cost_params cost_params,
const RouterLookahead& router_lookahead,
const int from_node,
const int to_node,
const int iconn,
const int target_node) {
const RRNodeId& from_node,
const RRNodeId& to_node,
const RREdgeId& iconn,
const RRNodeId& target_node) {
/* new_costs.backward_cost: is the "known" part of the cost to this node -- the
* congestion cost of all the routing resources back to the existing route
* plus the known delay of the total path back to the source.
@ -2146,18 +2144,18 @@ static void evaluate_timing_driven_node_costs(t_heap* to,
auto& device_ctx = g_vpr_ctx.device();
//Info for the switch connecting from_node to_node
int iswitch = device_ctx.rr_nodes[from_node].edge_switch(iconn);
int iswitch = size_t(device_ctx.rr_graph.edge_switch(iconn));
bool switch_buffered = device_ctx.rr_switch_inf[iswitch].buffered();
float switch_R = device_ctx.rr_switch_inf[iswitch].R;
float switch_Tdel = device_ctx.rr_switch_inf[iswitch].Tdel;
float switch_Cinternal = device_ctx.rr_switch_inf[iswitch].Cinternal;
//To node info
float node_C = device_ctx.rr_nodes[to_node].C();
float node_R = device_ctx.rr_nodes[to_node].R();
float node_C = device_ctx.rr_graph.node_C(to_node);
float node_R = device_ctx.rr_graph.node_R(to_node);
//From node info
float from_node_R = device_ctx.rr_nodes[from_node].R();
float from_node_R = device_ctx.rr_graph.node_R(from_node);
//Update R_upstream
if (switch_buffered) {
@ -2189,7 +2187,7 @@ static void evaluate_timing_driven_node_costs(t_heap* to,
//Second, we adjust the Tdel to account for the delay caused by the internal capacitance.
Tdel += Rdel_adjust * switch_Cinternal;
bool reached_configurably = device_ctx.rr_nodes[from_node].edge_is_configurable(iconn);
bool reached_configurably = device_ctx.rr_graph.edge_is_configurable(iconn);
float cong_cost = 0.;
if (reached_configurably) {
@ -2211,8 +2209,8 @@ static void evaluate_timing_driven_node_costs(t_heap* to,
to->backward_path_cost += cost_params.criticality * Tdel; //Delay cost
if (cost_params.bend_cost != 0.) {
t_rr_type from_type = device_ctx.rr_nodes[from_node].type();
t_rr_type to_type = device_ctx.rr_nodes[to_node].type();
t_rr_type from_type = device_ctx.rr_graph.node_type(from_node);
t_rr_type to_type = device_ctx.rr_graph.node_type(to_node);
if ((from_type == CHANX && to_type == CHANY) || (from_type == CHANY && to_type == CHANX)) {
to->backward_path_cost += cost_params.bend_cost; //Bend cost
}
@ -2317,9 +2315,9 @@ static bool should_route_net(ClusterNetId net_id, CBRR& connections_inf, bool if
}
for (;;) {
int inode = tptr->index;
RRNodeId inode = tptr->index;
int occ = route_ctx.rr_node_route_inf[inode].occ();
int capacity = device_ctx.rr_nodes[inode].capacity();
int capacity = device_ctx.rr_graph.node_capacity(inode);
if (occ > capacity) {
return true; /* overuse detected */
@ -2328,7 +2326,8 @@ static bool should_route_net(ClusterNetId net_id, CBRR& connections_inf, bool if
if (tptr->iswitch == OPEN) { //End of a branch
// even if net is fully routed, not complete if parts of it should get ripped up (EXPERIMENTAL)
if (if_force_reroute) {
if (connections_inf.should_force_reroute_connection(inode)) {
/* Xifan Tang - TODO: should use RRNodeId */
if (connections_inf.should_force_reroute_connection(size_t(inode))) {
return true;
}
}
@ -2406,8 +2405,8 @@ Connection_based_routing_resources::Connection_based_routing_resources()
// rr sink node index corresponding to this connection terminal
auto rr_sink_node = route_ctx.net_rr_terminals[net_id][ipin];
net_node_to_pin.insert({rr_sink_node, ipin});
net_forcible_reroute_connection_flag.insert({rr_sink_node, false});
net_node_to_pin.insert({size_t(rr_sink_node), ipin});
net_forcible_reroute_connection_flag.insert({size_t(rr_sink_node), false});
}
}
}
@ -2441,7 +2440,8 @@ void Connection_based_routing_resources::put_sink_rt_nodes_in_net_pins_lookup(co
const auto& node_to_pin_mapping = rr_sink_node_to_pin[current_inet];
for (t_rt_node* rt_node : sink_rt_nodes) {
auto mapping = node_to_pin_mapping.find(rt_node->inode);
/* Xifan Tang - TODO: should use RRNodeId later */
auto mapping = node_to_pin_mapping.find(size_t(rt_node->inode));
if (mapping != node_to_pin_mapping.end()) {
rt_node_of_sink[mapping->second] = rt_node;
@ -2464,7 +2464,7 @@ bool Connection_based_routing_resources::sanity_check_lookup() const {
VTR_LOG("%d cannot find itself (net %lu)\n", mapping.first, size_t(net_id));
return false;
}
VTR_ASSERT(route_ctx.net_rr_terminals[net_id][mapping.second] == mapping.first);
VTR_ASSERT(route_ctx.net_rr_terminals[net_id][mapping.second] == RRNodeId(mapping.first));
}
}
return true;
@ -2507,7 +2507,8 @@ bool Connection_based_routing_resources::forcibly_reroute_connections(float max_
auto rr_sink_node = route_ctx.net_rr_terminals[net_id][ipin];
//Clear any forced re-routing from the previuos iteration
forcible_reroute_connection_flag[net_id][rr_sink_node] = false;
/* Xifan Tang - TODO: should use RRNodeId later */
forcible_reroute_connection_flag[net_id][size_t(rr_sink_node)] = false;
// skip if connection is internal to a block such that SOURCE->OPIN->IPIN->SINK directly, which would have 0 time delay
if (lower_bound_connection_delay[net_id][ipin - 1] == 0)
@ -2528,7 +2529,8 @@ bool Connection_based_routing_resources::forcibly_reroute_connections(float max_
if (net_delay[net_id][ipin] < (lower_bound_connection_delay[net_id][ipin - 1] * connection_delay_optimality_tolerance))
continue;
forcible_reroute_connection_flag[net_id][rr_sink_node] = true;
/* Xifan Tang - TODO: should use RRNodeId later */
forcible_reroute_connection_flag[net_id][size_t(rr_sink_node)] = true;
// note that we don't set forcible_reroute_connection_flag to false when the converse is true
// resetting back to false will be done during tree pruning, after the sink has been legally reached
any_connection_rerouted = true;
@ -2563,7 +2565,7 @@ static OveruseInfo calculate_overuse_info() {
auto& cluster_ctx = g_vpr_ctx.clustering();
auto& route_ctx = g_vpr_ctx.routing();
std::unordered_set<int> checked_nodes;
std::unordered_set<RRNodeId> checked_nodes;
size_t overused_nodes = 0;
size_t total_overuse = 0;
@ -2579,14 +2581,14 @@ static OveruseInfo calculate_overuse_info() {
//used by routing, particularly on large devices).
for (auto net_id : cluster_ctx.clb_nlist.nets()) {
for (t_trace* tptr = route_ctx.trace[net_id].head; tptr != nullptr; tptr = tptr->next) {
int inode = tptr->index;
RRNodeId inode = tptr->index;
auto result = checked_nodes.insert(inode);
if (!result.second) { //Already counted
continue;
}
int overuse = route_ctx.rr_node_route_inf[inode].occ() - device_ctx.rr_nodes[inode].capacity();
int overuse = route_ctx.rr_node_route_inf[inode].occ() - device_ctx.rr_graph.node_capacity(inode);
if (overuse > 0) {
overused_nodes += 1;
@ -2596,19 +2598,19 @@ static OveruseInfo calculate_overuse_info() {
}
}
return OveruseInfo(device_ctx.rr_nodes.size(), overused_nodes, total_overuse, worst_overuse);
return OveruseInfo(device_ctx.rr_graph.nodes().size(), overused_nodes, total_overuse, worst_overuse);
}
static size_t calculate_wirelength_available() {
auto& device_ctx = g_vpr_ctx.device();
size_t available_wirelength = 0;
for (size_t i = 0; i < device_ctx.rr_nodes.size(); ++i) {
if (device_ctx.rr_nodes[i].type() == CHANX || device_ctx.rr_nodes[i].type() == CHANY) {
size_t length_x = device_ctx.rr_nodes[i].xhigh() - device_ctx.rr_nodes[i].xlow();
size_t length_y = device_ctx.rr_nodes[i].yhigh() - device_ctx.rr_nodes[i].ylow();
for (const RRNodeId& node : device_ctx.rr_graph.nodes()) {
if (device_ctx.rr_graph.node_type(node) == CHANX || device_ctx.rr_graph.node_type(node) == CHANY) {
size_t length_x = device_ctx.rr_graph.node_xhigh(node) - device_ctx.rr_graph.node_xlow(node);
size_t length_y = device_ctx.rr_graph.node_yhigh(node) - device_ctx.rr_graph.node_ylow(node);
available_wirelength += device_ctx.rr_nodes[i].capacity() * (length_x + length_y + 1);
available_wirelength += device_ctx.rr_graph.node_capacity(node) * (length_x + length_y + 1);
}
}
return available_wirelength;
@ -2736,7 +2738,7 @@ static void print_route_status(int itry, double elapsed_sec, float pres_fac, int
fflush(stdout);
}
static std::string describe_unrouteable_connection(const int source_node, const int sink_node) {
static std::string describe_unrouteable_connection(const RRNodeId& source_node, const RRNodeId& sink_node) {
std::string msg = vtr::string_fmt(
"Cannot route from %s (%s) to "
"%s (%s) -- no possible path",
@ -2868,15 +2870,15 @@ static t_bb calc_current_bb(const t_trace* head) {
bb.ymax = 0;
for (const t_trace* elem = head; elem != nullptr; elem = elem->next) {
const t_rr_node& node = device_ctx.rr_nodes[elem->index];
const RRNodeId& node = elem->index;
//The router interprets RR nodes which cross the boundary as being
//'within' of the BB. Only thos which are *strictly* out side the
//box are exluded, hence we use the nodes xhigh/yhigh for xmin/xmax,
//and xlow/ylow for xmax/ymax calculations
bb.xmin = std::min<int>(bb.xmin, node.xhigh());
bb.ymin = std::min<int>(bb.ymin, node.yhigh());
bb.xmax = std::max<int>(bb.xmax, node.xlow());
bb.ymax = std::max<int>(bb.ymax, node.ylow());
bb.xmin = std::min<int>(bb.xmin, device_ctx.rr_graph.node_xhigh(node));
bb.ymin = std::min<int>(bb.ymin, device_ctx.rr_graph.node_yhigh(node));
bb.xmax = std::max<int>(bb.xmax, device_ctx.rr_graph.node_xlow(node));
bb.ymax = std::max<int>(bb.ymax, device_ctx.rr_graph.node_ylow(node));
}
VTR_ASSERT(bb.xmin <= bb.xmax);
@ -2885,14 +2887,14 @@ static t_bb calc_current_bb(const t_trace* head) {
return bb;
}
void enable_router_debug(const t_router_opts& router_opts, ClusterNetId net, int sink_rr) {
void enable_router_debug(const t_router_opts& router_opts, ClusterNetId net, const RRNodeId& sink_rr) {
bool all_net_debug = (router_opts.router_debug_net == -1);
bool specific_net_debug = (router_opts.router_debug_net >= 0);
bool specific_sink_debug = (router_opts.router_debug_sink_rr >= 0);
bool match_net = (ClusterNetId(router_opts.router_debug_net) == net);
bool match_sink = (router_opts.router_debug_sink_rr == sink_rr);
bool match_sink = (router_opts.router_debug_sink_rr == (int)size_t(sink_rr));
if (all_net_debug) {
VTR_ASSERT(!specific_net_debug);
@ -3051,7 +3053,7 @@ static void prune_unused_non_configurable_nets(CBRR& connections_inf) {
}
//Returns true if both nodes are part of the same non-configurable edge set
static bool same_non_config_node_set(int from_node, int to_node) {
static bool same_non_config_node_set(const RRNodeId& from_node, const RRNodeId& to_node) {
auto& device_ctx = g_vpr_ctx.device();
auto from_itr = device_ctx.rr_node_to_non_config_node_set.find(from_node);

View File

@ -56,7 +56,7 @@ void alloc_timing_driven_route_structs(float** pin_criticality_ptr,
t_rt_node*** rt_node_of_sink_ptr);
void free_timing_driven_route_structs(float* pin_criticality, int* sink_order, t_rt_node** rt_node_of_sink);
void enable_router_debug(const t_router_opts& router_opts, ClusterNetId net, int sink_rr);
void enable_router_debug(const t_router_opts& router_opts, ClusterNetId net, const RRNodeId& sink_rr);
//Delay budget information for a specific connection
struct t_conn_delay_budget {
@ -80,17 +80,17 @@ struct t_conn_cost_params {
};
t_heap* timing_driven_route_connection_from_route_tree(t_rt_node* rt_root,
int sink_node,
const RRNodeId& sink_node,
const t_conn_cost_params cost_params,
t_bb bounding_box,
const RouterLookahead& router_lookahead,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats);
std::vector<t_heap> timing_driven_find_all_shortest_paths_from_route_tree(t_rt_node* rt_root,
vtr::vector<RRNodeId, t_heap> timing_driven_find_all_shortest_paths_from_route_tree(t_rt_node* rt_root,
const t_conn_cost_params cost_params,
t_bb bounding_box,
std::vector<int>& modified_rr_node_inf,
std::vector<RRNodeId>& modified_rr_node_inf,
RouterStats& router_stats);
struct timing_driven_route_structs {

View File

@ -27,7 +27,7 @@
/* Array below allows mapping from any rr_node to any rt_node currently in
* the rt_tree. */
static std::vector<t_rt_node*> rr_node_to_rt_node; /* [0..device_ctx.rr_nodes.size()-1] */
static vtr::vector<RRNodeId, t_rt_node*> rr_node_to_rt_node; /* [0..device_ctx.rr_graph.nodes().size()-1] */
/* Frees lists for fast addition and deletion of nodes and edges. */
@ -47,19 +47,19 @@ static void free_linked_rt_edge(t_linked_rt_edge* rt_edge);
static t_rt_node* add_subtree_to_route_tree(t_heap* hptr,
t_rt_node** sink_rt_node_ptr);
static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<int>& visited);
static t_rt_node* add_non_configurable_to_route_tree(const RRNodeId& rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<RRNodeId>& visited);
static t_rt_node* update_unbuffered_ancestors_C_downstream(t_rt_node* start_of_new_subtree_rt_node);
bool verify_route_tree_recurr(t_rt_node* node, std::set<int>& seen_nodes);
bool verify_route_tree_recurr(t_rt_node* node, std::set<RRNodeId>& seen_nodes);
static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf, bool congested, std::vector<int>* non_config_node_set_usage);
static t_trace* traceback_to_route_tree_branch(t_trace* trace, std::map<int, t_rt_node*>& rr_node_to_rt, std::vector<int>* non_config_node_set_usage);
static t_trace* traceback_to_route_tree_branch(t_trace* trace, std::map<RRNodeId, t_rt_node*>& rr_node_to_rt, std::vector<int>* non_config_node_set_usage);
static std::pair<t_trace*, t_trace*> traceback_from_route_tree_recurr(t_trace* head, t_trace* tail, const t_rt_node* node);
void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<int, int, int>>& connections);
void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<RRNodeId, int, RRNodeId>>& connections);
/************************** Subroutine definitions ***************************/
@ -73,7 +73,7 @@ bool alloc_route_tree_timing_structs(bool exists_ok) {
auto& device_ctx = g_vpr_ctx.device();
bool route_tree_structs_are_allocated = (rr_node_to_rt_node.size() == size_t(device_ctx.rr_nodes.size())
bool route_tree_structs_are_allocated = (rr_node_to_rt_node.size() == size_t(device_ctx.rr_graph.nodes().size())
|| rt_node_free_list != nullptr);
if (route_tree_structs_are_allocated) {
if (exists_ok) {
@ -84,7 +84,7 @@ bool alloc_route_tree_timing_structs(bool exists_ok) {
}
}
rr_node_to_rt_node = std::vector<t_rt_node*>(device_ctx.rr_nodes.size(), nullptr);
rr_node_to_rt_node = vtr::vector<RRNodeId, t_rt_node*>(device_ctx.rr_graph.nodes().size(), nullptr);
return true;
}
@ -173,7 +173,7 @@ static void free_linked_rt_edge(t_linked_rt_edge* rt_edge) {
* node of the rt_tree (which is just the net source). */
t_rt_node* init_route_tree_to_source(ClusterNetId inet) {
t_rt_node* rt_root;
int inode;
RRNodeId inode;
auto& route_ctx = g_vpr_ctx.routing();
auto& device_ctx = g_vpr_ctx.device();
@ -187,9 +187,9 @@ t_rt_node* init_route_tree_to_source(ClusterNetId inet) {
inode = route_ctx.net_rr_terminals[inet][0]; /* Net source */
rt_root->inode = inode;
rt_root->C_downstream = device_ctx.rr_nodes[inode].C();
rt_root->R_upstream = device_ctx.rr_nodes[inode].R();
rt_root->Tdel = 0.5 * device_ctx.rr_nodes[inode].R() * device_ctx.rr_nodes[inode].C();
rt_root->C_downstream = device_ctx.rr_graph.node_C(inode);
rt_root->R_upstream = device_ctx.rr_graph.node_R(inode);
rt_root->Tdel = 0.5 * device_ctx.rr_graph.node_R(inode) * device_ctx.rr_graph.node_C(inode);
rr_node_to_rt_node[inode] = rt_root;
return (rt_root);
@ -263,7 +263,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
int inode = hptr->index;
RRNodeId inode = hptr->index;
//if (device_ctx.rr_nodes[inode].type() != SINK) {
//VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
@ -286,11 +286,11 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
downstream_rt_node = sink_rt_node;
std::unordered_set<int> main_branch_visited;
std::unordered_set<int> all_visited;
std::unordered_set<RRNodeId> main_branch_visited;
std::unordered_set<RRNodeId> all_visited;
inode = hptr->u.prev.node;
t_edge_size iedge = hptr->u.prev.edge;
short iswitch = device_ctx.rr_nodes[inode].edge_switch(iedge);
RREdgeId iedge = hptr->u.prev.edge;
short iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));
/* For all "new" nodes in the main path */
// inode is node index of previous node
@ -319,7 +319,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
rr_node_to_rt_node[inode] = rt_node;
if (device_ctx.rr_nodes[inode].type() == IPIN) {
if (device_ctx.rr_graph.node_type(inode) == IPIN) {
rt_node->re_expand = false;
} else {
rt_node->re_expand = true;
@ -328,7 +328,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
downstream_rt_node = rt_node;
iedge = route_ctx.rr_node_route_inf[inode].prev_edge;
inode = route_ctx.rr_node_route_inf[inode].prev_node;
iswitch = device_ctx.rr_nodes[inode].edge_switch(iedge);
iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));
}
//Inode is now the branch point to the old routing; do not need
@ -347,7 +347,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
//Expand (recursively) each of the main-branch nodes adding any
//non-configurably connected nodes
for (int rr_node : main_branch_visited) {
for (const RRNodeId& rr_node : main_branch_visited) {
add_non_configurable_to_route_tree(rr_node, false, all_visited);
}
@ -355,7 +355,7 @@ add_subtree_to_route_tree(t_heap* hptr, t_rt_node** sink_rt_node_ptr) {
return (downstream_rt_node);
}
static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<int>& visited) {
static t_rt_node* add_non_configurable_to_route_tree(const RRNodeId& rr_node, const bool reached_by_non_configurable_edge, std::unordered_set<RRNodeId>& visited) {
t_rt_node* rt_node = nullptr;
if (!visited.count(rr_node) || !reached_by_non_configurable_edge) {
@ -375,7 +375,7 @@ static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bo
rt_node->u.child_list = nullptr;
rt_node->inode = rr_node;
if (device_ctx.rr_nodes[rr_node].type() == IPIN) {
if (device_ctx.rr_graph.node_type(rr_node) == IPIN) {
rt_node->re_expand = false;
} else {
rt_node->re_expand = true;
@ -385,18 +385,18 @@ static t_rt_node* add_non_configurable_to_route_tree(const int rr_node, const bo
}
}
for (int iedge : device_ctx.rr_nodes[rr_node].non_configurable_edges()) {
for (const RREdgeId& iedge : device_ctx.rr_graph.node_non_configurable_out_edges(rr_node)) {
//Recursive case: expand children
VTR_ASSERT(!device_ctx.rr_nodes[rr_node].edge_is_configurable(iedge));
VTR_ASSERT(!device_ctx.rr_graph.edge_is_configurable(iedge));
int to_rr_node = device_ctx.rr_nodes[rr_node].edge_sink_node(iedge);
RRNodeId to_rr_node = device_ctx.rr_graph.edge_sink_node(iedge);
//Recurse
t_rt_node* child_rt_node = add_non_configurable_to_route_tree(to_rr_node, true, visited);
if (!child_rt_node) continue;
int iswitch = device_ctx.rr_nodes[rr_node].edge_switch(iedge);
int iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));
//Create the edge
t_linked_rt_edge* linked_rt_edge = alloc_linked_rt_edge();
@ -428,7 +428,7 @@ void load_new_subtree_R_upstream(t_rt_node* rt_node) {
auto& device_ctx = g_vpr_ctx.device();
t_rt_node* parent_rt_node = rt_node->parent_node;
int inode = rt_node->inode;
RRNodeId inode = rt_node->inode;
//Calculate upstream resistance
float R_upstream = 0.;
@ -441,7 +441,7 @@ void load_new_subtree_R_upstream(t_rt_node* rt_node) {
}
R_upstream += device_ctx.rr_switch_inf[iswitch].R; //Parent switch R
}
R_upstream += device_ctx.rr_nodes[inode].R(); //Current node R
R_upstream += device_ctx.rr_graph.node_R(inode); //Current node R
rt_node->R_upstream = R_upstream;
@ -457,7 +457,7 @@ float load_new_subtree_C_downstream(t_rt_node* rt_node) {
if (rt_node) {
auto& device_ctx = g_vpr_ctx.device();
C_downstream += device_ctx.rr_nodes[rt_node->inode].C();
C_downstream += device_ctx.rr_graph.node_C(rt_node->inode);
for (t_linked_rt_edge* edge = rt_node->u.child_list; edge != nullptr; edge = edge->next) {
/*Similar to net_delay.cpp, this for loop traverses a rc subtree, whose edges represent enabled switches.
* When switches such as multiplexers and tristate buffers are enabled, their fanout
@ -536,7 +536,7 @@ void load_route_tree_Tdel(t_rt_node* subtree_rt_root, float Tarrival) {
* must be correct before this routine is called. Tarrival is the time at
* at which the signal arrives at this node's *input*. */
int inode;
RRNodeId inode;
short iswitch;
t_rt_node* child_node;
t_linked_rt_edge* linked_rt_edge;
@ -550,7 +550,7 @@ void load_route_tree_Tdel(t_rt_node* subtree_rt_root, float Tarrival) {
* along a wire segment's length. See discussion in net_delay.c if you want
* to change this. */
Tdel = Tarrival + 0.5 * subtree_rt_root->C_downstream * device_ctx.rr_nodes[inode].R();
Tdel = Tarrival + 0.5 * subtree_rt_root->C_downstream * device_ctx.rr_graph.node_R(inode);
subtree_rt_root->Tdel = Tdel;
/* Now expand the children of this node to load their Tdel values (depth-
@ -581,9 +581,9 @@ void load_route_tree_rr_route_inf(t_rt_node* root) {
t_linked_rt_edge* edge{root->u.child_list};
for (;;) {
int inode = root->inode;
route_ctx.rr_node_route_inf[inode].prev_node = NO_PREVIOUS;
route_ctx.rr_node_route_inf[inode].prev_edge = NO_PREVIOUS;
RRNodeId inode = root->inode;
route_ctx.rr_node_route_inf[inode].prev_node = RRNodeId::INVALID();
route_ctx.rr_node_route_inf[inode].prev_edge = RREdgeId::INVALID();
// path cost should be unset
VTR_ASSERT(std::isinf(route_ctx.rr_node_route_inf[inode].path_cost));
VTR_ASSERT(std::isinf(route_ctx.rr_node_route_inf[inode].backward_path_cost));
@ -606,13 +606,13 @@ void load_route_tree_rr_route_inf(t_rt_node* root) {
}
bool verify_route_tree(t_rt_node* root) {
std::set<int> seen_nodes;
std::set<RRNodeId> seen_nodes;
return verify_route_tree_recurr(root, seen_nodes);
}
bool verify_route_tree_recurr(t_rt_node* node, std::set<int>& seen_nodes) {
bool verify_route_tree_recurr(t_rt_node* node, std::set<RRNodeId>& seen_nodes) {
if (seen_nodes.count(node->inode)) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Duplicate route tree nodes found for node %d", node->inode);
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Duplicate route tree nodes found for node %ld", size_t(node->inode));
}
seen_nodes.insert(node->inode);
@ -657,7 +657,7 @@ void print_route_tree(const t_rt_node* rt_node, int depth) {
}
auto& device_ctx = g_vpr_ctx.device();
VTR_LOG("%srt_node: %d (%s)", indent.c_str(), rt_node->inode, device_ctx.rr_nodes[rt_node->inode].type_string());
VTR_LOG("%srt_node: %ld (%s)", indent.c_str(), size_t(rt_node->inode), rr_node_typename[device_ctx.rr_graph.node_type(rt_node->inode)]);
if (rt_node->parent_switch != OPEN) {
bool parent_edge_configurable = device_ctx.rr_switch_inf[rt_node->parent_switch].configurable();
@ -668,7 +668,7 @@ void print_route_tree(const t_rt_node* rt_node, int depth) {
auto& route_ctx = g_vpr_ctx.routing();
if (route_ctx.rr_node_route_inf[rt_node->inode].occ() > device_ctx.rr_nodes[rt_node->inode].capacity()) {
if (route_ctx.rr_node_route_inf[rt_node->inode].occ() > device_ctx.rr_graph.node_capacity(rt_node->inode)) {
VTR_LOG(" x");
}
@ -727,7 +727,7 @@ t_rt_node* traceback_to_route_tree(t_trace* head, std::vector<int>* non_config_n
VTR_ASSERT_DEBUG(validate_traceback(head));
std::map<int, t_rt_node*> rr_node_to_rt;
std::map<RRNodeId, t_rt_node*> rr_node_to_rt;
t_trace* trace = head;
while (trace) { //Each branch
@ -749,14 +749,14 @@ t_rt_node* traceback_to_route_tree(t_trace* head, std::vector<int>* non_config_n
//
//Returns the t_trace defining the start of the next branch
static t_trace* traceback_to_route_tree_branch(t_trace* trace,
std::map<int, t_rt_node*>& rr_node_to_rt,
std::map<RRNodeId, t_rt_node*>& rr_node_to_rt,
std::vector<int>* non_config_node_set_usage) {
t_trace* next = nullptr;
if (trace) {
t_rt_node* node = nullptr;
int inode = trace->index;
RRNodeId inode = trace->index;
int iswitch = trace->iswitch;
auto itr = rr_node_to_rt.find(trace->index);
@ -773,7 +773,7 @@ static t_trace* traceback_to_route_tree_branch(t_trace* trace,
node->Tdel = std::numeric_limits<float>::quiet_NaN();
auto& device_ctx = g_vpr_ctx.device();
auto node_type = device_ctx.rr_nodes[inode].type();
auto node_type = device_ctx.rr_graph.node_type(inode);
if (node_type == IPIN || node_type == SINK)
node->re_expand = false;
else
@ -899,7 +899,7 @@ t_trace* traceback_from_route_tree(ClusterNetId inet, const t_rt_node* root, int
t_trace* head;
t_trace* tail;
std::unordered_set<int> nodes;
std::unordered_set<RRNodeId> nodes;
std::tie(head, tail) = traceback_from_route_tree_recurr(nullptr, nullptr, root);
@ -912,7 +912,7 @@ t_trace* traceback_from_route_tree(ClusterNetId inet, const t_rt_node* root, int
nodes.insert(trace->index);
//Sanity check that number of sinks match expected
if (device_ctx.rr_nodes[trace->index].type() == SINK) {
if (device_ctx.rr_graph.node_type(trace->index) == SINK) {
num_trace_sinks += 1;
}
}
@ -937,7 +937,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
bool congested = (route_ctx.rr_node_route_inf[node->inode].occ() > device_ctx.rr_nodes[node->inode].capacity());
bool congested = (route_ctx.rr_node_route_inf[node->inode].occ() > device_ctx.rr_graph.node_capacity(node->inode));
int node_set = -1;
auto itr = device_ctx.rr_node_to_non_config_node_set.find(node->inode);
if (itr != device_ctx.rr_node_to_non_config_node_set.end()) {
@ -949,7 +949,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
force_prune = true;
}
if (connections_inf.should_force_reroute_connection(node->inode)) {
if (connections_inf.should_force_reroute_connection(size_t(node->inode))) {
//Forcibly re-route (e.g. to improve delay)
force_prune = true;
}
@ -995,7 +995,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
}
}
if (device_ctx.rr_nodes[node->inode].type() == SINK) {
if (device_ctx.rr_graph.node_type(node->inode) == SINK) {
if (!force_prune) {
//Valid path to sink
@ -1007,7 +1007,7 @@ static t_rt_node* prune_route_tree_recurr(t_rt_node* node, CBRR& connections_inf
VTR_ASSERT(force_prune);
//Record as not reached
connections_inf.toreach_rr_sink(node->inode);
connections_inf.toreach_rr_sink(size_t(node->inode));
free_rt_node(node);
return nullptr; //Pruned
@ -1131,9 +1131,9 @@ t_rt_node* prune_route_tree(t_rt_node* rt_root, CBRR& connections_inf, std::vect
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
VTR_ASSERT_MSG(device_ctx.rr_nodes[rt_root->inode].type() == SOURCE, "Root of route tree must be SOURCE");
VTR_ASSERT_MSG(device_ctx.rr_graph.node_type(rt_root->inode) == SOURCE, "Root of route tree must be SOURCE");
VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_nodes[rt_root->inode].capacity(),
VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_graph.node_capacity(rt_root->inode),
"Route tree root/SOURCE should never be congested");
return prune_route_tree_recurr(rt_root, connections_inf, false, non_config_node_set_usage);
@ -1206,7 +1206,7 @@ void print_edge(const t_linked_rt_edge* edge) {
return;
}
while (edge) {
VTR_LOG("%d(%d) ", edge->child->inode, edge->iswitch);
VTR_LOG("%ld(%d) ", size_t(edge->child->inode), edge->iswitch);
edge = edge->next;
}
VTR_LOG("\n");
@ -1215,31 +1215,30 @@ void print_edge(const t_linked_rt_edge* edge) {
static void print_node(const t_rt_node* rt_node) {
auto& device_ctx = g_vpr_ctx.device();
int inode = rt_node->inode;
t_rr_type node_type = device_ctx.rr_nodes[inode].type();
RRNodeId inode = rt_node->inode;
t_rr_type node_type = device_ctx.rr_graph.node_type(inode);
VTR_LOG("%5.1e %5.1e %2d%6s|%-6d-> ", rt_node->C_downstream, rt_node->R_upstream,
rt_node->re_expand, rr_node_typename[node_type], inode);
rt_node->re_expand, rr_node_typename[node_type], size_t(inode));
}
static void print_node_inf(const t_rt_node* rt_node) {
auto& route_ctx = g_vpr_ctx.routing();
int inode = rt_node->inode;
RRNodeId inode = rt_node->inode;
const auto& node_inf = route_ctx.rr_node_route_inf[inode];
VTR_LOG("%5.1e %5.1e%6d%3d|%-6d-> ", node_inf.path_cost, node_inf.backward_path_cost,
node_inf.prev_node, node_inf.prev_edge, inode);
node_inf.prev_node, node_inf.prev_edge, size_t(inode));
}
static void print_node_congestion(const t_rt_node* rt_node) {
auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing();
int inode = rt_node->inode;
RRNodeId inode = rt_node->inode;
const auto& node_inf = route_ctx.rr_node_route_inf[inode];
const auto& node = device_ctx.rr_nodes[inode];
const auto& node_state = route_ctx.rr_node_route_inf[inode];
VTR_LOG("%2d %2d|%-6d-> ", node_inf.pres_cost, rt_node->Tdel,
node_state.occ(), node.capacity(), inode);
node_state.occ(), device_ctx.rr_graph.node_capacity(inode), size_t(inode));
}
void print_route_tree_inf(const t_rt_node* rt_root) {
@ -1263,8 +1262,8 @@ bool is_equivalent_route_tree(const t_rt_node* root, const t_rt_node* root_clone
if (!root && !root_clone) return true;
if (!root || !root_clone) return false; // one of them is null
if ((root->inode != root_clone->inode) || (!equal_approx(root->R_upstream, root_clone->R_upstream)) || (!equal_approx(root->C_downstream, root_clone->C_downstream)) || (!equal_approx(root->Tdel, root_clone->Tdel))) {
VTR_LOG("mismatch i %d|%d R %e|%e C %e|%e T %e %e\n",
root->inode, root_clone->inode,
VTR_LOG("mismatch i %ld|%ld R %e|%e C %e|%e T %e %e\n",
size_t(root->inode), size_t(root_clone->inode),
root->R_upstream, root_clone->R_upstream,
root->C_downstream, root_clone->C_downstream,
root->Tdel, root_clone->Tdel);
@ -1274,8 +1273,8 @@ bool is_equivalent_route_tree(const t_rt_node* root, const t_rt_node* root_clone
t_linked_rt_edge* clone_edge{root_clone->u.child_list};
while (orig_edge && clone_edge) {
if (orig_edge->iswitch != clone_edge->iswitch)
VTR_LOG("mismatch i %d|%d edge switch %d|%d\n",
root->inode, root_clone->inode,
VTR_LOG("mismatch i %ld|%ld edge switch %d|%d\n",
size_t(root->inode), size_t(root_clone->inode),
orig_edge->iswitch, clone_edge->iswitch);
if (!is_equivalent_route_tree(orig_edge->child, clone_edge->child)) return false; // child trees not equivalent
orig_edge = orig_edge->next;
@ -1290,23 +1289,23 @@ bool is_equivalent_route_tree(const t_rt_node* root, const t_rt_node* root_clone
// check only the connections are correct, ignore R and C
bool is_valid_skeleton_tree(const t_rt_node* root) {
int inode = root->inode;
RRNodeId inode = root->inode;
t_linked_rt_edge* edge = root->u.child_list;
while (edge) {
if (edge->child->parent_node != root) {
VTR_LOG("parent-child relationship not mutually acknowledged by parent %d->%d child %d<-%d\n",
inode, edge->child->inode,
edge->child->inode, edge->child->parent_node->inode);
VTR_LOG("parent-child relationship not mutually acknowledged by parent %ld->%ld child %ld<-%ld\n",
size_t(inode), size_t(edge->child->inode),
size_t(edge->child->inode), size_t(edge->child->parent_node->inode));
return false;
}
if (edge->iswitch != edge->child->parent_switch) {
VTR_LOG("parent(%d)-child(%d) connected switch not equivalent parent %d child %d\n",
inode, edge->child->inode, edge->iswitch, edge->child->parent_switch);
VTR_LOG("parent(%ld)-child(%ld) connected switch not equivalent parent %d child %d\n",
size_t(inode), size_t(edge->child->inode), edge->iswitch, edge->child->parent_switch);
return false;
}
if (!is_valid_skeleton_tree(edge->child)) {
VTR_LOG("subtree %d invalid, propagating up\n", edge->child->inode);
VTR_LOG("subtree %ld invalid, propagating up\n", size_t(edge->child->inode));
return false;
}
edge = edge->next;
@ -1324,24 +1323,24 @@ bool is_valid_route_tree(const t_rt_node* root) {
constexpr float RES_REL_TOL = 1e-6;
constexpr float RES_ABS_TOL = vtr::DEFAULT_ABS_TOL;
int inode = root->inode;
RRNodeId inode = root->inode;
short iswitch = root->parent_switch;
if (root->parent_node) {
if (device_ctx.rr_switch_inf[iswitch].buffered()) {
float R_upstream_check = device_ctx.rr_nodes[inode].R() + device_ctx.rr_switch_inf[iswitch].R;
float R_upstream_check = device_ctx.rr_graph.node_R(inode) + device_ctx.rr_switch_inf[iswitch].R;
if (!vtr::isclose(root->R_upstream, R_upstream_check, RES_REL_TOL, RES_ABS_TOL)) {
VTR_LOG("%d mismatch R upstream %e supposed %e\n", inode, root->R_upstream, R_upstream_check);
VTR_LOG("%ld mismatch R upstream %e supposed %e\n", size_t(inode), root->R_upstream, R_upstream_check);
return false;
}
} else {
float R_upstream_check = device_ctx.rr_nodes[inode].R() + root->parent_node->R_upstream + device_ctx.rr_switch_inf[iswitch].R;
float R_upstream_check = device_ctx.rr_graph.node_R(inode) + root->parent_node->R_upstream + device_ctx.rr_switch_inf[iswitch].R;
if (!vtr::isclose(root->R_upstream, R_upstream_check, RES_REL_TOL, RES_ABS_TOL)) {
VTR_LOG("%d mismatch R upstream %e supposed %e\n", inode, root->R_upstream, R_upstream_check);
VTR_LOG("%ld mismatch R upstream %e supposed %e\n", size_t(inode), root->R_upstream, R_upstream_check);
return false;
}
}
} else if (root->R_upstream != device_ctx.rr_nodes[inode].R()) {
VTR_LOG("%d mismatch R upstream %e supposed %e\n", inode, root->R_upstream, device_ctx.rr_nodes[inode].R());
} else if (root->R_upstream != device_ctx.rr_graph.node_R(inode)) {
VTR_LOG("%ld mismatch R upstream %e supposed %e\n", size_t(inode), root->R_upstream, device_ctx.rr_graph.node_R(inode));
return false;
}
@ -1351,22 +1350,22 @@ bool is_valid_route_tree(const t_rt_node* root) {
// sink, must not be congested
if (!edge) {
int occ = route_ctx.rr_node_route_inf[inode].occ();
int capacity = device_ctx.rr_nodes[inode].capacity();
int capacity = device_ctx.rr_graph.node_capacity(inode);
if (occ > capacity) {
VTR_LOG("SINK %d occ %d > cap %d\n", inode, occ, capacity);
VTR_LOG("SINK %ld occ %d > cap %d\n", size_t(inode), occ, capacity);
return false;
}
}
while (edge) {
if (edge->child->parent_node != root) {
VTR_LOG("parent-child relationship not mutually acknowledged by parent %d->%d child %d<-%d\n",
inode, edge->child->inode,
edge->child->inode, edge->child->parent_node->inode);
VTR_LOG("parent-child relationship not mutually acknowledged by parent %ld->%ld child %ld<-%ld\n",
size_t(inode), size_t(edge->child->inode),
size_t(edge->child->inode), size_t(edge->child->parent_node->inode));
return false;
}
if (edge->iswitch != edge->child->parent_switch) {
VTR_LOG("parent(%d)-child(%d) connected switch not equivalent parent %d child %d\n",
inode, edge->child->inode, edge->iswitch, edge->child->parent_switch);
VTR_LOG("parent(%ld)-child(%ld) connected switch not equivalent parent %d child %d\n",
size_t(inode), size_t(edge->child->inode), edge->iswitch, edge->child->parent_switch);
return false;
}
@ -1383,9 +1382,9 @@ bool is_valid_route_tree(const t_rt_node* root) {
edge = edge->next;
}
float C_downstream_check = C_downstream_children + device_ctx.rr_nodes[inode].C();
float C_downstream_check = C_downstream_children + device_ctx.rr_graph.node_C(inode);
if (!vtr::isclose(root->C_downstream, C_downstream_check, CAP_REL_TOL, CAP_ABS_TOL)) {
VTR_LOG("%d mismatch C downstream %e supposed %e\n", inode, root->C_downstream, C_downstream_check);
VTR_LOG("%ld mismatch C downstream %e supposed %e\n", size_t(inode), root->C_downstream, C_downstream_check);
return false;
}
@ -1397,8 +1396,8 @@ bool is_uncongested_route_tree(const t_rt_node* root) {
auto& route_ctx = g_vpr_ctx.routing();
auto& device_ctx = g_vpr_ctx.device();
int inode = root->inode;
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_nodes[inode].capacity()) {
RRNodeId inode = root->inode;
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_graph.node_capacity(inode)) {
//This node is congested
return false;
}
@ -1415,7 +1414,7 @@ bool is_uncongested_route_tree(const t_rt_node* root) {
}
t_rt_node*
init_route_tree_to_source_no_net(int inode) {
init_route_tree_to_source_no_net(const RRNodeId& inode) {
/* Initializes the routing tree to just the net source, and returns the root
* node of the rt_tree (which is just the net source). */
@ -1429,9 +1428,10 @@ init_route_tree_to_source_no_net(int inode) {
rt_root->parent_switch = OPEN;
rt_root->re_expand = true;
rt_root->inode = inode;
rt_root->C_downstream = device_ctx.rr_nodes[inode].C();
rt_root->R_upstream = device_ctx.rr_nodes[inode].R();
rt_root->Tdel = 0.5 * device_ctx.rr_nodes[inode].R() * device_ctx.rr_nodes[inode].C();
rt_root->C_downstream = device_ctx.rr_graph.node_C(inode);
rt_root->R_upstream = device_ctx.rr_graph.node_R(inode);
rt_root->Tdel = 0.5 * device_ctx.rr_graph.node_R(inode) * device_ctx.rr_graph.node_C(inode);
rr_node_to_rt_node[inode] = rt_root;
return (rt_root);
@ -1439,16 +1439,16 @@ init_route_tree_to_source_no_net(int inode) {
bool verify_traceback_route_tree_equivalent(const t_trace* head, const t_rt_node* rt_root) {
//Walk the route tree saving all the used connections
std::set<std::tuple<int, int, int>> route_tree_connections;
std::set<std::tuple<RRNodeId, int, RRNodeId>> route_tree_connections;
collect_route_tree_connections(rt_root, route_tree_connections);
//Remove the extra parent connection to root (not included in traceback)
route_tree_connections.erase(std::make_tuple(OPEN, OPEN, rt_root->inode));
route_tree_connections.erase(std::make_tuple(RRNodeId::INVALID(), OPEN, rt_root->inode));
//Walk the traceback and verify that every connection exists in the route tree set
int prev_node = OPEN;
RRNodeId prev_node = RRNodeId::INVALID();
int prev_switch = OPEN;
int to_node = OPEN;
RRNodeId to_node = RRNodeId::INVALID();
for (const t_trace* trace = head; trace != nullptr; trace = trace->next) {
to_node = trace->index;
@ -1471,7 +1471,7 @@ bool verify_traceback_route_tree_equivalent(const t_trace* head, const t_rt_node
std::string msg = "Found route tree connection(s) not in traceback:\n";
for (auto conn : route_tree_connections) {
std::tie(prev_node, prev_switch, to_node) = conn;
msg += vtr::string_fmt("\tnode %d -> %d (switch %d)\n", prev_node, to_node, prev_switch);
msg += vtr::string_fmt("\tnode %ld -> %ld (switch %d)\n", size_t(prev_node), size_t(to_node), prev_switch);
}
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, msg.c_str());
@ -1480,12 +1480,12 @@ bool verify_traceback_route_tree_equivalent(const t_trace* head, const t_rt_node
return true;
}
void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<int, int, int>>& connections) {
void collect_route_tree_connections(const t_rt_node* node, std::set<std::tuple<RRNodeId, int, RRNodeId>>& connections) {
if (node) {
//Record reaching connection
int prev_node = OPEN;
RRNodeId prev_node = RRNodeId::INVALID();
int prev_switch = OPEN;
int to_node = node->inode;
RRNodeId to_node = node->inode;
if (node->parent_node) {
prev_node = node->parent_node->inode;
prev_switch = node->parent_switch;
@ -1514,13 +1514,13 @@ t_rt_node* find_sink_rt_node(t_rt_node* rt_root, ClusterNetId net_id, ClusterPin
auto& route_ctx = g_vpr_ctx.routing();
int ipin = cluster_ctx.clb_nlist.pin_net_index(sink_pin);
int sink_rr_inode = route_ctx.net_rr_terminals[net_id][ipin]; //obtain the value of the routing resource sink
RRNodeId sink_rr_inode = route_ctx.net_rr_terminals[net_id][ipin]; //obtain the value of the routing resource sink
t_rt_node* sink_rt_node = find_sink_rt_node_recurr(rt_root, sink_rr_inode); //find pointer to route tree node corresponding to sink_rr_inode
VTR_ASSERT(sink_rt_node);
return sink_rt_node;
}
t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, int sink_rr_inode) {
t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, const RRNodeId& sink_rr_inode) {
if (node->inode == sink_rr_inode) { //check if current node matches sink_rr_inode
return node;
}

View File

@ -4,6 +4,7 @@
#include "connection_based_routing.h"
#include "route_common.h"
#include "spatial_route_tree_lookup.h"
#include "rr_graph_obj.h"
/**************** Subroutines exported by route_tree_timing.c ***************/
@ -30,7 +31,7 @@ void update_remaining_net_delays_from_route_tree(float* net_delay,
void load_route_tree_Tdel(t_rt_node* rt_root, float Tarrival);
void load_route_tree_rr_route_inf(t_rt_node* root);
t_rt_node* init_route_tree_to_source_no_net(int inode);
t_rt_node* init_route_tree_to_source_no_net(const RRNodeId& inode);
void add_route_tree_to_rr_node_lookup(t_rt_node* node);
@ -38,7 +39,7 @@ bool verify_route_tree(t_rt_node* root);
bool verify_traceback_route_tree_equivalent(const t_trace* trace_head, const t_rt_node* rt_root);
t_rt_node* find_sink_rt_node(t_rt_node* rt_root, ClusterNetId net_id, ClusterPinId sink_pin);
t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, int sink_inode);
t_rt_node* find_sink_rt_node_recurr(t_rt_node* node, const RRNodeId& sink_inode);
/********** Incremental reroute ***********/
// instead of ripping up a net that has some congestion, cut the branches

View File

@ -1,4 +1,5 @@
#pragma once
#include "rr_graph_obj.h"
/************** Types and defines exported by route_tree_timing.c ************/
struct t_rt_node;
@ -11,6 +12,10 @@ struct t_rt_node;
struct t_linked_rt_edge {
t_rt_node* child;
short iswitch;
/* Xifan Tang - RRGraph switch*/
RRSwitchId iswitch_id;
t_linked_rt_edge* next;
};
@ -41,8 +46,14 @@ struct t_rt_node {
} u;
t_rt_node* parent_node;
short parent_switch;
/* Xifan Tang - RRGraph switch*/
RRSwitchId parent_switch_id;
bool re_expand;
int inode;
/* Xifan Tang - RRGraph node */
RRNodeId inode;
float C_downstream;
float R_upstream;
float Tdel;

View File

@ -11,13 +11,13 @@ vtr::Matrix<float> calculate_routing_usage(t_rr_type rr_type) {
vtr::Matrix<float> usage({{device_ctx.grid.width(), device_ctx.grid.height()}}, 0.);
//Collect all the in-use RR nodes
std::set<int> rr_nodes;
std::set<RRNodeId> rr_nodes;
for (auto net : cluster_ctx.clb_nlist.nets()) {
t_trace* tptr = route_ctx.trace[net].head;
while (tptr != nullptr) {
int inode = tptr->index;
RRNodeId inode = tptr->index;
if (device_ctx.rr_nodes[inode].type() == rr_type) {
if (device_ctx.rr_graph.node_type(inode) == rr_type) {
rr_nodes.insert(inode);
}
tptr = tptr->next;
@ -25,24 +25,22 @@ vtr::Matrix<float> calculate_routing_usage(t_rr_type rr_type) {
}
//Record number of used resources in each x/y channel
for (int inode : rr_nodes) {
auto& rr_node = device_ctx.rr_nodes[inode];
for (const RRNodeId& inode : rr_nodes) {
if (rr_type == CHANX) {
VTR_ASSERT(rr_node.type() == CHANX);
VTR_ASSERT(rr_node.ylow() == rr_node.yhigh());
VTR_ASSERT(device_ctx.rr_graph.node_type(inode) == CHANX);
VTR_ASSERT(device_ctx.rr_graph.node_ylow(inode) == device_ctx.rr_graph.node_yhigh(inode));
int y = rr_node.ylow();
for (int x = rr_node.xlow(); x <= rr_node.xhigh(); ++x) {
int y = device_ctx.rr_graph.node_ylow(inode);
for (int x = device_ctx.rr_graph.node_xlow(inode); x <= device_ctx.rr_graph.node_xhigh(inode); ++x) {
usage[x][y] += route_ctx.rr_node_route_inf[inode].occ();
}
} else {
VTR_ASSERT(rr_type == CHANY);
VTR_ASSERT(rr_node.type() == CHANY);
VTR_ASSERT(rr_node.xlow() == rr_node.xhigh());
VTR_ASSERT(device_ctx.rr_graph.node_type(inode) == CHANY);
VTR_ASSERT(device_ctx.rr_graph.node_xlow(inode) == device_ctx.rr_graph.node_xhigh(inode));
int x = rr_node.xlow();
for (int y = rr_node.ylow(); y <= rr_node.yhigh(); ++y) {
int x = device_ctx.rr_graph.node_xlow(inode);
for (int y = device_ctx.rr_graph.node_ylow(inode); y <= device_ctx.rr_graph.node_yhigh(inode); ++y) {
usage[x][y] += route_ctx.rr_node_route_inf[inode].occ();
}
}
@ -57,24 +55,23 @@ vtr::Matrix<float> calculate_routing_avail(t_rr_type rr_type) {
auto& device_ctx = g_vpr_ctx.device();
vtr::Matrix<float> avail({{device_ctx.grid.width(), device_ctx.grid.height()}}, 0.);
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); ++inode) {
auto& rr_node = device_ctx.rr_nodes[inode];
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (rr_node.type() == CHANX && rr_type == CHANX) {
VTR_ASSERT(rr_node.type() == CHANX);
VTR_ASSERT(rr_node.ylow() == rr_node.yhigh());
if (device_ctx.rr_graph.node_type(inode) == CHANX && rr_type == CHANX) {
VTR_ASSERT(device_ctx.rr_graph.node_type(inode) == CHANX);
VTR_ASSERT(device_ctx.rr_graph.node_ylow(inode) == device_ctx.rr_graph.node_yhigh(inode));
int y = rr_node.ylow();
for (int x = rr_node.xlow(); x <= rr_node.xhigh(); ++x) {
avail[x][y] += rr_node.capacity();
int y = device_ctx.rr_graph.node_ylow(inode);
for (int x = device_ctx.rr_graph.node_xlow(inode); x <= device_ctx.rr_graph.node_xhigh(inode); ++x) {
avail[x][y] += device_ctx.rr_graph.node_capacity(inode);
}
} else if (rr_node.type() == CHANY && rr_type == CHANY) {
VTR_ASSERT(rr_node.type() == CHANY);
VTR_ASSERT(rr_node.xlow() == rr_node.xhigh());
} else if (device_ctx.rr_graph.node_type(inode) == CHANY && rr_type == CHANY) {
VTR_ASSERT(device_ctx.rr_graph.node_type(inode) == CHANY);
VTR_ASSERT(device_ctx.rr_graph.node_xlow(inode) == device_ctx.rr_graph.node_xhigh(inode));
int x = rr_node.xlow();
for (int y = rr_node.ylow(); y <= rr_node.yhigh(); ++y) {
avail[x][y] += rr_node.capacity();
int x = device_ctx.rr_graph.node_xlow(inode);
for (int y = device_ctx.rr_graph.node_ylow(inode); y <= device_ctx.rr_graph.node_yhigh(inode); ++y) {
avail[x][y] += device_ctx.rr_graph.node_capacity(inode);
}
}
}

View File

@ -7,13 +7,13 @@
#include "route_export.h"
#include "rr_graph.h"
static t_rt_node* setup_routing_resources_no_net(int source_node);
static t_rt_node* setup_routing_resources_no_net(const RRNodeId& source_node);
RouterDelayProfiler::RouterDelayProfiler(
const RouterLookahead* lookahead)
: router_lookahead_(lookahead) {}
bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const t_router_opts& router_opts, float* net_delay) const {
bool RouterDelayProfiler::calculate_delay(const RRNodeId& source_node, const RRNodeId& sink_node, const t_router_opts& router_opts, float* net_delay) const {
/* Returns true as long as found some way to hook up this net, even if that *
* way resulted in overuse of resources (congestion). If there is no way *
* to route this net, even ignoring congestion, it returns false. In this *
@ -22,6 +22,7 @@ bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const
auto& route_ctx = g_vpr_ctx.routing();
t_rt_node* rt_root = setup_routing_resources_no_net(source_node);
/* TODO: This should be changed to RRNodeId */
enable_router_debug(router_opts, ClusterNetId(), sink_node);
/* Update base costs according to fanout and criticality rules */
@ -43,7 +44,7 @@ bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const
init_heap(device_ctx.grid);
std::vector<int> modified_rr_node_inf;
std::vector<RRNodeId> modified_rr_node_inf;
RouterStats router_stats;
t_heap* cheapest = timing_driven_route_connection_from_route_tree(rt_root,
sink_node, cost_params, bounding_box, *router_lookahead_,
@ -59,7 +60,7 @@ bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const
//find delay
*net_delay = rt_node_of_sink->Tdel;
VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_nodes[rt_root->inode].capacity(), "SOURCE should never be congested");
VTR_ASSERT_MSG(route_ctx.rr_node_route_inf[rt_root->inode].occ() <= device_ctx.rr_graph.node_capacity(rt_root->inode), "SOURCE should never be congested");
free_route_tree(rt_root);
}
@ -71,10 +72,10 @@ bool RouterDelayProfiler::calculate_delay(int source_node, int sink_node, const
}
//Returns the shortest path delay from src_node to all RR nodes in the RR graph, or NaN if no path exists
std::vector<float> calculate_all_path_delays_from_rr_node(int src_rr_node, const t_router_opts& router_opts) {
vtr::vector<RRNodeId, float> calculate_all_path_delays_from_rr_node(const RRNodeId& src_rr_node, const t_router_opts& router_opts) {
auto& device_ctx = g_vpr_ctx.device();
std::vector<float> path_delays_to(device_ctx.rr_nodes.size(), std::numeric_limits<float>::quiet_NaN());
vtr::vector<RRNodeId, float> path_delays_to(device_ctx.rr_graph.nodes().size(), std::numeric_limits<float>::quiet_NaN());
t_rt_node* rt_root = setup_routing_resources_no_net(src_rr_node);
@ -89,12 +90,12 @@ std::vector<float> calculate_all_path_delays_from_rr_node(int src_rr_node, const
cost_params.astar_fac = router_opts.astar_fac;
cost_params.bend_cost = router_opts.bend_cost;
std::vector<int> modified_rr_node_inf;
std::vector<RRNodeId> modified_rr_node_inf;
RouterStats router_stats;
init_heap(device_ctx.grid);
std::vector<t_heap> shortest_paths = timing_driven_find_all_shortest_paths_from_route_tree(rt_root,
vtr::vector<RRNodeId, t_heap> shortest_paths = timing_driven_find_all_shortest_paths_from_route_tree(rt_root,
cost_params,
bounding_box,
modified_rr_node_inf,
@ -102,12 +103,12 @@ std::vector<float> calculate_all_path_delays_from_rr_node(int src_rr_node, const
free_route_tree(rt_root);
VTR_ASSERT(shortest_paths.size() == device_ctx.rr_nodes.size());
for (int sink_rr_node = 0; sink_rr_node < (int)device_ctx.rr_nodes.size(); ++sink_rr_node) {
VTR_ASSERT(shortest_paths.size() == device_ctx.rr_graph.nodes().size());
for (const RRNodeId& sink_rr_node : device_ctx.rr_graph.nodes()) {
if (sink_rr_node == src_rr_node) {
path_delays_to[sink_rr_node] = 0.;
} else {
if (shortest_paths[sink_rr_node].index == OPEN) continue;
if (shortest_paths[sink_rr_node].index == RRNodeId::INVALID()) continue;
VTR_ASSERT(shortest_paths[sink_rr_node].index == sink_rr_node);
@ -154,7 +155,7 @@ std::vector<float> calculate_all_path_delays_from_rr_node(int src_rr_node, const
return path_delays_to;
}
static t_rt_node* setup_routing_resources_no_net(int source_node) {
static t_rt_node* setup_routing_resources_no_net(const RRNodeId& source_node) {
/* Build and return a partial route tree from the legal connections from last iteration.
* along the way do:
* update pathfinder costs to be accurate to the partial route tree

View File

@ -9,13 +9,13 @@
class RouterDelayProfiler {
public:
RouterDelayProfiler(const RouterLookahead* lookahead);
bool calculate_delay(int source_node, int sink_node, const t_router_opts& router_opts, float* net_delay) const;
bool calculate_delay(const RRNodeId& source_node, const RRNodeId& sink_node, const t_router_opts& router_opts, float* net_delay) const;
private:
const RouterLookahead* router_lookahead_;
};
std::vector<float> calculate_all_path_delays_from_rr_node(int src_rr_node, const t_router_opts& router_opts);
vtr::vector<RRNodeId, float> calculate_all_path_delays_from_rr_node(const RRNodeId& src_rr_node, const t_router_opts& router_opts);
void alloc_routing_structs(t_chan_width chan_width,
const t_router_opts& router_opts,

View File

@ -5,7 +5,7 @@
#include "globals.h"
#include "route_timing.h"
static int get_expected_segs_to_target(int inode, int target_node, int* num_segs_ortho_dir_ptr);
static int get_expected_segs_to_target(const RRNodeId& inode, const RRNodeId& target_node, int* num_segs_ortho_dir_ptr);
static int round_up(float x);
static std::unique_ptr<RouterLookahead> make_router_lookahead_object(e_router_lookahead router_lookahead_type) {
@ -41,10 +41,10 @@ std::unique_ptr<RouterLookahead> make_router_lookahead(
return router_lookahead;
}
float ClassicLookahead::get_expected_cost(int current_node, int target_node, const t_conn_cost_params& params, float R_upstream) const {
float ClassicLookahead::get_expected_cost(const RRNodeId& current_node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const {
auto& device_ctx = g_vpr_ctx.device();
t_rr_type rr_type = device_ctx.rr_nodes[current_node].type();
t_rr_type rr_type = device_ctx.rr_graph.node_type(current_node);
if (rr_type == CHANX || rr_type == CHANY) {
return classic_wire_lookahead_cost(current_node, target_node, params.criticality, R_upstream);
@ -55,15 +55,15 @@ float ClassicLookahead::get_expected_cost(int current_node, int target_node, con
}
}
float ClassicLookahead::classic_wire_lookahead_cost(int inode, int target_node, float criticality, float R_upstream) const {
float ClassicLookahead::classic_wire_lookahead_cost(const RRNodeId& inode, const RRNodeId& target_node, float criticality, float R_upstream) const {
auto& device_ctx = g_vpr_ctx.device();
VTR_ASSERT_SAFE(device_ctx.rr_nodes[inode].type() == CHANX || device_ctx.rr_nodes[inode].type() == CHANY);
VTR_ASSERT_SAFE(device_ctx.rr_graph.node_type(inode) == CHANX || device_ctx.rr_graph.node_type(inode) == CHANY);
int num_segs_ortho_dir = 0;
int num_segs_same_dir = get_expected_segs_to_target(inode, target_node, &num_segs_ortho_dir);
int cost_index = device_ctx.rr_nodes[inode].cost_index();
int cost_index = device_ctx.rr_graph.node_cost_index(inode);
int ortho_cost_index = device_ctx.rr_indexed_data[cost_index].ortho_cost_index;
const auto& same_data = device_ctx.rr_indexed_data[cost_index];
@ -87,10 +87,10 @@ float ClassicLookahead::classic_wire_lookahead_cost(int inode, int target_node,
return (expected_cost);
}
float MapLookahead::get_expected_cost(int current_node, int target_node, const t_conn_cost_params& params, float /*R_upstream*/) const {
float MapLookahead::get_expected_cost(const RRNodeId& current_node, const RRNodeId& target_node, const t_conn_cost_params& params, float /*R_upstream*/) const {
auto& device_ctx = g_vpr_ctx.device();
t_rr_type rr_type = device_ctx.rr_nodes[current_node].type();
t_rr_type rr_type = device_ctx.rr_graph.node_type(current_node);
if (rr_type == CHANX || rr_type == CHANY) {
return get_lookahead_map_cost(current_node, target_node, params.criticality);
@ -105,7 +105,7 @@ void MapLookahead::compute(const std::vector<t_segment_inf>& segment_inf) {
compute_router_lookahead(segment_inf.size());
}
float NoOpLookahead::get_expected_cost(int /*current_node*/, int /*target_node*/, const t_conn_cost_params& /*params*/, float /*R_upstream*/) const {
float NoOpLookahead::get_expected_cost(const RRNodeId& /*current_node*/, const RRNodeId& /*target_node*/, const t_conn_cost_params& /*params*/, float /*R_upstream*/) const {
return 0.;
}
@ -115,7 +115,7 @@ static int round_up(float x) {
return std::ceil(x - 0.001);
}
static int get_expected_segs_to_target(int inode, int target_node, int* num_segs_ortho_dir_ptr) {
static int get_expected_segs_to_target(const RRNodeId& inode, const RRNodeId& target_node, int* num_segs_ortho_dir_ptr) {
/* Returns the number of segments the same type as inode that will be needed *
* to reach target_node (not including inode) in each direction (the same *
* direction (horizontal or vertical) as inode and the orthogonal direction).*/
@ -127,18 +127,18 @@ static int get_expected_segs_to_target(int inode, int target_node, int* num_segs
int no_need_to_pass_by_clb;
float inv_length, ortho_inv_length, ylow, yhigh, xlow, xhigh;
target_x = device_ctx.rr_nodes[target_node].xlow();
target_y = device_ctx.rr_nodes[target_node].ylow();
cost_index = device_ctx.rr_nodes[inode].cost_index();
target_x = device_ctx.rr_graph.node_xlow(target_node);
target_y = device_ctx.rr_graph.node_ylow(target_node);
cost_index = device_ctx.rr_graph.node_cost_index(inode);
inv_length = device_ctx.rr_indexed_data[cost_index].inv_length;
ortho_cost_index = device_ctx.rr_indexed_data[cost_index].ortho_cost_index;
ortho_inv_length = device_ctx.rr_indexed_data[ortho_cost_index].inv_length;
rr_type = device_ctx.rr_nodes[inode].type();
rr_type = device_ctx.rr_graph.node_type(inode);
if (rr_type == CHANX) {
ylow = device_ctx.rr_nodes[inode].ylow();
xhigh = device_ctx.rr_nodes[inode].xhigh();
xlow = device_ctx.rr_nodes[inode].xlow();
ylow = device_ctx.rr_graph.node_ylow(inode);
xhigh = device_ctx.rr_graph.node_xhigh(inode);
xlow = device_ctx.rr_graph.node_xlow(inode);
/* Count vertical (orthogonal to inode) segs first. */
@ -163,9 +163,9 @@ static int get_expected_segs_to_target(int inode, int target_node, int* num_segs
num_segs_same_dir = 0;
}
} else { /* inode is a CHANY */
ylow = device_ctx.rr_nodes[inode].ylow();
yhigh = device_ctx.rr_nodes[inode].yhigh();
xlow = device_ctx.rr_nodes[inode].xlow();
ylow = device_ctx.rr_graph.node_ylow(inode);
yhigh = device_ctx.rr_graph.node_yhigh(inode);
xlow = device_ctx.rr_graph.node_xlow(inode);
/* Count horizontal (orthogonal to inode) segs first. */

View File

@ -12,7 +12,7 @@ class RouterLookahead {
//
// Either compute or read methods must be invoked before invoking
// get_expected_cost.
virtual float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const = 0;
virtual float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const = 0;
// Compute router lookahead (if needed).
virtual void compute(const std::vector<t_segment_inf>& segment_inf) = 0;
@ -53,7 +53,7 @@ const RouterLookahead* get_cached_router_lookahead(
class ClassicLookahead : public RouterLookahead {
public:
float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const override;
float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const override;
void compute(const std::vector<t_segment_inf>& /*segment_inf*/) override {
}
@ -65,12 +65,12 @@ class ClassicLookahead : public RouterLookahead {
}
private:
float classic_wire_lookahead_cost(int node, int target_node, float criticality, float R_upstream) const;
float classic_wire_lookahead_cost(const RRNodeId& node, const RRNodeId& target_node, float criticality, float R_upstream) const;
};
class MapLookahead : public RouterLookahead {
protected:
float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const override;
float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const override;
void compute(const std::vector<t_segment_inf>& segment_inf) override;
void read(const std::string& /*file*/) override {
VPR_THROW(VPR_ERROR_ROUTE, "MapLookahead::read unimplemented");
@ -82,7 +82,7 @@ class MapLookahead : public RouterLookahead {
class NoOpLookahead : public RouterLookahead {
protected:
float get_expected_cost(int node, int target_node, const t_conn_cost_params& params, float R_upstream) const override;
float get_expected_cost(const RRNodeId& node, const RRNodeId& target_node, const t_conn_cost_params& params, float R_upstream) const override;
void compute(const std::vector<t_segment_inf>& /*segment_inf*/) override {
}
void read(const std::string& /*file*/) override {

View File

@ -25,6 +25,7 @@
#include "vtr_log.h"
#include "vtr_assert.h"
#include "vtr_time.h"
#include "rr_graph_obj_util.h"
#include "router_lookahead_map.h"
/* the cost map is computed by running a Dijkstra search from channel segment rr nodes at the specified reference coordinate */
@ -131,7 +132,7 @@ class Expansion_Cost_Entry {
/* a class that represents an entry in the Dijkstra expansion priority queue */
class PQ_Entry {
public:
int rr_node_ind; //index in device_ctx.rr_nodes that this entry represents
RRNodeId rr_node_ind; //index in device_ctx.rr_nodes that this entry represents
float cost; //the cost of the path to get to this node
/* store backward delay, R and congestion info */
@ -139,7 +140,7 @@ class PQ_Entry {
float R_upstream;
float congestion_upstream;
PQ_Entry(int set_rr_node_ind, int /*switch_ind*/, float parent_delay, float parent_R_upstream, float parent_congestion_upstream, bool starting_node) {
PQ_Entry(const RRNodeId& set_rr_node_ind, int /*switch_ind*/, float parent_delay, float parent_R_upstream, float parent_congestion_upstream, bool starting_node) {
this->rr_node_ind = set_rr_node_ind;
auto& device_ctx = g_vpr_ctx.device();
@ -147,7 +148,7 @@ class PQ_Entry {
this->congestion_upstream = parent_congestion_upstream;
this->R_upstream = parent_R_upstream;
if (!starting_node) {
int cost_index = device_ctx.rr_nodes[set_rr_node_ind].cost_index();
int cost_index = device_ctx.rr_graph.node_cost_index(set_rr_node_ind);
//this->delay += g_rr_nodes[set_rr_node_ind].C() * (g_rr_switch_inf[switch_ind].R + 0.5*g_rr_nodes[set_rr_node_ind].R()) +
// g_rr_switch_inf[switch_ind].Tdel;
@ -191,12 +192,12 @@ t_cost_map f_cost_map;
static void alloc_cost_map(int num_segments);
static void free_cost_map();
/* returns index of a node from which to start routing */
static int get_start_node_ind(int start_x, int start_y, int target_x, int target_y, t_rr_type rr_type, int seg_index, int track_offset);
static RRNodeId get_start_node_ind(int start_x, int start_y, int target_x, int target_y, t_rr_type rr_type, int seg_index, int track_offset);
/* runs Dijkstra's algorithm from specified node until all nodes have been visited. Each time a pin is visited, the delay/congestion information
* to that pin is stored is added to an entry in the routing_cost_map */
static void run_dijkstra(int start_node_ind, int start_x, int start_y, t_routing_cost_map& routing_cost_map);
static void run_dijkstra(const RRNodeId& start_node_ind, int start_x, int start_y, t_routing_cost_map& routing_cost_map);
/* iterates over the children of the specified node and selectively pushes them onto the priority queue */
static void expand_dijkstra_neighbours(PQ_Entry parent_entry, std::vector<float>& node_visited_costs, std::vector<bool>& node_expanded, std::priority_queue<PQ_Entry>& pq);
static void expand_dijkstra_neighbours(PQ_Entry parent_entry, vtr::vector<RRNodeId, float>& node_visited_costs, vtr::vector<RRNodeId, bool>& node_expanded, std::priority_queue<PQ_Entry>& pq);
/* sets the lookahead cost map entries based on representative cost entries from routing_cost_map */
static void set_lookahead_map_costs(int segment_index, e_rr_type chan_type, t_routing_cost_map& routing_cost_map);
/* fills in missing lookahead map entries by copying the cost of the closest valid entry */
@ -204,19 +205,18 @@ static void fill_in_missing_lookahead_entries(int segment_index, e_rr_type chan_
/* returns a cost entry in the f_cost_map that is near the specified coordinates (and preferably towards (0,0)) */
static Cost_Entry get_nearby_cost_entry(int x, int y, int segment_index, int chan_index);
/* returns the absolute delta_x and delta_y offset required to reach to_node from from_node */
static void get_xy_deltas(int from_node_ind, int to_node_ind, int* delta_x, int* delta_y);
static void get_xy_deltas(const RRNodeId& from_node_ind, const RRNodeId& to_node_ind, int* delta_x, int* delta_y);
static void print_cost_map();
/******** Function Definitions ********/
/* queries the lookahead_map (should have been computed prior to routing) to get the expected cost
* from the specified source to the specified target */
float get_lookahead_map_cost(int from_node_ind, int to_node_ind, float criticality_fac) {
float get_lookahead_map_cost(const RRNodeId& from_node_ind, const RRNodeId& to_node_ind, float criticality_fac) {
auto& device_ctx = g_vpr_ctx.device();
auto& from_node = device_ctx.rr_nodes[from_node_ind];
e_rr_type from_type = from_node.type();
int from_cost_index = from_node.cost_index();
e_rr_type from_type = device_ctx.rr_graph.node_type(from_node_ind);
int from_cost_index = device_ctx.rr_graph.node_cost_index(from_node_ind);
int from_seg_index = device_ctx.rr_indexed_data[from_cost_index].seg_index;
VTR_ASSERT(from_seg_index >= 0);
@ -262,11 +262,11 @@ void compute_router_lookahead(int num_segments) {
for (int ref_inc = 0; ref_inc < 3; ref_inc++) {
for (int track_offset = 0; track_offset < MAX_TRACK_OFFSET; track_offset += 2) {
/* get the rr node index from which to start routing */
int start_node_ind = get_start_node_ind(REF_X + ref_inc, REF_Y + ref_inc,
RRNodeId start_node_ind = get_start_node_ind(REF_X + ref_inc, REF_Y + ref_inc,
device_ctx.grid.width() - 2, device_ctx.grid.height() - 2, //non-corner upper right
chan_type, iseg, track_offset);
if (start_node_ind == UNDEFINED) {
if (start_node_ind == RRNodeId::INVALID()) {
continue;
}
@ -289,8 +289,8 @@ void compute_router_lookahead(int num_segments) {
}
/* returns index of a node from which to start routing */
static int get_start_node_ind(int start_x, int start_y, int target_x, int target_y, t_rr_type rr_type, int seg_index, int track_offset) {
int result = UNDEFINED;
static RRNodeId get_start_node_ind(int start_x, int start_y, int target_x, int target_y, t_rr_type rr_type, int seg_index, int track_offset) {
RRNodeId result = RRNodeId::INVALID();
if (rr_type != CHANX && rr_type != CHANY) {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Must start lookahead routing from CHANX or CHANY node\n");
@ -306,14 +306,18 @@ static int get_start_node_ind(int start_x, int start_y, int target_x, int target
VTR_ASSERT(rr_type == CHANX || rr_type == CHANY);
const std::vector<int>& channel_node_list = device_ctx.rr_node_indices[rr_type][start_x][start_y][0];
std::vector<RRNodeId> channel_node_list;
/* As rr_node_indices is now an internal data of RRGraph object,
* we can get the number of tracks for a
* routing channel and then get the node one by one
*/
channel_node_list = find_rr_graph_chan_nodes(device_ctx.rr_graph, start_x, start_y, rr_type);
/* find first node in channel that has specified segment index and goes in the desired direction */
for (unsigned itrack = 0; itrack < channel_node_list.size(); itrack++) {
int node_ind = channel_node_list[itrack];
for (const RRNodeId& node_ind : channel_node_list) {
e_direction node_direction = device_ctx.rr_nodes[node_ind].direction();
int node_cost_ind = device_ctx.rr_nodes[node_ind].cost_index();
e_direction node_direction = device_ctx.rr_graph.node_direction(node_ind);
int node_cost_ind = device_ctx.rr_graph.node_cost_index(node_ind);
int node_seg_ind = device_ctx.rr_indexed_data[node_cost_ind].seg_index;
if ((node_direction == direction || node_direction == BI_DIRECTION) && node_seg_ind == seg_index) {
@ -343,14 +347,14 @@ static void free_cost_map() {
/* runs Dijkstra's algorithm from specified node until all nodes have been visited. Each time a pin is visited, the delay/congestion information
* to that pin is stored is added to an entry in the routing_cost_map */
static void run_dijkstra(int start_node_ind, int start_x, int start_y, t_routing_cost_map& routing_cost_map) {
static void run_dijkstra(const RRNodeId& start_node_ind, int start_x, int start_y, t_routing_cost_map& routing_cost_map) {
auto& device_ctx = g_vpr_ctx.device();
/* a list of boolean flags (one for each rr node) to figure out if a certain node has already been expanded */
std::vector<bool> node_expanded(device_ctx.rr_nodes.size(), false);
vtr::vector<RRNodeId, bool> node_expanded(device_ctx.rr_graph.nodes().size(), false);
/* for each node keep a list of the cost with which that node has been visited (used to determine whether to push
* a candidate node onto the expansion queue */
std::vector<float> node_visited_costs(device_ctx.rr_nodes.size(), -1.0);
vtr::vector<RRNodeId, float> node_visited_costs(device_ctx.rr_graph.nodes().size(), -1.0);
/* a priority queue for expansion */
std::priority_queue<PQ_Entry> pq;
@ -364,7 +368,7 @@ static void run_dijkstra(int start_node_ind, int start_x, int start_y, t_routing
PQ_Entry current = pq.top();
pq.pop();
int node_ind = current.rr_node_ind;
RRNodeId node_ind = current.rr_node_ind;
/* check that we haven't already expanded from this node */
if (node_expanded[node_ind]) {
@ -372,9 +376,9 @@ static void run_dijkstra(int start_node_ind, int start_x, int start_y, t_routing
}
/* if this node is an ipin record its congestion/delay in the routing_cost_map */
if (device_ctx.rr_nodes[node_ind].type() == IPIN) {
int ipin_x = device_ctx.rr_nodes[node_ind].xlow();
int ipin_y = device_ctx.rr_nodes[node_ind].ylow();
if (device_ctx.rr_graph.node_type(node_ind) == IPIN) {
int ipin_x = device_ctx.rr_graph.node_xlow(node_ind);
int ipin_y = device_ctx.rr_graph.node_ylow(node_ind);
if (ipin_x >= start_x && ipin_y >= start_y) {
int delta_x, delta_y;
@ -390,16 +394,14 @@ static void run_dijkstra(int start_node_ind, int start_x, int start_y, t_routing
}
/* iterates over the children of the specified node and selectively pushes them onto the priority queue */
static void expand_dijkstra_neighbours(PQ_Entry parent_entry, std::vector<float>& node_visited_costs, std::vector<bool>& node_expanded, std::priority_queue<PQ_Entry>& pq) {
static void expand_dijkstra_neighbours(PQ_Entry parent_entry, vtr::vector<RRNodeId, float>& node_visited_costs, vtr::vector<RRNodeId, bool>& node_expanded, std::priority_queue<PQ_Entry>& pq) {
auto& device_ctx = g_vpr_ctx.device();
int parent_ind = parent_entry.rr_node_ind;
RRNodeId parent_ind = parent_entry.rr_node_ind;
auto& parent_node = device_ctx.rr_nodes[parent_ind];
for (t_edge_size iedge = 0; iedge < parent_node.num_edges(); iedge++) {
int child_node_ind = parent_node.edge_sink_node(iedge);
int switch_ind = parent_node.edge_switch(iedge);
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(parent_ind)) {
RRNodeId child_node_ind = device_ctx.rr_graph.edge_sink_node(iedge);
int switch_ind = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
/* skip this child if it has already been expanded from */
if (node_expanded[child_node_ind]) {
@ -595,25 +597,22 @@ Cost_Entry Expansion_Cost_Entry::get_median_entry() {
}
/* returns the absolute delta_x and delta_y offset required to reach to_node from from_node */
static void get_xy_deltas(int from_node_ind, int to_node_ind, int* delta_x, int* delta_y) {
static void get_xy_deltas(const RRNodeId& from_node_ind, const RRNodeId& to_node_ind, int* delta_x, int* delta_y) {
auto& device_ctx = g_vpr_ctx.device();
auto& from = device_ctx.rr_nodes[from_node_ind];
auto& to = device_ctx.rr_nodes[to_node_ind];
/* get chan/seg coordinates of the from/to nodes. seg coordinate is along the wire,
* chan coordinate is orthogonal to the wire */
int from_seg_low = from.xlow();
int from_seg_high = from.xhigh();
int from_chan = from.ylow();
int to_seg = to.xlow();
int to_chan = to.ylow();
if (from.type() == CHANY) {
from_seg_low = from.ylow();
from_seg_high = from.yhigh();
from_chan = from.xlow();
to_seg = to.ylow();
to_chan = to.xlow();
int from_seg_low = device_ctx.rr_graph.node_xlow(from_node_ind);
int from_seg_high = device_ctx.rr_graph.node_xhigh(from_node_ind);
int from_chan = device_ctx.rr_graph.node_ylow(from_node_ind);
int to_seg = device_ctx.rr_graph.node_xlow(to_node_ind);
int to_chan = device_ctx.rr_graph.node_ylow(to_node_ind);
if (device_ctx.rr_graph.node_type(from_node_ind) == CHANY) {
from_seg_low = device_ctx.rr_graph.node_ylow(from_node_ind);
from_seg_high = device_ctx.rr_graph.node_yhigh(from_node_ind);
from_chan = device_ctx.rr_graph.node_xlow(from_node_ind);
to_seg = device_ctx.rr_graph.node_ylow(to_node_ind);
to_chan = device_ctx.rr_graph.node_xlow(to_node_ind);
}
/* now we want to count the minimum number of *channel segments* between the from and to nodes */
@ -649,13 +648,13 @@ static void get_xy_deltas(int from_node_ind, int to_node_ind, int* delta_x, int*
/* account for wire direction. lookahead map was computed by looking up and to the right starting at INC wires. for targets
* that are opposite of the wire direction, let's add 1 to delta_seg */
if ((to_seg < from_seg_low && from.direction() == INC_DIRECTION) || (to_seg > from_seg_high && from.direction() == DEC_DIRECTION)) {
if ((to_seg < from_seg_low && device_ctx.rr_graph.node_direction(from_node_ind) == INC_DIRECTION) || (to_seg > from_seg_high && device_ctx.rr_graph.node_direction(from_node_ind) == DEC_DIRECTION)) {
delta_seg++;
}
*delta_x = delta_seg;
*delta_y = delta_chan;
if (from.type() == CHANY) {
if (device_ctx.rr_graph.node_type(from_node_ind) == CHANY) {
*delta_x = delta_chan;
*delta_y = delta_seg;
}

View File

@ -6,4 +6,4 @@ void compute_router_lookahead(int num_segments);
/* queries the lookahead_map (should have been computed prior to routing) to get the expected cost
* from the specified source to the specified target */
float get_lookahead_map_cost(int from_node_ind, int to_node_ind, float criticality_fac);
float get_lookahead_map_cost(const RRNodeId& from_node_ind, const RRNodeId& to_node_ind, float criticality_fac);

File diff suppressed because it is too large Load Diff

View File

@ -43,7 +43,7 @@ void create_rr_graph(const t_graph_type graph_type,
void free_rr_graph();
//Returns a brief one-line summary of an RR node
std::string describe_rr_node(int inode);
std::string describe_rr_node(const RRNodeId& inode);
void init_fan_in(std::vector<t_rr_node>& L_rr_node, const int num_rr_nodes);

View File

@ -40,6 +40,7 @@ static void load_block_rr_indices(const DeviceGrid& grid,
static int get_bidir_track_to_chan_seg(const std::vector<int> conn_tracks,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const int to_chan,
const int to_seg,
const int to_sb,
@ -49,7 +50,7 @@ static int get_bidir_track_to_chan_seg(const std::vector<int> conn_tracks,
const int from_switch,
const int switch_override,
const enum e_directionality directionality,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create);
static int get_unidir_track_to_chan_seg(const int from_track,
@ -65,9 +66,10 @@ static int get_unidir_track_to_chan_seg(const int from_track,
t_sblock_pattern& sblock_pattern,
const int switch_override,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const t_chan_seg_details* seg_details,
bool* Fs_clipped,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create);
static int get_track_to_chan_seg(const int from_track,
@ -78,8 +80,9 @@ static int get_track_to_chan_seg(const int from_track,
const e_side to_side,
const int swtich_override,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
t_sb_connection_map* sb_conn_map,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create);
static int vpr_to_phy_track(const int itrack,
@ -664,14 +667,15 @@ int get_seg_end(const t_chan_seg_details* seg_details, const int itrack, const i
int get_bidir_opin_connections(const int i,
const int j,
const int ipin,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
const t_pin_to_track_lookup& opin_to_track_map,
const t_rr_node_indices& L_rr_node_indices,
const t_chan_details& chan_details_x,
const t_chan_details& chan_details_y) {
int num_conn, tr_i, tr_j, chan, seg;
int to_switch, to_node;
int to_switch;
RRNodeId to_node;
int is_connected_track;
t_physical_tile_type_ptr type;
t_rr_type to_type;
@ -730,9 +734,9 @@ int get_bidir_opin_connections(const int i,
/* Only connect to wire if there is a CB */
if (is_cblock(chan, seg, to_track, seg_details)) {
to_switch = seg_details[to_track].arch_wire_switch();
to_node = get_rr_node_index(L_rr_node_indices, tr_i, tr_j, to_type, to_track);
to_node = RRNodeId(get_rr_node_index(L_rr_node_indices, tr_i, tr_j, to_type, to_track));
if (to_node == OPEN) {
if (to_node == RRNodeId::INVALID()) {
continue;
}
@ -751,7 +755,7 @@ int get_unidir_opin_connections(const int chan,
const int seg_type_index,
const t_rr_type chan_type,
const t_chan_seg_details* seg_details,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
vtr::NdMatrix<int, 3>& Fc_ofs,
const int max_len,
@ -764,7 +768,7 @@ int get_unidir_opin_connections(const int chan,
int* inc_muxes = nullptr;
int* dec_muxes = nullptr;
int num_inc_muxes, num_dec_muxes, iconn;
int inc_inode_index, dec_inode_index;
RRNodeId inc_inode_index, dec_inode_index;
int inc_mux, dec_mux;
int inc_track, dec_track;
int x, y;
@ -808,10 +812,10 @@ int get_unidir_opin_connections(const int chan,
dec_track = dec_muxes[dec_mux];
/* Figure the inodes of those muxes */
inc_inode_index = get_rr_node_index(L_rr_node_indices, x, y, chan_type, inc_track);
dec_inode_index = get_rr_node_index(L_rr_node_indices, x, y, chan_type, dec_track);
inc_inode_index = get_rr_graph_node_index(L_rr_node_indices, x, y, chan_type, inc_track);
dec_inode_index = get_rr_graph_node_index(L_rr_node_indices, x, y, chan_type, dec_track);
if (inc_inode_index == OPEN || dec_inode_index == OPEN) {
if (inc_inode_index == RRNodeId::INVALID() || dec_inode_index == RRNodeId::INVALID()) {
continue;
}
@ -1023,6 +1027,36 @@ void dump_sblock_pattern(const t_sblock_pattern& sblock_pattern,
fclose(fp);
}
/********************************************************************
* Create all the nodes for routing tracks
* num_chans : number of routing channels to be created
* chan_len : maximum length of routing channels
*
* X-direction channel
* |<-------chan_len-------->|
* -----+--------------------------
* ^ |
* | |
* | |
* num_chans | FPGA
* | |
* | |
* v |
* -----+--------------------------
*
* Y-direction channel
* |<-------num_chans-------->|
* -----+--------------------------
* ^ |
* | |
* | |
* chan_len | FPGA
* | |
* | |
* v |
* -----+--------------------------
*
*******************************************************************/
static void load_chan_rr_indices(const int max_chan_width,
const int chan_len,
const int num_chans,
@ -1068,6 +1102,7 @@ static void load_chan_rr_indices(const int max_chan_width,
/* Assign inode of start of wire to current position */
indices[type][chan][seg][0][track] = inode;
}
}
}
@ -1097,8 +1132,6 @@ static void load_block_rr_indices(const DeviceGrid& grid,
}
++(*index);
}
VTR_ASSERT(indices[SOURCE][x][y][0].size() == size_t(type->num_class));
VTR_ASSERT(indices[SINK][x][y][0].size() == size_t(type->num_class));
//Assign indicies for IPINs and OPINs at all offsets from root
for (int ipin = 0; ipin < type->num_pins; ++ipin) {
@ -1120,6 +1153,7 @@ static void load_block_rr_indices(const DeviceGrid& grid,
indices[OPIN][x_tile][y_tile][side].push_back(OPEN);
}
++(*index);
} else {
indices[IPIN][x_tile][y_tile][side].push_back(OPEN);
indices[OPIN][x_tile][y_tile][side].push_back(OPEN);
@ -1128,7 +1162,6 @@ static void load_block_rr_indices(const DeviceGrid& grid,
}
}
}
//Sanity check
for (int width_offset = 0; width_offset < type->width; ++width_offset) {
int x_tile = x + width_offset;
@ -1150,7 +1183,11 @@ static void load_block_rr_indices(const DeviceGrid& grid,
for (size_t y = 0; y < grid.height(); y++) {
int width_offset = grid[x][y].width_offset;
int height_offset = grid[x][y].height_offset;
if (width_offset != 0 || height_offset != 0) {
/* We only care the largest offset in both x and y direction
* this can avoid runtime cost to rebuild the fast look-up inside RRGraph object
*/
if ( (grid[x][y].type->height == height_offset && grid[x][y].type->width == width_offset)
&& (0 != height_offset || 0 != width_offset) ) {
int root_x = x - width_offset;
int root_y = y - height_offset;
@ -1194,6 +1231,7 @@ t_rr_node_indices alloc_and_load_rr_node_indices(const int max_chan_width,
}
}
/* Assign indices for block nodes */
load_block_rr_indices(grid, indices, index);
@ -1219,6 +1257,44 @@ std::vector<int> get_rr_node_chan_wires_at_location(const t_rr_node_indices& L_r
return L_rr_node_indices[rr_type][x][y][SIDES[0]];
}
/********************************************************************
* A wrapper for the old function to return a vector of RRNodeId
*******************************************************************/
std::vector<RRNodeId> get_rr_graph_node_indices(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
t_rr_type rr_type,
int ptc) {
/*
* Like get_rr_node_index() but returns all matching nodes,
* rather than just the first. This is particularly useful for getting all instances
* of a specific IPIN/OPIN at a specific gird tile (x,y) location.
*/
std::vector<RRNodeId> indices;
if (rr_type == IPIN || rr_type == OPIN) {
//For pins we need to look at all the sides of the current grid tile
for (e_side side : SIDES) {
int rr_node_index = get_rr_node_index(L_rr_node_indices, x, y, rr_type, ptc, side);
if (rr_node_index >= 0) {
indices.push_back(RRNodeId(rr_node_index));
}
}
} else {
//Sides do not effect non-pins so there should only be one per ptc
int rr_node_index = get_rr_node_index(L_rr_node_indices, x, y, rr_type, ptc);
if (rr_node_index != OPEN) {
indices.push_back(RRNodeId(rr_node_index));
}
}
return indices;
}
std::vector<int> get_rr_node_indices(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
@ -1253,6 +1329,15 @@ std::vector<int> get_rr_node_indices(const t_rr_node_indices& L_rr_node_indices,
return indices;
}
RRNodeId get_rr_graph_node_index(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
t_rr_type rr_type,
int ptc,
e_side side) {
return RRNodeId(get_rr_node_index(L_rr_node_indices, x, y, rr_type, ptc, side));
}
int get_rr_node_index(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
@ -1343,29 +1428,29 @@ int get_rr_node_index(const t_rr_node_indices& L_rr_node_indices,
return ((unsigned)ptc < lookup.size() ? lookup[ptc] : -1);
}
int find_average_rr_node_index(int device_width,
int device_height,
t_rr_type rr_type,
int ptc,
const t_rr_node_indices& L_rr_node_indices) {
RRNodeId find_average_rr_node_index(int device_width,
int device_height,
t_rr_type rr_type,
int ptc,
const RRGraph& rr_graph) {
/* Find and return the index to a rr_node that is located at the "center" *
* of the current grid array, if possible. In the event the "center" of *
* the grid array is an EMPTY or IO node, then retry alterate locations. *
* Worst case, this function will simply return the 1st non-EMPTY and *
* non-IO node. */
int inode = get_rr_node_index(L_rr_node_indices, (device_width) / 2, (device_height) / 2,
rr_type, ptc);
RRNodeId inode = rr_graph.find_node((device_width) / 2, (device_height) / 2,
rr_type, ptc);
if (inode == OPEN) {
inode = get_rr_node_index(L_rr_node_indices, (device_width) / 4, (device_height) / 4,
rr_type, ptc);
if (inode == RRNodeId::INVALID()) {
inode = rr_graph.find_node((device_width) / 4, (device_height) / 4,
rr_type, ptc);
}
if (inode == OPEN) {
inode = get_rr_node_index(L_rr_node_indices, (device_width) / 4 * 3, (device_height) / 4 * 3,
rr_type, ptc);
if (inode == RRNodeId::INVALID()) {
inode = rr_graph.find_node((device_width) / 4 * 3, (device_height) / 4 * 3,
rr_type, ptc);
}
if (inode == OPEN) {
if (inode == RRNodeId::INVALID()) {
auto& device_ctx = g_vpr_ctx.device();
for (int x = 0; x < device_width; ++x) {
@ -1375,11 +1460,11 @@ int find_average_rr_node_index(int device_width,
if (is_io_type(device_ctx.grid[x][y].type))
continue;
inode = get_rr_node_index(L_rr_node_indices, x, y, rr_type, ptc);
if (inode != OPEN)
inode = rr_graph.find_node(x, y, rr_type, ptc);
if (inode != RRNodeId::INVALID())
break;
}
if (inode != OPEN)
if (inode != RRNodeId::INVALID())
break;
}
}
@ -1390,9 +1475,10 @@ int get_track_to_pins(int seg,
int chan,
int track,
int tracks_per_chan,
int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const t_track_to_pin_lookup& track_to_pin_lookup,
const t_chan_seg_details* seg_details,
enum e_rr_type chan_type,
@ -1450,8 +1536,8 @@ int get_track_to_pins(int seg,
/* Check there is a connection and Fc map isn't wrong */
/*int to_node = get_rr_node_index(L_rr_node_indices, x + width_offset, y + height_offset, IPIN, ipin, side);*/
int to_node = get_rr_node_index(L_rr_node_indices, x, y, IPIN, ipin, side);
if (to_node >= 0) {
RRNodeId to_node = RRNodeId(get_rr_node_index(L_rr_node_indices, x, y, IPIN, ipin, side));
if (rr_graph.valid_node_id(to_node)) {
rr_edges_to_create.emplace_back(from_rr_node, to_node, wire_to_ipin_switch);
++num_conn;
}
@ -1491,13 +1577,14 @@ int get_track_to_tracks(const int from_chan,
const DeviceGrid& grid,
const int Fs_per_side,
t_sblock_pattern& sblock_pattern,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
const t_chan_seg_details* from_seg_details,
const t_chan_seg_details* to_seg_details,
const t_chan_details& to_chan_details,
const enum e_directionality directionality,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const vtr::NdMatrix<std::vector<int>, 3>& switch_block_conn,
t_sb_connection_map* sb_conn_map) {
int to_chan, to_sb;
@ -1630,7 +1717,7 @@ int get_track_to_tracks(const int from_chan,
num_conn += get_track_to_chan_seg(from_track, to_chan, to_seg,
to_type, from_side_a, to_side,
switch_override,
L_rr_node_indices,
L_rr_node_indices, rr_graph,
sb_conn_map, from_rr_node, rr_edges_to_create);
}
} else {
@ -1639,7 +1726,7 @@ int get_track_to_tracks(const int from_chan,
* switchbox, so we follow through regardless of whether the current segment has an SB */
conn_tracks = switch_block_conn[from_side_a][to_side][from_track];
num_conn += get_bidir_track_to_chan_seg(conn_tracks,
L_rr_node_indices, to_chan, to_seg, to_sb, to_type,
L_rr_node_indices, rr_graph, to_chan, to_seg, to_sb, to_type,
to_seg_details, from_is_sblock, from_switch,
switch_override,
directionality, from_rr_node, rr_edges_to_create);
@ -1654,7 +1741,7 @@ int get_track_to_tracks(const int from_chan,
from_side_a, to_side, Fs_per_side,
sblock_pattern,
switch_override,
L_rr_node_indices, to_seg_details,
L_rr_node_indices, rr_graph, to_seg_details,
&Fs_clipped, from_rr_node, rr_edges_to_create);
}
}
@ -1669,7 +1756,7 @@ int get_track_to_tracks(const int from_chan,
num_conn += get_track_to_chan_seg(from_track, to_chan, to_seg,
to_type, from_side_b, to_side,
switch_override,
L_rr_node_indices,
L_rr_node_indices, rr_graph,
sb_conn_map, from_rr_node, rr_edges_to_create);
}
} else {
@ -1678,7 +1765,7 @@ int get_track_to_tracks(const int from_chan,
* switchbox, so we follow through regardless of whether the current segment has an SB */
conn_tracks = switch_block_conn[from_side_b][to_side][from_track];
num_conn += get_bidir_track_to_chan_seg(conn_tracks,
L_rr_node_indices, to_chan, to_seg, to_sb, to_type,
L_rr_node_indices, rr_graph, to_chan, to_seg, to_sb, to_type,
to_seg_details, from_is_sblock, from_switch,
switch_override,
directionality, from_rr_node, rr_edges_to_create);
@ -1694,7 +1781,7 @@ int get_track_to_tracks(const int from_chan,
from_side_b, to_side, Fs_per_side,
sblock_pattern,
switch_override,
L_rr_node_indices, to_seg_details,
L_rr_node_indices, rr_graph, to_seg_details,
&Fs_clipped, from_rr_node, rr_edges_to_create);
}
}
@ -1707,6 +1794,7 @@ int get_track_to_tracks(const int from_chan,
static int get_bidir_track_to_chan_seg(const std::vector<int> conn_tracks,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const int to_chan,
const int to_seg,
const int to_sb,
@ -1716,10 +1804,11 @@ static int get_bidir_track_to_chan_seg(const std::vector<int> conn_tracks,
const int from_switch,
const int switch_override,
const enum e_directionality directionality,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create) {
unsigned iconn;
int to_track, to_node, to_switch, num_conn, to_x, to_y, i;
int to_track, to_switch, num_conn, to_x, to_y, i;
RRNodeId to_node;
bool to_is_sblock;
short switch_types[2];
@ -1737,9 +1826,9 @@ static int get_bidir_track_to_chan_seg(const std::vector<int> conn_tracks,
num_conn = 0;
for (iconn = 0; iconn < conn_tracks.size(); ++iconn) {
to_track = conn_tracks[iconn];
to_node = get_rr_node_index(L_rr_node_indices, to_x, to_y, to_type, to_track);
to_node = get_rr_graph_node_index(L_rr_node_indices, to_x, to_y, to_type, to_track);
if (to_node == OPEN) {
if (false == rr_graph.valid_node_id(to_node)) {
continue;
}
@ -1781,8 +1870,9 @@ static int get_track_to_chan_seg(const int from_wire,
const e_side to_side,
const int switch_override,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
t_sb_connection_map* sb_conn_map,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create) {
int edge_count = 0;
int to_x, to_y;
@ -1816,9 +1906,9 @@ static int get_track_to_chan_seg(const int from_wire,
if (conn_vector.at(iconn).from_wire != from_wire) continue;
int to_wire = conn_vector.at(iconn).to_wire;
int to_node = get_rr_node_index(L_rr_node_indices, to_x, to_y, to_chan_type, to_wire);
RRNodeId to_node = RRNodeId(get_rr_node_index(L_rr_node_indices, to_x, to_y, to_chan_type, to_wire));
if (to_node == OPEN) {
if (false == rr_graph.valid_node_id(to_node)) {
continue;
}
@ -1860,9 +1950,10 @@ static int get_unidir_track_to_chan_seg(const int from_track,
t_sblock_pattern& sblock_pattern,
const int switch_override,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const t_chan_seg_details* seg_details,
bool* Fs_clipped,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create) {
int num_labels = 0;
int* mux_labels = nullptr;
@ -1916,9 +2007,9 @@ static int get_unidir_track_to_chan_seg(const int from_track,
sblock_pattern[sb_x][sb_y][from_side][to_side][from_track][j + 1] = to_track;
}
int to_node = get_rr_node_index(L_rr_node_indices, to_x, to_y, to_type, to_track);
RRNodeId to_node = get_rr_graph_node_index(L_rr_node_indices, to_x, to_y, to_type, to_track);
if (to_node == OPEN) {
if (false == rr_graph.valid_node_id(to_node)) {
continue;
}

View File

@ -15,21 +15,29 @@ enum e_seg_details_type {
};
struct t_rr_edge_info {
t_rr_edge_info(int from, int to, short type) noexcept
t_rr_edge_info(RRNodeId from, RRNodeId to, short type) noexcept
: from_node(from)
, to_node(to)
, switch_type(type) {}
int from_node = OPEN;
int to_node = OPEN;
RRNodeId from_node = RRNodeId::INVALID();
RRNodeId to_node = RRNodeId::INVALID();
short switch_type = OPEN;
friend bool operator<(const t_rr_edge_info& lhs, const t_rr_edge_info& rhs) {
return std::tie(lhs.from_node, lhs.to_node, lhs.switch_type) < std::tie(rhs.from_node, rhs.to_node, rhs.switch_type);
size_t lhs_from_node = size_t(lhs.from_node);
size_t lhs_to_node = size_t(lhs.to_node);
size_t rhs_from_node = size_t(rhs.from_node);
size_t rhs_to_node = size_t(rhs.to_node);
return std::tie(lhs_from_node, lhs_to_node, lhs.switch_type) < std::tie(rhs_from_node, rhs_to_node, rhs.switch_type);
}
friend bool operator==(const t_rr_edge_info& lhs, const t_rr_edge_info& rhs) {
return std::tie(lhs.from_node, lhs.to_node, lhs.switch_type) == std::tie(rhs.from_node, rhs.to_node, rhs.switch_type);
size_t lhs_from_node = size_t(lhs.from_node);
size_t lhs_to_node = size_t(lhs.to_node);
size_t rhs_from_node = size_t(rhs.from_node);
size_t rhs_to_node = size_t(rhs.to_node);
return std::tie(lhs_from_node, lhs_to_node, lhs.switch_type) == std::tie(rhs_from_node, rhs_to_node, rhs.switch_type);
}
};
@ -51,7 +59,14 @@ int get_rr_node_index(int x,
int ptc,
const t_rr_node_indices& L_rr_node_indices);
//Returns all the rr nodes associated with the specified coordinate (i.e. accross sides)
std::vector<RRNodeId> get_rr_graph_node_indices(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
t_rr_type rr_type,
int ptc);
std::vector<int> get_rr_node_indices(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
@ -66,6 +81,13 @@ std::vector<int> get_rr_node_chan_wires_at_location(const t_rr_node_indices& L_r
//Return the first rr node of the specified type and coordinates
// For non-IPIN/OPIN types 'side' is ignored
RRNodeId get_rr_graph_node_index(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
t_rr_type rr_type,
int ptc,
e_side side = NUM_SIDES);
int get_rr_node_index(const t_rr_node_indices& L_rr_node_indices,
int x,
int y,
@ -73,11 +95,11 @@ int get_rr_node_index(const t_rr_node_indices& L_rr_node_indices,
int ptc,
e_side side = NUM_SIDES);
int find_average_rr_node_index(int device_width,
int device_height,
t_rr_type rr_type,
int ptc,
const t_rr_node_indices& L_rr_node_indices);
RRNodeId find_average_rr_node_index(int device_width,
int device_height,
t_rr_type rr_type,
int ptc,
const RRGraph& rr_graph);
t_seg_details* alloc_and_load_seg_details(int* max_chan_width,
const int max_len,
@ -145,7 +167,7 @@ bool is_sblock(const int chan,
int get_bidir_opin_connections(const int i,
const int j,
const int ipin,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
const t_pin_to_track_lookup& opin_to_track_map,
const t_rr_node_indices& L_rr_node_indices,
@ -158,7 +180,7 @@ int get_unidir_opin_connections(const int chan,
const int seg_type_index,
const t_rr_type chan_type,
const t_chan_seg_details* seg_details,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
vtr::NdMatrix<int, 3>& Fc_ofs,
const int max_len,
@ -170,9 +192,10 @@ int get_track_to_pins(int seg,
int chan,
int track,
int tracks_per_chan,
int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const t_track_to_pin_lookup& track_to_pin_lookup,
const t_chan_seg_details* seg_details,
enum e_rr_type chan_type,
@ -191,13 +214,14 @@ int get_track_to_tracks(const int from_chan,
const DeviceGrid& grid,
const int Fs_per_side,
t_sblock_pattern& sblock_pattern,
const int from_rr_node,
const RRNodeId& from_rr_node,
t_rr_edge_info_set& rr_edges_to_create,
const t_chan_seg_details* from_seg_details,
const t_chan_seg_details* to_seg_details,
const t_chan_details& to_chan_details,
const enum e_directionality directionality,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const vtr::NdMatrix<std::vector<int>, 3>& switch_block_conn,
t_sb_connection_map* sb_conn_map);

View File

@ -33,7 +33,7 @@ static void count_unidir_routing_transistors(std::vector<t_segment_inf>& segment
float R_minW_pmos,
const float trans_sram_bit);
static float get_cblock_trans(int* num_inputs_to_cblock, int wire_to_ipin_switch, int max_inputs_to_cblock, float trans_sram_bit);
static float get_cblock_trans(const vtr::vector<RRNodeId, int>& num_inputs_to_cblock, int wire_to_ipin_switch, int max_inputs_to_cblock, float trans_sram_bit);
static float* alloc_and_load_unsharable_switch_trans(int num_switch,
float trans_sram_bit,
@ -101,7 +101,7 @@ void count_bidir_routing_transistors(int num_switch, int wire_to_ipin_switch, fl
* optimistic (but I still think it's pretty reasonable). */
auto& device_ctx = g_vpr_ctx.device();
int* num_inputs_to_cblock; /* [0..device_ctx.rr_nodes.size()-1], but all entries not */
vtr::vector<RRNodeId, int> num_inputs_to_cblock; /* [0..device_ctx.rr_nodes.size()-1], but all entries not */
/* corresponding to IPINs will be 0. */
@ -110,7 +110,7 @@ void count_bidir_routing_transistors(int num_switch, int wire_to_ipin_switch, fl
float *unsharable_switch_trans, *sharable_switch_trans; /* [0..num_switch-1] */
t_rr_type from_rr_type, to_rr_type;
int iedge, num_edges, maxlen;
int maxlen;
int iswitch, i, j, iseg, max_inputs_to_cblock;
float input_cblock_trans, shared_opin_buffer_trans;
@ -144,7 +144,7 @@ void count_bidir_routing_transistors(int num_switch, int wire_to_ipin_switch, fl
trans_track_to_cblock_buf = 0;
}
num_inputs_to_cblock = (int*)vtr::calloc(device_ctx.rr_nodes.size(), sizeof(int));
num_inputs_to_cblock.resize(device_ctx.rr_graph.nodes().size(), 0);
maxlen = std::max(device_ctx.grid.width(), device_ctx.grid.height());
cblock_counted = (bool*)vtr::calloc(maxlen, sizeof(bool));
@ -156,29 +156,27 @@ void count_bidir_routing_transistors(int num_switch, int wire_to_ipin_switch, fl
sharable_switch_trans = alloc_and_load_sharable_switch_trans(num_switch,
R_minW_nmos, R_minW_pmos);
for (size_t from_node = 0; from_node < device_ctx.rr_nodes.size(); from_node++) {
from_rr_type = device_ctx.rr_nodes[from_node].type();
for (const RRNodeId& from_node : device_ctx.rr_graph.nodes()) {
from_rr_type = device_ctx.rr_graph.node_type(from_node);
switch (from_rr_type) {
case CHANX:
case CHANY:
num_edges = device_ctx.rr_nodes[from_node].num_edges();
for (iedge = 0; iedge < num_edges; iedge++) {
size_t to_node = device_ctx.rr_nodes[from_node].edge_sink_node(iedge);
to_rr_type = device_ctx.rr_nodes[to_node].type();
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(from_node)) {
RRNodeId to_node = device_ctx.rr_graph.edge_sink_node(iedge);
to_rr_type = device_ctx.rr_graph.node_type(to_node);
/* Ignore any uninitialized rr_graph nodes */
if ((device_ctx.rr_nodes[to_node].type() == SOURCE)
&& (device_ctx.rr_nodes[to_node].xlow() == 0) && (device_ctx.rr_nodes[to_node].ylow() == 0)
&& (device_ctx.rr_nodes[to_node].xhigh() == 0) && (device_ctx.rr_nodes[to_node].yhigh() == 0)) {
if ((device_ctx.rr_graph.node_type(to_node) == SOURCE)
&& (device_ctx.rr_graph.node_xlow(to_node) == 0) && (device_ctx.rr_graph.node_ylow(to_node) == 0)
&& (device_ctx.rr_graph.node_xhigh(to_node) == 0) && (device_ctx.rr_graph.node_yhigh(to_node) == 0)) {
continue;
}
switch (to_rr_type) {
case CHANX:
case CHANY:
iswitch = device_ctx.rr_nodes[from_node].edge_switch(iedge);
iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));
if (device_ctx.rr_switch_inf[iswitch].buffered()) {
iseg = seg_index_of_sblock(from_node, to_node);
@ -214,8 +212,8 @@ void count_bidir_routing_transistors(int num_switch, int wire_to_ipin_switch, fl
default:
VPR_ERROR(VPR_ERROR_ROUTE,
"in count_routing_transistors:\n"
"\tUnexpected connection from node %d (type %s) to node %d (type %s).\n",
from_node, rr_node_typename[from_rr_type], to_node, rr_node_typename[to_rr_type]);
"\tUnexpected connection from node %ld (type %s) to node %ld (type %s).\n",
size_t(from_node), rr_node_typename[from_rr_type], size_t(to_node), rr_node_typename[to_rr_type]);
break;
} /* End switch on to_rr_type. */
@ -225,35 +223,34 @@ void count_bidir_routing_transistors(int num_switch, int wire_to_ipin_switch, fl
/* Now add in the shared buffer transistors, and reset some flags. */
if (from_rr_type == CHANX) {
for (i = device_ctx.rr_nodes[from_node].xlow() - 1;
i <= device_ctx.rr_nodes[from_node].xhigh(); i++) {
for (i = device_ctx.rr_graph.node_xlow(from_node) - 1;
i <= device_ctx.rr_graph.node_xhigh(from_node); i++) {
ntrans_sharing += shared_buffer_trans[i];
shared_buffer_trans[i] = 0.;
}
for (i = device_ctx.rr_nodes[from_node].xlow(); i <= device_ctx.rr_nodes[from_node].xhigh();
for (i = device_ctx.rr_graph.node_xlow(from_node); i <= device_ctx.rr_graph.node_xhigh(from_node);
i++)
cblock_counted[i] = false;
} else { /* CHANY */
for (j = device_ctx.rr_nodes[from_node].ylow() - 1;
j <= device_ctx.rr_nodes[from_node].yhigh(); j++) {
for (j = device_ctx.rr_graph.node_ylow(from_node) - 1;
j <= device_ctx.rr_graph.node_yhigh(from_node); j++) {
ntrans_sharing += shared_buffer_trans[j];
shared_buffer_trans[j] = 0.;
}
for (j = device_ctx.rr_nodes[from_node].ylow(); j <= device_ctx.rr_nodes[from_node].yhigh();
for (j = device_ctx.rr_graph.node_ylow(from_node); j <= device_ctx.rr_graph.node_yhigh(from_node);
j++)
cblock_counted[j] = false;
}
break;
case OPIN:
num_edges = device_ctx.rr_nodes[from_node].num_edges();
shared_opin_buffer_trans = 0.;
for (iedge = 0; iedge < num_edges; iedge++) {
iswitch = device_ctx.rr_nodes[from_node].edge_switch(iedge);
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(from_node)) {
iswitch = (short)size_t(device_ctx.rr_graph.edge_switch(iedge));
ntrans_no_sharing += unsharable_switch_trans[iswitch]
+ sharable_switch_trans[iswitch];
ntrans_sharing += unsharable_switch_trans[iswitch];
@ -281,7 +278,7 @@ void count_bidir_routing_transistors(int num_switch, int wire_to_ipin_switch, fl
input_cblock_trans = get_cblock_trans(num_inputs_to_cblock, wire_to_ipin_switch,
max_inputs_to_cblock, trans_sram_bit);
free(num_inputs_to_cblock);
num_inputs_to_cblock.clear();
ntrans_sharing += input_cblock_trans;
ntrans_no_sharing += input_cblock_trans;
@ -303,12 +300,12 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
auto& device_ctx = g_vpr_ctx.device();
bool* cblock_counted; /* [0..max(device_ctx.grid.width(),device_ctx.grid.height())] -- 0th element unused. */
int* num_inputs_to_cblock; /* [0..device_ctx.rr_nodes.size()-1], but all entries not */
vtr::vector<RRNodeId, int> num_inputs_to_cblock; /* [0..device_ctx.rr_nodes.size()-1], but all entries not */
/* corresponding to IPINs will be 0. */
t_rr_type from_rr_type, to_rr_type;
int i, j, iseg, to_node, iedge, num_edges, maxlen;
int i, j, iseg, maxlen;
int max_inputs_to_cblock;
float input_cblock_trans;
@ -317,8 +314,7 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
* a single mux. We should count this mux only once as we look at the outgoing
* switches of all rr nodes. Thus we keep track of which muxes we have already
* counted via the variable below. */
bool* chan_node_switch_done;
chan_node_switch_done = (bool*)vtr::calloc(device_ctx.rr_nodes.size(), sizeof(bool));
vtr::vector<RRNodeId, bool> chan_node_switch_done(device_ctx.rr_graph.nodes().size(), false);
/* The variable below is an accumulator variable that will add up all the *
* transistors in the routing. Make double so that it doesn't stop *
@ -348,28 +344,26 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
trans_track_to_cblock_buf = 0;
}
num_inputs_to_cblock = (int*)vtr::calloc(device_ctx.rr_nodes.size(), sizeof(int));
num_inputs_to_cblock.resize(device_ctx.rr_graph.nodes().size(), 0);
maxlen = std::max(device_ctx.grid.width(), device_ctx.grid.height());
cblock_counted = (bool*)vtr::calloc(maxlen, sizeof(bool));
ntrans = 0;
for (size_t from_node = 0; from_node < device_ctx.rr_nodes.size(); from_node++) {
from_rr_type = device_ctx.rr_nodes[from_node].type();
for (const RRNodeId& from_node : device_ctx.rr_graph.nodes()) {
from_rr_type = device_ctx.rr_graph.node_type(from_node);
switch (from_rr_type) {
case CHANX:
case CHANY:
num_edges = device_ctx.rr_nodes[from_node].num_edges();
/* Increment number of inputs per cblock if IPIN */
for (iedge = 0; iedge < num_edges; iedge++) {
to_node = device_ctx.rr_nodes[from_node].edge_sink_node(iedge);
to_rr_type = device_ctx.rr_nodes[to_node].type();
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(from_node)) {
RRNodeId to_node = device_ctx.rr_graph.edge_sink_node(iedge);
to_rr_type = device_ctx.rr_graph.node_type(to_node);
/* Ignore any uninitialized rr_graph nodes */
if ((device_ctx.rr_nodes[to_node].type() == SOURCE)
&& (device_ctx.rr_nodes[to_node].xlow() == 0) && (device_ctx.rr_nodes[to_node].ylow() == 0)
&& (device_ctx.rr_nodes[to_node].xhigh() == 0) && (device_ctx.rr_nodes[to_node].yhigh() == 0)) {
if ((device_ctx.rr_graph.node_type(to_node) == SOURCE)
&& (device_ctx.rr_graph.node_xlow(to_node) == 0) && (device_ctx.rr_graph.node_ylow(to_node) == 0)
&& (device_ctx.rr_graph.node_xhigh(to_node) == 0) && (device_ctx.rr_graph.node_yhigh(to_node) == 0)) {
continue;
}
@ -377,10 +371,10 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
case CHANX:
case CHANY:
if (!chan_node_switch_done[to_node]) {
int switch_index = device_ctx.rr_nodes[from_node].edge_switch(iedge);
int switch_index = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
auto switch_type = device_ctx.rr_switch_inf[switch_index].type();
int fan_in = device_ctx.rr_nodes[to_node].fan_in();
int fan_in = device_ctx.rr_graph.node_in_edges(to_node).size();
if (device_ctx.rr_switch_inf[switch_index].type() == SwitchType::MUX) {
/* Each wire segment begins with a multiplexer followed by a driver for unidirectional */
@ -434,8 +428,8 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
default:
VPR_ERROR(VPR_ERROR_ROUTE,
"in count_routing_transistors:\n"
"\tUnexpected connection from node %d (type %d) to node %d (type %d).\n",
from_node, from_rr_type, to_node, to_rr_type);
"\tUnexpected connection from node %ld (type %d) to node %ld (type %d).\n",
size_t(from_node), from_rr_type, size_t(to_node), to_rr_type);
break;
} /* End switch on to_rr_type. */
@ -444,11 +438,11 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
/* Reset some flags */
if (from_rr_type == CHANX) {
for (i = device_ctx.rr_nodes[from_node].xlow(); i <= device_ctx.rr_nodes[from_node].xhigh(); i++)
for (i = device_ctx.rr_graph.node_xlow(from_node); i <= device_ctx.rr_graph.node_xhigh(from_node); i++)
cblock_counted[i] = false;
} else { /* CHANY */
for (j = device_ctx.rr_nodes[from_node].ylow(); j <= device_ctx.rr_nodes[from_node].yhigh();
for (j = device_ctx.rr_graph.node_ylow(from_node); j <= device_ctx.rr_graph.node_yhigh(from_node);
j++)
cblock_counted[j] = false;
}
@ -468,8 +462,8 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
max_inputs_to_cblock, trans_sram_bit);
free(cblock_counted);
free(num_inputs_to_cblock);
free(chan_node_switch_done);
num_inputs_to_cblock.clear();
chan_node_switch_done.clear();
ntrans += input_cblock_trans;
@ -478,7 +472,7 @@ void count_unidir_routing_transistors(std::vector<t_segment_inf>& /*segment_inf*
VTR_LOG("\tTotal routing area: %#g, per logic tile: %#g\n", ntrans, ntrans / (float)(device_ctx.grid.width() * device_ctx.grid.height()));
}
static float get_cblock_trans(int* num_inputs_to_cblock, int wire_to_ipin_switch, int max_inputs_to_cblock, float trans_sram_bit) {
static float get_cblock_trans(const vtr::vector<RRNodeId, int>& num_inputs_to_cblock, int wire_to_ipin_switch, int max_inputs_to_cblock, float trans_sram_bit) {
/* Computes the transistors in the input connection block multiplexers and *
* the buffers from connection block outputs to the logic block input pins. *
* For speed, I precompute the number of transistors in the multiplexers of *
@ -506,7 +500,7 @@ static float get_cblock_trans(int* num_inputs_to_cblock, int wire_to_ipin_switch
trans_count = 0.;
for (size_t i = 0; i < device_ctx.rr_nodes.size(); i++) {
for (const RRNodeId& i : device_ctx.rr_graph.nodes()) {
num_inputs = num_inputs_to_cblock[i];
trans_count += trans_per_cblock[num_inputs];
}

View File

@ -24,22 +24,20 @@ void ClockRRGraphBuilder::create_and_append_clock_rr_graph(std::vector<t_segment
auto& clock_networks = device_ctx.clock_networks;
auto& clock_routing = device_ctx.clock_connections;
size_t clock_nodes_start_idx = device_ctx.rr_nodes.size();
size_t clock_nodes_start_idx = device_ctx.rr_graph.nodes().size();
ClockRRGraphBuilder clock_graph = ClockRRGraphBuilder();
clock_graph.create_clock_networks_wires(clock_networks, segment_inf.size());
clock_graph.create_clock_networks_switches(clock_routing);
// Reset fanin to account for newly added clock rr_nodes
init_fan_in(device_ctx.rr_nodes, device_ctx.rr_nodes.size());
clock_graph.add_rr_switches_and_map_to_nodes(clock_nodes_start_idx, R_minW_nmos, R_minW_pmos);
// "Partition the rr graph edges for efficient access to configurable/non-configurable
// edge subsets. Must be done after RR switches have been allocated"
partition_rr_graph_edges(device_ctx);
device_ctx.rr_graph.rebuild_node_edges();
alloc_and_load_rr_indexed_data(segment_inf, device_ctx.rr_node_indices,
alloc_and_load_rr_indexed_data(segment_inf, device_ctx.rr_graph,
chan_width->max, wire_to_rr_ipin_switch, base_cost_type);
float elapsed_time = (float)(clock() - begin) / CLOCKS_PER_SEC;
@ -55,8 +53,8 @@ void ClockRRGraphBuilder::create_clock_networks_wires(std::vector<std::unique_pt
}
// Reduce the capacity of rr_nodes for performance
auto& rr_nodes = g_vpr_ctx.mutable_device().rr_nodes;
rr_nodes.shrink_to_fit();
auto& rr_graph = g_vpr_ctx.mutable_device().rr_graph;
rr_graph.compress();
}
// Clock switch information comes from the arch file
@ -70,18 +68,18 @@ void ClockRRGraphBuilder::add_rr_switches_and_map_to_nodes(size_t node_start_idx
const float R_minW_nmos,
const float R_minW_pmos) {
auto& device_ctx = g_vpr_ctx.mutable_device();
auto& rr_nodes = device_ctx.rr_nodes;
auto& rr_graph = device_ctx.rr_graph;
// Check to see that clock nodes were successfully appended to rr_nodes
VTR_ASSERT(rr_nodes.size() > node_start_idx);
VTR_ASSERT(true == rr_graph.valid_node_id(RRNodeId(node_start_idx)));
std::unordered_map<int, int> arch_switch_to_rr_switch;
// The following assumes that arch_switch was specified earlier when the edges where added
for (size_t node_idx = node_start_idx; node_idx < rr_nodes.size(); node_idx++) {
auto& from_node = rr_nodes[node_idx];
for (t_edge_size edge_idx = 0; edge_idx < from_node.num_edges(); edge_idx++) {
int arch_switch_idx = from_node.edge_switch(edge_idx);
for (size_t node_idx = node_start_idx; node_idx < rr_graph.nodes().size(); node_idx++) {
const RRNodeId& from_node = RRNodeId(node_idx);
for (const RREdgeId& edge_idx : rr_graph.node_out_edges(from_node)) {
int arch_switch_idx = (int)size_t(rr_graph.edge_switch(edge_idx));
int rr_switch_idx;
auto itter = arch_switch_to_rr_switch.find(arch_switch_idx);
@ -94,7 +92,7 @@ void ClockRRGraphBuilder::add_rr_switches_and_map_to_nodes(size_t node_start_idx
rr_switch_idx = itter->second;
}
from_node.set_edge_switch(edge_idx, rr_switch_idx);
rr_graph.set_edge_switch(edge_idx, RRSwitchId(rr_switch_idx));
}
}
@ -125,17 +123,17 @@ void ClockRRGraphBuilder::add_switch_location(std::string clock_name,
std::string switch_point_name,
int x,
int y,
int node_index) {
const RRNodeId& node_index) {
// Note use of operator[] will automatically insert clock name if it doesn't exist
clock_name_to_switch_points[clock_name].insert_switch_node_idx(switch_point_name, x, y, node_index);
}
void SwitchPoints::insert_switch_node_idx(std::string switch_point_name, int x, int y, int node_idx) {
void SwitchPoints::insert_switch_node_idx(std::string switch_point_name, int x, int y, const RRNodeId& node_idx) {
// Note use of operator[] will automatically insert switch name if it doesn't exist
switch_point_name_to_switch_location[switch_point_name].insert_node_idx(x, y, node_idx);
}
void SwitchPoint::insert_node_idx(int x, int y, int node_idx) {
void SwitchPoint::insert_node_idx(int x, int y, const RRNodeId& node_idx) {
// allocate 2d vector of grid size
if (rr_node_indices.empty()) {
auto& grid = g_vpr_ctx.device().grid;
@ -150,7 +148,7 @@ void SwitchPoint::insert_node_idx(int x, int y, int node_idx) {
locations.insert({x, y});
}
std::vector<int> ClockRRGraphBuilder::get_rr_node_indices_at_switch_location(std::string clock_name,
std::vector<RRNodeId> ClockRRGraphBuilder::get_rr_node_indices_at_switch_location(std::string clock_name,
std::string switch_point_name,
int x,
int y) const {
@ -163,7 +161,7 @@ std::vector<int> ClockRRGraphBuilder::get_rr_node_indices_at_switch_location(std
return switch_points.get_rr_node_indices_at_location(switch_point_name, x, y);
}
std::vector<int> SwitchPoints::get_rr_node_indices_at_location(std::string switch_point_name,
std::vector<RRNodeId> SwitchPoints::get_rr_node_indices_at_location(std::string switch_point_name,
int x,
int y) const {
auto itter = switch_point_name_to_switch_location.find(switch_point_name);
@ -172,11 +170,11 @@ std::vector<int> SwitchPoints::get_rr_node_indices_at_location(std::string switc
VTR_ASSERT(itter != switch_point_name_to_switch_location.end());
auto& switch_point = itter->second;
std::vector<int> rr_node_indices = switch_point.get_rr_node_indices_at_location(x, y);
std::vector<RRNodeId> rr_node_indices = switch_point.get_rr_node_indices_at_location(x, y);
return rr_node_indices;
}
std::vector<int> SwitchPoint::get_rr_node_indices_at_location(int x, int y) const {
std::vector<RRNodeId> SwitchPoint::get_rr_node_indices_at_location(int x, int y) const {
// assert that switch is connected to nodes at the location
VTR_ASSERT(!rr_node_indices[x][y].empty());

View File

@ -13,6 +13,8 @@
#include "clock_network_builders.h"
#include "clock_connection_builders.h"
#include "rr_graph_fwd.h"
class ClockNetwork;
class ClockConnection;
@ -23,18 +25,18 @@ class SwitchPoint {
* Examples of SwitchPoint(s) are rib-to-spine, driver-to-spine. */
public:
// [grid_width][grid_height][0..nodes_at_this_location-1]
std::vector<std::vector<std::vector<int>>> rr_node_indices;
std::vector<std::vector<std::vector<RRNodeId>>> rr_node_indices;
// Set of all the locations for this switch point. Used to quickly find
// if the switch point exists at a certain location.
std::set<std::pair<int, int>> locations; // x,y
public:
/** Getters **/
std::vector<int> get_rr_node_indices_at_location(int x, int y) const;
std::vector<RRNodeId> get_rr_node_indices_at_location(int x, int y) const;
std::set<std::pair<int, int>> get_switch_locations() const;
/** Setters **/
void insert_node_idx(int x, int y, int node_idx);
void insert_node_idx(int x, int y, const RRNodeId& node_idx);
};
class SwitchPoints {
@ -50,14 +52,14 @@ class SwitchPoints {
/* Example: x,y = middle of the chip, switch_point_name == name of main drive
* of global clock spine, returns the rr_nodes of all the clock spines that
* start the network there*/
std::vector<int> get_rr_node_indices_at_location(std::string switch_point_name,
std::vector<RRNodeId> get_rr_node_indices_at_location(std::string switch_point_name,
int x,
int y) const;
std::set<std::pair<int, int>> get_switch_locations(std::string switch_point_name) const;
/** Setters **/
void insert_switch_node_idx(std::string switch_point_name, int x, int y, int node_idx);
void insert_switch_node_idx(std::string switch_point_name, int x, int y, const RRNodeId& node_idx);
};
class ClockRRGraphBuilder {
@ -78,10 +80,10 @@ class ClockRRGraphBuilder {
std::string switch_point_name,
int x,
int y,
int node_index);
const RRNodeId& node_index);
/* Returns the rr_node idx of the switch at location {x, y} */
std::vector<int> get_rr_node_indices_at_switch_location(std::string clock_name,
std::vector<RRNodeId> get_rr_node_indices_at_switch_location(std::string clock_name,
std::string switch_point_name,
int x,
int y) const;

View File

@ -16,17 +16,17 @@
/******************* Subroutines local to this module ************************/
static void load_rr_indexed_data_base_costs(int nodes_per_chan,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
enum e_base_cost_type base_cost_type);
static float get_delay_normalization_fac(int nodes_per_chan,
const t_rr_node_indices& L_rr_node_indices);
const RRGraph& rr_graph);
static void load_rr_indexed_data_T_values(int index_start,
int num_indices_to_load,
t_rr_type rr_type,
int nodes_per_chan,
const t_rr_node_indices& L_rr_node_indices);
const RRGraph& rr_graph);
static void fixup_rr_indexed_data_T_values(size_t num_segment);
@ -48,7 +48,7 @@ static std::vector<size_t> count_rr_segment_types();
* x-channel its own cost_index, and each segment type in a y-channel its *
* own cost_index. */
void alloc_and_load_rr_indexed_data(const std::vector<t_segment_inf>& segment_inf,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
const int nodes_per_chan,
int wire_to_ipin_switch,
enum e_base_cost_type base_cost_type) {
@ -93,7 +93,7 @@ void alloc_and_load_rr_indexed_data(const std::vector<t_segment_inf>& segment_in
device_ctx.rr_indexed_data[index].seg_index = iseg;
}
load_rr_indexed_data_T_values(CHANX_COST_INDEX_START, num_segment, CHANX,
nodes_per_chan, L_rr_node_indices);
nodes_per_chan, rr_graph);
/* Y-directed segments. */
for (iseg = 0; iseg < num_segment; iseg++) {
@ -114,11 +114,11 @@ void alloc_and_load_rr_indexed_data(const std::vector<t_segment_inf>& segment_in
device_ctx.rr_indexed_data[index].seg_index = iseg;
}
load_rr_indexed_data_T_values((CHANX_COST_INDEX_START + num_segment),
num_segment, CHANY, nodes_per_chan, L_rr_node_indices);
num_segment, CHANY, nodes_per_chan, rr_graph);
fixup_rr_indexed_data_T_values(num_segment);
load_rr_indexed_data_base_costs(nodes_per_chan, L_rr_node_indices,
load_rr_indexed_data_base_costs(nodes_per_chan, rr_graph,
base_cost_type);
}
@ -143,7 +143,7 @@ void load_rr_index_segments(const int num_segment) {
}
static void load_rr_indexed_data_base_costs(int nodes_per_chan,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
enum e_base_cost_type base_cost_type) {
/* Loads the base_cost member of device_ctx.rr_indexed_data according to the specified *
* base_cost_type. */
@ -156,7 +156,7 @@ static void load_rr_indexed_data_base_costs(int nodes_per_chan,
if (base_cost_type == DEMAND_ONLY || base_cost_type == DEMAND_ONLY_NORMALIZED_LENGTH) {
delay_normalization_fac = 1.;
} else {
delay_normalization_fac = get_delay_normalization_fac(nodes_per_chan, L_rr_node_indices);
delay_normalization_fac = get_delay_normalization_fac(nodes_per_chan, rr_graph);
}
device_ctx.rr_indexed_data[SOURCE_COST_INDEX].base_cost = delay_normalization_fac;
@ -219,10 +219,10 @@ static std::vector<size_t> count_rr_segment_types() {
auto& device_ctx = g_vpr_ctx.device();
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); ++inode) {
if (device_ctx.rr_nodes[inode].type() != CHANX && device_ctx.rr_nodes[inode].type() != CHANY) continue;
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (device_ctx.rr_graph.node_type(inode) != CHANX && device_ctx.rr_graph.node_type(inode) != CHANY) continue;
int cost_index = device_ctx.rr_nodes[inode].cost_index();
int cost_index = device_ctx.rr_graph.node_cost_index(inode);
int seg_index = device_ctx.rr_indexed_data[cost_index].seg_index;
@ -240,12 +240,13 @@ static std::vector<size_t> count_rr_segment_types() {
}
static float get_delay_normalization_fac(int nodes_per_chan,
const t_rr_node_indices& L_rr_node_indices) {
const RRGraph& rr_graph) {
/* Returns the average delay to go 1 CLB distance along a wire. */
const int clb_dist = 3; /* Number of CLBs I think the average conn. goes. */
int inode, itrack, cost_index;
RRNodeId inode;
int itrack, cost_index;
float Tdel, Tdel_sum, frac_num_seg;
auto& device_ctx = g_vpr_ctx.device();
@ -254,10 +255,10 @@ static float get_delay_normalization_fac(int nodes_per_chan,
for (itrack = 0; itrack < nodes_per_chan; itrack++) {
inode = find_average_rr_node_index(device_ctx.grid.width(), device_ctx.grid.height(), CHANX, itrack,
L_rr_node_indices);
if (inode == -1)
rr_graph);
if (inode == RRNodeId::INVALID())
continue;
cost_index = device_ctx.rr_nodes[inode].cost_index();
cost_index = device_ctx.rr_graph.node_cost_index(inode);
frac_num_seg = clb_dist * device_ctx.rr_indexed_data[cost_index].inv_length;
Tdel = frac_num_seg * device_ctx.rr_indexed_data[cost_index].T_linear
+ frac_num_seg * frac_num_seg
@ -267,10 +268,10 @@ static float get_delay_normalization_fac(int nodes_per_chan,
for (itrack = 0; itrack < nodes_per_chan; itrack++) {
inode = find_average_rr_node_index(device_ctx.grid.width(), device_ctx.grid.height(), CHANY, itrack,
L_rr_node_indices);
if (inode == -1)
rr_graph);
if (inode == RRNodeId::INVALID())
continue;
cost_index = device_ctx.rr_nodes[inode].cost_index();
cost_index = device_ctx.rr_graph.node_cost_index(inode);
frac_num_seg = clb_dist * device_ctx.rr_indexed_data[cost_index].inv_length;
Tdel = frac_num_seg * device_ctx.rr_indexed_data[cost_index].T_linear
+ frac_num_seg * frac_num_seg
@ -285,7 +286,7 @@ static void load_rr_indexed_data_T_values(int index_start,
int num_indices_to_load,
t_rr_type rr_type,
int nodes_per_chan,
const t_rr_node_indices& L_rr_node_indices) {
const RRGraph& rr_graph) {
/* Loads the average propagation times through segments of each index type *
* for either all CHANX segment types or all CHANY segment types. It does *
* this by looking at all the segments in one channel in the middle of the *
@ -293,7 +294,8 @@ static void load_rr_indexed_data_T_values(int index_start,
* same type and using them to compute average delay values for this type of *
* segment. */
int itrack, inode, cost_index;
int itrack, cost_index;
RRNodeId inode;
float *C_total, *R_total; /* [0..device_ctx.rr_indexed_data.size() - 1] */
double *switch_R_total, *switch_T_total, *switch_Cinternal_total; /* [0..device_ctx.rr_indexed_data.size() - 1] */
short* switches_buffered;
@ -327,26 +329,25 @@ static void load_rr_indexed_data_T_values(int index_start,
for (itrack = 0; itrack < nodes_per_chan; itrack++) {
inode = find_average_rr_node_index(device_ctx.grid.width(), device_ctx.grid.height(), rr_type, itrack,
L_rr_node_indices);
if (inode == -1)
rr_graph);
if (inode == RRNodeId::INVALID())
continue;
cost_index = device_ctx.rr_nodes[inode].cost_index();
cost_index = rr_graph.node_cost_index(inode);
num_nodes_of_index[cost_index]++;
C_total[cost_index] += device_ctx.rr_nodes[inode].C();
R_total[cost_index] += device_ctx.rr_nodes[inode].R();
C_total[cost_index] += rr_graph.node_C(inode);
R_total[cost_index] += rr_graph.node_R(inode);
/* get average switch parameters */
int num_edges = device_ctx.rr_nodes[inode].num_edges();
double avg_switch_R = 0;
double avg_switch_T = 0;
double avg_switch_Cinternal = 0;
int num_switches = 0;
short buffered = UNDEFINED;
for (int iedge = 0; iedge < num_edges; iedge++) {
int to_node_index = device_ctx.rr_nodes[inode].edge_sink_node(iedge);
for (const RREdgeId& iedge : rr_graph.node_out_edges(inode)) {
RRNodeId to_node_index = device_ctx.rr_graph.edge_sink_node(iedge);
/* want to get C/R/Tdel/Cinternal of switches that connect this track segment to other track segments */
if (device_ctx.rr_nodes[to_node_index].type() == CHANX || device_ctx.rr_nodes[to_node_index].type() == CHANY) {
int switch_index = device_ctx.rr_nodes[inode].edge_switch(iedge);
if (device_ctx.rr_graph.node_type(to_node_index) == CHANX || device_ctx.rr_graph.node_type(to_node_index) == CHANY) {
int switch_index = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
avg_switch_R += device_ctx.rr_switch_inf[switch_index].R;
avg_switch_T += device_ctx.rr_switch_inf[switch_index].Tdel;
avg_switch_Cinternal += device_ctx.rr_switch_inf[switch_index].Cinternal;

View File

@ -1,9 +1,10 @@
#ifndef RR_GRAPH_INDEXED_DATA_H
#define RR_GRAPH_INDEXED_DATA_H
#include "physical_types.h"
#include "rr_graph_obj.h"
void alloc_and_load_rr_indexed_data(const std::vector<t_segment_inf>& segment_inf,
const t_rr_node_indices& L_rr_node_indices,
const RRGraph& rr_graph,
int nodes_per_chan,
int wire_to_ipin_switch,
enum e_base_cost_type base_cost_type);

View File

@ -44,6 +44,9 @@
#include "vpr_utils.h"
#include "vpr_error.h"
#include "rr_graph_obj.h"
#include "check_rr_graph_obj.h"
#include "rr_graph_reader.h"
/*********************** Subroutines local to this module *******************/
@ -142,7 +145,7 @@ void load_rr_file(const t_graph_type graph_type,
int num_rr_nodes = count_children(next_component, "node", loc_data);
device_ctx.rr_nodes.resize(num_rr_nodes);
device_ctx.rr_graph.reserve_nodes(num_rr_nodes);
process_nodes(next_component, loc_data);
/* Loads edges, switches, and node look up tables*/
@ -158,17 +161,22 @@ void load_rr_file(const t_graph_type graph_type,
//Partition the rr graph edges for efficient access to configurable/non-configurable
//edge subsets. Must be done after RR switches have been allocated
partition_rr_graph_edges(device_ctx);
device_ctx.rr_graph.rebuild_node_edges();
process_rr_node_indices(grid);
init_fan_in(device_ctx.rr_nodes, device_ctx.rr_nodes.size());
/* Essential check for rr_graph, build look-up */
if (false == device_ctx.rr_graph.validate()) {
/* Error out if built-in validator of rr_graph fails */
vpr_throw(VPR_ERROR_ROUTE,
__FILE__,
__LINE__,
"Fundamental errors occurred when validating rr_graph object!\n");
}
//sets the cost index and seg id information
next_component = get_single_child(rr_graph, "rr_nodes", loc_data);
set_cost_indices(next_component, loc_data, is_global_graph, segment_inf.size());
alloc_and_load_rr_indexed_data(segment_inf, device_ctx.rr_node_indices,
alloc_and_load_rr_indexed_data(segment_inf, device_ctx.rr_graph,
max_chan_width, *wire_to_rr_ipin_switch, base_cost_type);
process_seg_id(next_component, loc_data);
@ -177,7 +185,14 @@ void load_rr_file(const t_graph_type graph_type,
device_ctx.read_rr_graph_filename = std::string(read_rr_graph_name);
check_rr_graph(graph_type, grid, device_ctx.physical_tile_types);
/* Error out if advanced checker of rr_graph fails */
if (false == check_rr_graph(device_ctx.rr_graph)) {
vpr_throw(VPR_ERROR_ROUTE,
__FILE__,
__LINE__,
"Advanced checking rr_graph object fails! Routing may still work "
"but not smooth\n");
}
} catch (pugiutil::XmlError& e) {
vpr_throw(VPR_ERROR_ROUTE, read_rr_graph_name, e.line(), "%s", e.what());
}
@ -247,6 +262,13 @@ void process_switches(pugi::xml_node parent, const pugiutil::loc_data& loc_data)
Switch = Switch.next_sibling(Switch.name());
}
/* Add the switch to RRGraph local data */
device_ctx.rr_graph.reserve_switches(device_ctx.rr_switch_inf.size());
// Create the switches
for (size_t iswitch = 0; iswitch < device_ctx.rr_switch_inf.size(); ++iswitch) {
device_ctx.rr_graph.create_switch(device_ctx.rr_switch_inf[iswitch]);
}
}
/*Only CHANX and CHANY components have a segment id. This function
@ -262,17 +284,17 @@ void process_seg_id(pugi::xml_node parent, const pugiutil::loc_data& loc_data) {
while (rr_node) {
id = get_attribute(rr_node, "id", loc_data).as_int();
auto& node = device_ctx.rr_nodes[id];
RRNodeId node = RRNodeId(id);
segmentSubnode = get_single_child(rr_node, "segment", loc_data, pugiutil::OPTIONAL);
if (segmentSubnode) {
attribute = get_attribute(segmentSubnode, "segment_id", loc_data, pugiutil::OPTIONAL);
if (attribute) {
int seg_id = get_attribute(segmentSubnode, "segment_id", loc_data).as_int(0);
device_ctx.rr_indexed_data[node.cost_index()].seg_index = seg_id;
device_ctx.rr_indexed_data[device_ctx.rr_graph.node_cost_index(node)].seg_index = seg_id;
} else {
//-1 for non chanx or chany nodes
device_ctx.rr_indexed_data[node.cost_index()].seg_index = -1;
device_ctx.rr_indexed_data[device_ctx.rr_graph.node_cost_index(node)].seg_index = -1;
}
}
rr_node = rr_node.next_sibling(rr_node.name());
@ -289,41 +311,43 @@ void process_nodes(pugi::xml_node parent, const pugiutil::loc_data& loc_data) {
while (rr_node) {
int inode = get_attribute(rr_node, "id", loc_data).as_int();
auto& node = device_ctx.rr_nodes[inode];
t_rr_type node_type = NUM_RR_TYPES;
const char* node_type = get_attribute(rr_node, "type", loc_data).as_string();
if (strcmp(node_type, "CHANX") == 0) {
node.set_type(CHANX);
} else if (strcmp(node_type, "CHANY") == 0) {
node.set_type(CHANY);
} else if (strcmp(node_type, "SOURCE") == 0) {
node.set_type(SOURCE);
} else if (strcmp(node_type, "SINK") == 0) {
node.set_type(SINK);
} else if (strcmp(node_type, "OPIN") == 0) {
node.set_type(OPIN);
} else if (strcmp(node_type, "IPIN") == 0) {
node.set_type(IPIN);
const char* node_type_str = get_attribute(rr_node, "type", loc_data).as_string();
if (strcmp(node_type_str, "CHANX") == 0) {
node_type = CHANX;
} else if (strcmp(node_type_str, "CHANY") == 0) {
node_type = CHANY;
} else if (strcmp(node_type_str, "SOURCE") == 0) {
node_type = SOURCE;
} else if (strcmp(node_type_str, "SINK") == 0) {
node_type = SINK;
} else if (strcmp(node_type_str, "OPIN") == 0) {
node_type = OPIN;
} else if (strcmp(node_type_str, "IPIN") == 0) {
node_type = IPIN;
} else {
VPR_FATAL_ERROR(VPR_ERROR_OTHER,
"Valid inputs for class types are \"CHANX\", \"CHANY\",\"SOURCE\", \"SINK\",\"OPIN\", and \"IPIN\".");
}
if (node.type() == CHANX || node.type() == CHANY) {
const RRNodeId& node = device_ctx.rr_graph.create_node(node_type);
if (device_ctx.rr_graph.node_type(node) == CHANX || device_ctx.rr_graph.node_type(node) == CHANY) {
const char* correct_direction = get_attribute(rr_node, "direction", loc_data).as_string();
if (strcmp(correct_direction, "INC_DIR") == 0) {
node.set_direction(INC_DIRECTION);
device_ctx.rr_graph.set_node_direction(node, INC_DIRECTION);
} else if (strcmp(correct_direction, "DEC_DIR") == 0) {
node.set_direction(DEC_DIRECTION);
device_ctx.rr_graph.set_node_direction(node, DEC_DIRECTION);
} else if (strcmp(correct_direction, "BI_DIR") == 0) {
node.set_direction(BI_DIRECTION);
device_ctx.rr_graph.set_node_direction(node, BI_DIRECTION);
} else {
VTR_ASSERT((strcmp(correct_direction, "NO_DIR") == 0));
node.set_direction(NO_DIRECTION);
device_ctx.rr_graph.set_node_direction(node, NO_DIRECTION);
}
}
node.set_capacity(get_attribute(rr_node, "capacity", loc_data).as_float());
device_ctx.rr_graph.set_node_capacity(node, get_attribute(rr_node, "capacity", loc_data).as_float());
//--------------
locSubnode = get_single_child(rr_node, "loc", loc_data);
@ -334,7 +358,7 @@ void process_nodes(pugi::xml_node parent, const pugiutil::loc_data& loc_data) {
y1 = get_attribute(locSubnode, "ylow", loc_data).as_float();
y2 = get_attribute(locSubnode, "yhigh", loc_data).as_float();
if (node.type() == IPIN || node.type() == OPIN) {
if (device_ctx.rr_graph.node_type(node) == IPIN || device_ctx.rr_graph.node_type(node) == OPIN) {
e_side side;
std::string side_str = get_attribute(locSubnode, "side", loc_data).as_string();
if (side_str == "LEFT") {
@ -347,11 +371,11 @@ void process_nodes(pugi::xml_node parent, const pugiutil::loc_data& loc_data) {
VTR_ASSERT(side_str == "BOTTOM");
side = BOTTOM;
}
node.set_side(side);
device_ctx.rr_graph.set_node_side(node, side);
}
node.set_coordinates(x1, y1, x2, y2);
node.set_ptc_num(get_attribute(locSubnode, "ptc", loc_data).as_int());
device_ctx.rr_graph.set_node_bounding_box(node, vtr::Rect<short>(x1, y1, x2, y2));
device_ctx.rr_graph.set_node_ptc_num(node, get_attribute(locSubnode, "ptc", loc_data).as_int());
//-------
timingSubnode = get_single_child(rr_node, "timing", loc_data, pugiutil::OPTIONAL);
@ -362,10 +386,7 @@ void process_nodes(pugi::xml_node parent, const pugiutil::loc_data& loc_data) {
R = get_attribute(timingSubnode, "R", loc_data).as_float();
C = get_attribute(timingSubnode, "C", loc_data).as_float();
}
node.set_rc_index(find_create_rr_rc_data(R, C));
//clear each node edge
node.set_num_edges(0);
device_ctx.rr_graph.set_node_rc_data_index(node, find_create_rr_rc_data(R, C));
// <metadata>
// <meta name='grid_prefix' >CLBLL_L_</meta>
@ -394,32 +415,42 @@ void process_edges(pugi::xml_node parent, const pugiutil::loc_data& loc_data, in
edges = get_first_child(parent, "edge", loc_data);
//count the number of edges and store it in a vector
std::vector<size_t> num_edges_for_node;
num_edges_for_node.resize(device_ctx.rr_nodes.size(), 0);
vtr::vector<RRNodeId, size_t> num_edges_for_node;
num_edges_for_node.resize(device_ctx.rr_graph.nodes().size(), 0);
unsigned long num_edges_to_reserve = 0;
while (edges) {
size_t source_node = get_attribute(edges, "src_node", loc_data).as_uint();
if (source_node >= device_ctx.rr_nodes.size()) {
RRNodeId source_node = RRNodeId(get_attribute(edges, "src_node", loc_data).as_uint());
if (false == device_ctx.rr_graph.valid_node_id(source_node)) {
VPR_FATAL_ERROR(VPR_ERROR_OTHER,
"source_node %d is larger than rr_nodes.size() %d",
source_node, device_ctx.rr_nodes.size());
size_t(source_node), device_ctx.rr_graph.nodes().size());
}
num_edges_for_node[source_node]++;
num_edges_to_reserve++;
edges = edges.next_sibling(edges.name());
}
//reset this vector in order to start count for num edges again
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
if (num_edges_for_node[inode] > std::numeric_limits<t_edge_size>::max()) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
/* uint16_t is the data type for each type of edges in RRGraph object
* Multiplied by 4 is due to the fact that each node has 4 groups of edges
* and each group is bounded by uint16_t
* See rr_graph_obj.h for more details
*/
if (num_edges_for_node[inode] > 4 * std::numeric_limits<uint16_t>::max()) {
VPR_FATAL_ERROR(VPR_ERROR_OTHER,
"source node %d edge count %d is too high",
inode, num_edges_for_node[inode]);
size_t(inode), num_edges_for_node[inode]);
}
device_ctx.rr_nodes[inode].set_num_edges(num_edges_for_node[inode]);
num_edges_for_node[inode] = 0;
}
/* Reserve the memory for edges */
device_ctx.rr_graph.reserve_edges(num_edges_to_reserve);
edges = get_first_child(parent, "edge", loc_data);
/*initialize a vector that keeps track of the number of wire to ipin switches
* There should be only one wire to ipin switch. In case there are more, make sure to
@ -430,14 +461,14 @@ void process_edges(pugi::xml_node parent, const pugiutil::loc_data& loc_data, in
std::pair<int, int> most_frequent_switch(-1, 0);
while (edges) {
size_t source_node = get_attribute(edges, "src_node", loc_data).as_uint();
size_t sink_node = get_attribute(edges, "sink_node", loc_data).as_uint();
RRNodeId source_node = RRNodeId(get_attribute(edges, "src_node", loc_data).as_uint());
RRNodeId sink_node = RRNodeId(get_attribute(edges, "sink_node", loc_data).as_uint());
int switch_id = get_attribute(edges, "switch_id", loc_data).as_int();
if (sink_node >= device_ctx.rr_nodes.size()) {
if (false == device_ctx.rr_graph.valid_node_id(sink_node)) {
VPR_FATAL_ERROR(VPR_ERROR_OTHER,
"sink_node %d is larger than rr_nodes.size() %d",
sink_node, device_ctx.rr_nodes.size());
size_t(sink_node), device_ctx.rr_graph.nodes().size());
}
if (switch_id >= num_rr_switches) {
@ -448,8 +479,8 @@ void process_edges(pugi::xml_node parent, const pugiutil::loc_data& loc_data, in
/*Keeps track of the number of the specific type of switch that connects a wire to an ipin
* use the pair data structure to keep the maximum*/
if (device_ctx.rr_nodes[source_node].type() == CHANX || device_ctx.rr_nodes[source_node].type() == CHANY) {
if (device_ctx.rr_nodes[sink_node].type() == IPIN) {
if (device_ctx.rr_graph.node_type(source_node) == CHANX || device_ctx.rr_graph.node_type(source_node) == CHANY) {
if (device_ctx.rr_graph.node_type(sink_node) == IPIN) {
count_for_wire_to_ipin_switches[switch_id]++;
if (count_for_wire_to_ipin_switches[switch_id] > most_frequent_switch.second) {
most_frequent_switch.first = switch_id;
@ -458,8 +489,7 @@ void process_edges(pugi::xml_node parent, const pugiutil::loc_data& loc_data, in
}
}
//set edge in correct rr_node data structure
device_ctx.rr_nodes[source_node].set_edge_sink_node(num_edges_for_node[source_node], sink_node);
device_ctx.rr_nodes[source_node].set_edge_switch(num_edges_for_node[source_node], switch_id);
device_ctx.rr_graph.create_edge(source_node, sink_node, RRSwitchId(switch_id));
// Read the metadata for the edge
auto metadata = get_single_child(edges, "metadata", loc_data, pugiutil::OPTIONAL);
@ -468,7 +498,7 @@ void process_edges(pugi::xml_node parent, const pugiutil::loc_data& loc_data, in
while (edges_meta) {
auto key = get_attribute(edges_meta, "name", loc_data).as_string();
vpr::add_rr_edge_metadata(source_node, sink_node, switch_id,
vpr::add_rr_edge_metadata(size_t(source_node), size_t(sink_node), switch_id,
key, edges_meta.child_value());
edges_meta = edges_meta.next_sibling(edges_meta.name());
@ -842,15 +872,15 @@ void set_cost_indices(pugi::xml_node parent, const pugiutil::loc_data& loc_data,
auto& device_ctx = g_vpr_ctx.mutable_device();
//set the cost index in order to load the segment information, rr nodes should be set already
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
if (device_ctx.rr_nodes[inode].type() == SOURCE) {
device_ctx.rr_nodes[inode].set_cost_index(SOURCE_COST_INDEX);
} else if (device_ctx.rr_nodes[inode].type() == SINK) {
device_ctx.rr_nodes[inode].set_cost_index(SINK_COST_INDEX);
} else if (device_ctx.rr_nodes[inode].type() == IPIN) {
device_ctx.rr_nodes[inode].set_cost_index(IPIN_COST_INDEX);
} else if (device_ctx.rr_nodes[inode].type() == OPIN) {
device_ctx.rr_nodes[inode].set_cost_index(OPIN_COST_INDEX);
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (device_ctx.rr_graph.node_type(inode) == SOURCE) {
device_ctx.rr_graph.set_node_cost_index(inode, SOURCE_COST_INDEX);
} else if (device_ctx.rr_graph.node_type(inode) == SINK) {
device_ctx.rr_graph.set_node_cost_index(inode, SINK_COST_INDEX);
} else if (device_ctx.rr_graph.node_type(inode) == IPIN) {
device_ctx.rr_graph.set_node_cost_index(inode, IPIN_COST_INDEX);
} else if (device_ctx.rr_graph.node_type(inode) == OPIN) {
device_ctx.rr_graph.set_node_cost_index(inode, OPIN_COST_INDEX);
}
}
@ -862,8 +892,7 @@ void set_cost_indices(pugi::xml_node parent, const pugiutil::loc_data& loc_data,
rr_node = get_first_child(parent, "node", loc_data);
while (rr_node) {
int inode = get_attribute(rr_node, "id", loc_data).as_int();
auto& node = device_ctx.rr_nodes[inode];
RRNodeId inode = RRNodeId(get_attribute(rr_node, "id", loc_data).as_int());
/*CHANX and CHANY cost index is dependent on the segment id*/
@ -873,11 +902,11 @@ void set_cost_indices(pugi::xml_node parent, const pugiutil::loc_data& loc_data,
if (attribute) {
int seg_id = get_attribute(segmentSubnode, "segment_id", loc_data).as_int(0);
if (is_global_graph) {
node.set_cost_index(0);
} else if (node.type() == CHANX) {
node.set_cost_index(CHANX_COST_INDEX_START + seg_id);
} else if (node.type() == CHANY) {
node.set_cost_index(CHANX_COST_INDEX_START + num_seg_types + seg_id);
device_ctx.rr_graph.set_node_cost_index(inode, 0);
} else if (device_ctx.rr_graph.node_type(inode) == CHANX) {
device_ctx.rr_graph.set_node_cost_index(inode, CHANX_COST_INDEX_START + seg_id);
} else if (device_ctx.rr_graph.node_type(inode) == CHANY) {
device_ctx.rr_graph.set_node_cost_index(inode, CHANX_COST_INDEX_START + num_seg_types + seg_id);
}
}
}

View File

@ -28,14 +28,14 @@ void add_rr_graph_C_from_switches(float C_ipin_cblock) {
* INCLUDE_TRACK_BUFFERS) */
int switch_index, maxlen;
size_t to_node;
RRNodeId to_node;
int icblock, isblock, iseg_low, iseg_high;
float Cin, Cout;
t_rr_type from_rr_type, to_rr_type;
bool* cblock_counted; /* [0..maxlen-1] -- 0th element unused. */
float* buffer_Cin; /* [0..maxlen-1] */
bool buffered;
float* Couts_to_add; /* UDSD */
vtr::vector<RRNodeId, float> Couts_to_add; /* UDSD */
auto& device_ctx = g_vpr_ctx.device();
auto& mutable_device_ctx = g_vpr_ctx.mutable_device();
@ -44,21 +44,21 @@ void add_rr_graph_C_from_switches(float C_ipin_cblock) {
cblock_counted = (bool*)vtr::calloc(maxlen, sizeof(bool));
buffer_Cin = (float*)vtr::calloc(maxlen, sizeof(float));
std::vector<float> rr_node_C(device_ctx.rr_nodes.size(), 0.); //Stores the final C
vtr::vector<RRNodeId, float> rr_node_C(device_ctx.rr_graph.nodes().size(), 0.); //Stores the final C
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
//The C may have already been partly initialized (e.g. with metal capacitance)
rr_node_C[inode] += device_ctx.rr_nodes[inode].C();
rr_node_C[inode] += device_ctx.rr_graph.node_C(inode);
from_rr_type = device_ctx.rr_nodes[inode].type();
from_rr_type = device_ctx.rr_graph.node_type(inode);
if (from_rr_type == CHANX || from_rr_type == CHANY) {
for (t_edge_size iedge = 0; iedge < device_ctx.rr_nodes[inode].num_edges(); iedge++) {
to_node = device_ctx.rr_nodes[inode].edge_sink_node(iedge);
to_rr_type = device_ctx.rr_nodes[to_node].type();
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(inode)) {
to_node = device_ctx.rr_graph.edge_sink_node(iedge);
to_rr_type = device_ctx.rr_graph.node_type(to_node);
if (to_rr_type == CHANX || to_rr_type == CHANY) {
switch_index = device_ctx.rr_nodes[inode].edge_switch(iedge);
switch_index = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
Cin = device_ctx.rr_switch_inf[switch_index].Cin;
Cout = device_ctx.rr_switch_inf[switch_index].Cout;
buffered = device_ctx.rr_switch_inf[switch_index].buffered();
@ -80,14 +80,14 @@ void add_rr_graph_C_from_switches(float C_ipin_cblock) {
* the buffers at that location have different sizes, I use the *
* input capacitance of the largest one. */
if (!buffered && inode < to_node) { /* Pass transistor. */
if (!buffered && size_t(inode) < size_t(to_node)) { /* Pass transistor. */
rr_node_C[inode] += Cin;
rr_node_C[to_node] += Cout;
}
else if (buffered) {
/* Prevent double counting of capacitance for UDSD */
if (device_ctx.rr_nodes[to_node].direction() == BI_DIRECTION) {
if (device_ctx.rr_graph.node_direction(to_node) == BI_DIRECTION) {
/* For multiple-driver architectures the output capacitance can
* be added now since each edge is actually a driver */
rr_node_C[to_node] += Cout;
@ -129,11 +129,11 @@ void add_rr_graph_C_from_switches(float C_ipin_cblock) {
* } */
if (from_rr_type == CHANX) {
iseg_low = device_ctx.rr_nodes[inode].xlow();
iseg_high = device_ctx.rr_nodes[inode].xhigh();
iseg_low = device_ctx.rr_graph.node_xlow(inode);
iseg_high = device_ctx.rr_graph.node_xhigh(inode);
} else { /* CHANY */
iseg_low = device_ctx.rr_nodes[inode].ylow();
iseg_high = device_ctx.rr_nodes[inode].yhigh();
iseg_low = device_ctx.rr_graph.node_ylow(inode);
iseg_high = device_ctx.rr_graph.node_yhigh(inode);
}
for (icblock = iseg_low; icblock <= iseg_high; icblock++) {
@ -148,17 +148,17 @@ void add_rr_graph_C_from_switches(float C_ipin_cblock) {
}
/* End node is CHANX or CHANY */
else if (from_rr_type == OPIN) {
for (t_edge_size iedge = 0; iedge < device_ctx.rr_nodes[inode].num_edges(); iedge++) {
switch_index = device_ctx.rr_nodes[inode].edge_switch(iedge);
to_node = device_ctx.rr_nodes[inode].edge_sink_node(iedge);
to_rr_type = device_ctx.rr_nodes[to_node].type();
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(inode)) {
switch_index = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
to_node = device_ctx.rr_graph.edge_sink_node(iedge);
to_rr_type = device_ctx.rr_graph.node_type(to_node);
if (to_rr_type != CHANX && to_rr_type != CHANY)
continue;
if (device_ctx.rr_nodes[to_node].direction() == BI_DIRECTION) {
if (device_ctx.rr_graph.node_direction(to_node) == BI_DIRECTION) {
Cout = device_ctx.rr_switch_inf[switch_index].Cout;
to_node = device_ctx.rr_nodes[inode].edge_sink_node(iedge); /* Will be CHANX or CHANY */
to_node = device_ctx.rr_graph.edge_sink_node(iedge); /* Will be CHANX or CHANY */
rr_node_C[to_node] += Cout;
}
}
@ -170,30 +170,30 @@ void add_rr_graph_C_from_switches(float C_ipin_cblock) {
* Current structures only keep switch information from a node to the next node and
* not the reverse. Therefore I need to go through all the possible edges to figure
* out what the Cout's should be */
Couts_to_add = (float*)vtr::calloc(device_ctx.rr_nodes.size(), sizeof(float));
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
for (t_edge_size iedge = 0; iedge < device_ctx.rr_nodes[inode].num_edges(); iedge++) {
switch_index = device_ctx.rr_nodes[inode].edge_switch(iedge);
to_node = device_ctx.rr_nodes[inode].edge_sink_node(iedge);
to_rr_type = device_ctx.rr_nodes[to_node].type();
Couts_to_add.resize(device_ctx.rr_graph.nodes().size(), 0.);
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(inode)) {
switch_index = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
to_node = device_ctx.rr_graph.edge_sink_node(iedge);
to_rr_type = device_ctx.rr_graph.node_type(to_node);
if (to_rr_type == CHANX || to_rr_type == CHANY) {
if (device_ctx.rr_nodes[to_node].direction() != BI_DIRECTION) {
if (device_ctx.rr_graph.node_direction(to_node) != BI_DIRECTION) {
/* Cout was not added in these cases */
Couts_to_add[to_node] = std::max(Couts_to_add[to_node], device_ctx.rr_switch_inf[switch_index].Cout);
}
}
}
}
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
rr_node_C[inode] += Couts_to_add[inode];
}
//Create the final flywieghted t_rr_rc_data
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
mutable_device_ctx.rr_nodes[inode].set_rc_index(find_create_rr_rc_data(device_ctx.rr_nodes[inode].R(), rr_node_C[inode]));
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
mutable_device_ctx.rr_graph.set_node_rc_data_index(inode, find_create_rr_rc_data(device_ctx.rr_graph.node_R(inode), rr_node_C[inode]));
}
free(Couts_to_add);
Couts_to_add.clear();
free(cblock_counted);
free(buffer_Cin);
}

View File

@ -6,20 +6,20 @@
#include "globals.h"
#include "rr_graph_util.h"
int seg_index_of_cblock(t_rr_type from_rr_type, int to_node) {
int seg_index_of_cblock(t_rr_type from_rr_type, const RRNodeId& to_node) {
/* Returns the segment number (distance along the channel) of the connection *
* box from from_rr_type (CHANX or CHANY) to to_node (IPIN). */
auto& device_ctx = g_vpr_ctx.device();
if (from_rr_type == CHANX)
return (device_ctx.rr_nodes[to_node].xlow());
return (device_ctx.rr_graph.node_xlow(to_node));
else
/* CHANY */
return (device_ctx.rr_nodes[to_node].ylow());
return (device_ctx.rr_graph.node_ylow(to_node));
}
int seg_index_of_sblock(int from_node, int to_node) {
int seg_index_of_sblock(const RRNodeId& from_node, const RRNodeId& to_node) {
/* Returns the segment number (distance along the channel) of the switch box *
* box from from_node (CHANX or CHANY) to to_node (CHANX or CHANY). The *
* switch box on the left side of a CHANX segment at (i,j) has seg_index = *
@ -31,47 +31,47 @@ int seg_index_of_sblock(int from_node, int to_node) {
auto& device_ctx = g_vpr_ctx.device();
from_rr_type = device_ctx.rr_nodes[from_node].type();
to_rr_type = device_ctx.rr_nodes[to_node].type();
from_rr_type = device_ctx.rr_graph.node_type(from_node);
to_rr_type = device_ctx.rr_graph.node_type(to_node);
if (from_rr_type == CHANX) {
if (to_rr_type == CHANY) {
return (device_ctx.rr_nodes[to_node].xlow());
return (device_ctx.rr_graph.node_xlow(to_node));
} else if (to_rr_type == CHANX) {
if (device_ctx.rr_nodes[to_node].xlow() > device_ctx.rr_nodes[from_node].xlow()) { /* Going right */
return (device_ctx.rr_nodes[from_node].xhigh());
if (device_ctx.rr_graph.node_xlow(to_node) > device_ctx.rr_graph.node_xlow(from_node)) { /* Going right */
return (device_ctx.rr_graph.node_xhigh(from_node));
} else { /* Going left */
return (device_ctx.rr_nodes[to_node].xhigh());
return (device_ctx.rr_graph.node_xhigh(to_node));
}
} else {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in seg_index_of_sblock: to_node %d is of type %d.\n",
to_node, to_rr_type);
"in seg_index_of_sblock: to_node %ld is of type %d.\n",
size_t(to_node), to_rr_type);
return OPEN; //Should not reach here once thrown
}
}
/* End from_rr_type is CHANX */
else if (from_rr_type == CHANY) {
if (to_rr_type == CHANX) {
return (device_ctx.rr_nodes[to_node].ylow());
return (device_ctx.rr_graph.node_ylow(to_node));
} else if (to_rr_type == CHANY) {
if (device_ctx.rr_nodes[to_node].ylow() > device_ctx.rr_nodes[from_node].ylow()) { /* Going up */
return (device_ctx.rr_nodes[from_node].yhigh());
if (device_ctx.rr_graph.node_ylow(to_node) > device_ctx.rr_graph.node_ylow(from_node)) { /* Going up */
return (device_ctx.rr_graph.node_yhigh(from_node));
} else { /* Going down */
return (device_ctx.rr_nodes[to_node].yhigh());
return (device_ctx.rr_graph.node_yhigh(to_node));
}
} else {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in seg_index_of_sblock: to_node %d is of type %d.\n",
to_node, to_rr_type);
"in seg_index_of_sblock: to_node %ld is of type %d.\n",
size_t(to_node), to_rr_type);
return OPEN; //Should not reach here once thrown
}
}
/* End from_rr_type is CHANY */
else {
VPR_FATAL_ERROR(VPR_ERROR_ROUTE,
"in seg_index_of_sblock: from_node %d is of type %d.\n",
from_node, from_rr_type);
"in seg_index_of_sblock: from_node %ld is of type %d.\n",
size_t(from_node), from_rr_type);
return OPEN; //Should not reach here once thrown
}
}

View File

@ -1,8 +1,8 @@
#ifndef RR_GRAPH_UTIL_H
#define RR_GRAPH_UTIL_H
int seg_index_of_cblock(t_rr_type from_rr_type, int to_node);
int seg_index_of_cblock(t_rr_type from_rr_type, const RRNodeId& to_node);
int seg_index_of_sblock(int from_node, int to_node);
int seg_index_of_sblock(const RRNodeId& from_node, const RRNodeId& to_node);
#endif

View File

@ -43,9 +43,9 @@ void get_segment_usage_stats(std::vector<t_segment_inf>& segment_inf) {
seg_occ_by_type = (int*)vtr::calloc(segment_inf.size(), sizeof(int));
seg_cap_by_type = (int*)vtr::calloc(segment_inf.size(), sizeof(int));
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
if (device_ctx.rr_nodes[inode].type() == CHANX || device_ctx.rr_nodes[inode].type() == CHANY) {
cost_index = device_ctx.rr_nodes[inode].cost_index();
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
if (device_ctx.rr_graph.node_type(inode) == CHANX || device_ctx.rr_graph.node_type(inode) == CHANY) {
cost_index = device_ctx.rr_graph.node_cost_index(inode);
size_t seg_type = device_ctx.rr_indexed_data[cost_index].seg_index;
if (!segment_inf[seg_type].longline)
@ -54,9 +54,9 @@ void get_segment_usage_stats(std::vector<t_segment_inf>& segment_inf) {
length = LONGLINE;
seg_occ_by_length[length] += route_ctx.rr_node_route_inf[inode].occ();
seg_cap_by_length[length] += device_ctx.rr_nodes[inode].capacity();
seg_cap_by_length[length] += device_ctx.rr_graph.node_capacity(inode);
seg_occ_by_type[seg_type] += route_ctx.rr_node_route_inf[inode].occ();
seg_cap_by_type[seg_type] += device_ctx.rr_nodes[inode].capacity();
seg_cap_by_type[seg_type] += device_ctx.rr_graph.node_capacity(inode);
}
}

View File

@ -32,12 +32,10 @@ SpatialRouteTreeLookup build_route_tree_spatial_lookup(ClusterNetId net, t_rt_no
void update_route_tree_spatial_lookup_recur(t_rt_node* rt_node, SpatialRouteTreeLookup& spatial_lookup) {
auto& device_ctx = g_vpr_ctx.device();
auto& rr_node = device_ctx.rr_nodes[rt_node->inode];
int bin_xlow = grid_to_bin_x(rr_node.xlow(), spatial_lookup);
int bin_ylow = grid_to_bin_y(rr_node.ylow(), spatial_lookup);
int bin_xhigh = grid_to_bin_x(rr_node.xhigh(), spatial_lookup);
int bin_yhigh = grid_to_bin_y(rr_node.yhigh(), spatial_lookup);
int bin_xlow = grid_to_bin_x(device_ctx.rr_graph.node_xlow(rt_node->inode), spatial_lookup);
int bin_ylow = grid_to_bin_y(device_ctx.rr_graph.node_ylow(rt_node->inode), spatial_lookup);
int bin_xhigh = grid_to_bin_x(device_ctx.rr_graph.node_xhigh(rt_node->inode), spatial_lookup);
int bin_yhigh = grid_to_bin_y(device_ctx.rr_graph.node_yhigh(rt_node->inode), spatial_lookup);
spatial_lookup[bin_xlow][bin_ylow].push_back(rt_node);
@ -77,27 +75,27 @@ size_t grid_to_bin_y(size_t grid_y, const SpatialRouteTreeLookup& spatial_lookup
bool validate_route_tree_spatial_lookup(t_rt_node* rt_node, const SpatialRouteTreeLookup& spatial_lookup) {
auto& device_ctx = g_vpr_ctx.device();
auto& rr_node = device_ctx.rr_nodes[rt_node->inode];
const RRNodeId& rr_node = rt_node->inode;
int bin_xlow = grid_to_bin_x(rr_node.xlow(), spatial_lookup);
int bin_ylow = grid_to_bin_y(rr_node.ylow(), spatial_lookup);
int bin_xhigh = grid_to_bin_x(rr_node.xhigh(), spatial_lookup);
int bin_yhigh = grid_to_bin_y(rr_node.yhigh(), spatial_lookup);
int bin_xlow = grid_to_bin_x(device_ctx.rr_graph.node_xlow(rr_node), spatial_lookup);
int bin_ylow = grid_to_bin_y(device_ctx.rr_graph.node_ylow(rr_node), spatial_lookup);
int bin_xhigh = grid_to_bin_x(device_ctx.rr_graph.node_xhigh(rr_node), spatial_lookup);
int bin_yhigh = grid_to_bin_y(device_ctx.rr_graph.node_yhigh(rr_node), spatial_lookup);
bool valid = true;
auto& low_bin_rt_nodes = spatial_lookup[bin_xlow][bin_ylow];
if (std::find(low_bin_rt_nodes.begin(), low_bin_rt_nodes.end(), rt_node) == low_bin_rt_nodes.end()) {
valid = false;
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Failed to find route tree node %d at (low coord %d,%d) in spatial lookup [bin %d,%d]",
rt_node->inode, rr_node.xlow(), rr_node.ylow(), bin_xlow, bin_ylow);
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Failed to find route tree node %ld at (low coord %d,%d) in spatial lookup [bin %d,%d]",
size_t(rt_node->inode), device_ctx.rr_graph.node_xlow(rr_node), device_ctx.rr_graph.node_ylow(rr_node), bin_xlow, bin_ylow);
}
auto& high_bin_rt_nodes = spatial_lookup[bin_xhigh][bin_yhigh];
if (std::find(high_bin_rt_nodes.begin(), high_bin_rt_nodes.end(), rt_node) == high_bin_rt_nodes.end()) {
valid = false;
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Failed to find route tree node %d at (high coord %d,%d) in spatial lookup [bin %d,%d]",
rt_node->inode, rr_node.xhigh(), rr_node.yhigh(), bin_xhigh, bin_yhigh);
VPR_FATAL_ERROR(VPR_ERROR_ROUTE, "Failed to find route tree node %ld at (high coord %d,%d) in spatial lookup [bin %d,%d]",
size_t(rt_node->inode), device_ctx.rr_graph.node_xhigh(rr_node), device_ctx.rr_graph.node_yhigh(rr_node), bin_xhigh, bin_yhigh);
}
//Recurse

View File

@ -280,45 +280,45 @@ void VprTimingGraphResolver::get_detailed_interconnect_components_helper(std::ve
while (node != nullptr) {
//Process the current interconnect component if it is of type OPIN, CHANX, CHANY, IPIN
if (device_ctx.rr_nodes[node->inode].type() == OPIN
|| device_ctx.rr_nodes[node->inode].type() == IPIN
|| device_ctx.rr_nodes[node->inode].type() == CHANX
|| device_ctx.rr_nodes[node->inode].type() == CHANY) {
if (device_ctx.rr_graph.node_type(node->inode) == OPIN
|| device_ctx.rr_graph.node_type(node->inode) == IPIN
|| device_ctx.rr_graph.node_type(node->inode) == CHANX
|| device_ctx.rr_graph.node_type(node->inode) == CHANY) {
tatum::DelayComponent net_component; //declare a new instance of DelayComponent
net_component.type_name = device_ctx.rr_nodes[node->inode].type_string(); //write the component's type as a routing resource node
net_component.type_name += ":" + std::to_string(node->inode) + " "; //add the index of the routing resource node
if (device_ctx.rr_nodes[node->inode].type() == OPIN || device_ctx.rr_nodes[node->inode].type() == IPIN) {
net_component.type_name = std::string(rr_node_typename[device_ctx.rr_graph.node_type(node->inode)]); //write the component's type as a routing resource node
net_component.type_name += ":" + std::to_string(size_t(node->inode)) + " "; //add the index of the routing resource node
if (device_ctx.rr_graph.node_type(node->inode) == OPIN || device_ctx.rr_graph.node_type(node->inode) == IPIN) {
net_component.type_name += "side:"; //add the side of the routing resource node
net_component.type_name += device_ctx.rr_nodes[node->inode].side_string(); //add the side of the routing resource node
net_component.type_name += std::string(SIDE_STRING[device_ctx.rr_graph.node_side(node->inode)]); //add the side of the routing resource node
// For OPINs and IPINs the starting and ending coordinate are identical, so we can just arbitrarily assign the start to larger values
// and the end to the lower coordinate
start_x = " (" + std::to_string(device_ctx.rr_nodes[node->inode].xhigh()) + ","; //start and end coordinates are the same for OPINs and IPINs
start_y = std::to_string(device_ctx.rr_nodes[node->inode].yhigh()) + ")";
start_x = " (" + std::to_string(device_ctx.rr_graph.node_xhigh(node->inode)) + ","; //start and end coordinates are the same for OPINs and IPINs
start_y = std::to_string(device_ctx.rr_graph.node_yhigh(node->inode)) + ")";
end_x = "";
end_y = "";
arrow = "";
}
if (device_ctx.rr_nodes[node->inode].type() == CHANX || device_ctx.rr_nodes[node->inode].type() == CHANY) { //for channels, we would like to describe the component with segment specific information
net_component.type_name += device_ctx.arch->Segments[device_ctx.rr_indexed_data[device_ctx.rr_nodes[node->inode].cost_index()].seg_index].name; //Write the segment name
net_component.type_name += " length:" + std::to_string(device_ctx.rr_nodes[node->inode].length()); //add the length of the segment
if (device_ctx.rr_graph.node_type(node->inode) == CHANX || device_ctx.rr_graph.node_type(node->inode) == CHANY) { //for channels, we would like to describe the component with segment specific information
net_component.type_name += device_ctx.arch->Segments[device_ctx.rr_indexed_data[device_ctx.rr_graph.node_cost_index(node->inode)].seg_index].name; //Write the segment name
net_component.type_name += " length:" + std::to_string(device_ctx.rr_graph.node_length(node->inode)); //add the length of the segment
//Figure out the starting and ending coordinate of the segment depending on the direction
arrow = "->"; //we will point the coordinates from start to finish, left to right
if (device_ctx.rr_nodes[node->inode].direction() == DEC_DIRECTION) { //signal travels along decreasing direction
start_x = " (" + std::to_string(device_ctx.rr_nodes[node->inode].xhigh()) + ","; //start coordinates have large value
start_y = std::to_string(device_ctx.rr_nodes[node->inode].yhigh()) + ")";
end_x = "(" + std::to_string(device_ctx.rr_nodes[node->inode].xlow()) + ","; //end coordinates have smaller value
end_y = std::to_string(device_ctx.rr_nodes[node->inode].ylow()) + ")";
if (device_ctx.rr_graph.node_direction(node->inode) == DEC_DIRECTION) { //signal travels along decreasing direction
start_x = " (" + std::to_string(device_ctx.rr_graph.node_xhigh(node->inode)) + ","; //start coordinates have large value
start_y = std::to_string(device_ctx.rr_graph.node_yhigh(node->inode)) + ")";
end_x = "(" + std::to_string(device_ctx.rr_graph.node_xlow(node->inode)) + ","; //end coordinates have smaller value
end_y = std::to_string(device_ctx.rr_graph.node_ylow(node->inode)) + ")";
}
else { // signal travels in increasing direction, stays at same point, or can travel both directions
start_x = " (" + std::to_string(device_ctx.rr_nodes[node->inode].xlow()) + ","; //start coordinates have smaller value
start_y = std::to_string(device_ctx.rr_nodes[node->inode].ylow()) + ")";
end_x = "(" + std::to_string(device_ctx.rr_nodes[node->inode].xhigh()) + ","; //end coordinates have larger value
end_y = std::to_string(device_ctx.rr_nodes[node->inode].yhigh()) + ")";
if (device_ctx.rr_nodes[node->inode].direction() == BI_DIRECTION) {
start_x = " (" + std::to_string(device_ctx.rr_graph.node_xlow(node->inode)) + ","; //start coordinates have smaller value
start_y = std::to_string(device_ctx.rr_graph.node_ylow(node->inode)) + ")";
end_x = "(" + std::to_string(device_ctx.rr_graph.node_xhigh(node->inode)) + ","; //end coordinates have larger value
end_y = std::to_string(device_ctx.rr_graph.node_yhigh(node->inode)) + ")";
if (device_ctx.rr_graph.node_direction(node->inode) == BI_DIRECTION) {
arrow = "<->"; //indicate that signal can travel both direction
}
}

View File

@ -29,7 +29,7 @@
* associated with that node. The map will be used to store delays while *
* traversing the nodes of the route tree in load_one_net_delay_recurr. */
static std::unordered_map<int, float> inode_to_Tdel_map;
static std::unordered_map<RRNodeId, float> inode_to_Tdel_map;
/*********************** Subroutines local to this module ********************/
@ -107,7 +107,7 @@ static void load_one_net_delay(vtr::vector<ClusterNetId, float*>& net_delay, Clu
}
auto& cluster_ctx = g_vpr_ctx.clustering();
int inode;
RRNodeId inode;
t_rt_node* rt_root = traceback_to_route_tree(net_id); // obtain the root of the tree constructed from the traceback
load_new_subtree_R_upstream(rt_root); // load in the resistance values for the route tree

View File

@ -235,22 +235,22 @@ std::vector<std::string> block_type_class_index_to_pin_names(t_physical_tile_typ
return pin_names;
}
std::string rr_node_arch_name(int inode) {
std::string rr_node_arch_name(const RRNodeId& inode) {
auto& device_ctx = g_vpr_ctx.device();
const t_rr_node& rr_node = device_ctx.rr_nodes[inode];
const RRGraph& rr_graph = device_ctx.rr_graph;
std::string rr_node_arch_name;
if (rr_node.type() == OPIN || rr_node.type() == IPIN) {
if (rr_graph.node_type(inode) == OPIN || rr_graph.node_type(inode) == IPIN) {
//Pin names
auto type = device_ctx.grid[rr_node.xlow()][rr_node.ylow()].type;
rr_node_arch_name += block_type_pin_index_to_name(type, rr_node.ptc_num());
} else if (rr_node.type() == SOURCE || rr_node.type() == SINK) {
auto type = device_ctx.grid[rr_graph.node_xlow(inode)][rr_graph.node_ylow(inode)].type;
rr_node_arch_name += block_type_pin_index_to_name(type, rr_graph.node_ptc_num(inode));
} else if (rr_graph.node_type(inode) == SOURCE || rr_graph.node_type(inode) == SINK) {
//Set of pins associated with SOURCE/SINK
auto type = device_ctx.grid[rr_node.xlow()][rr_node.ylow()].type;
auto pin_names = block_type_class_index_to_pin_names(type, rr_node.ptc_num());
auto type = device_ctx.grid[rr_graph.node_xlow(inode)][rr_graph.node_ylow(inode)].type;
auto pin_names = block_type_class_index_to_pin_names(type, rr_graph.node_ptc_num(inode));
if (pin_names.size() > 1) {
rr_node_arch_name += rr_node.type_string();
rr_node_arch_name += rr_node_typename[rr_graph.node_type(inode)];
rr_node_arch_name += " connected to ";
rr_node_arch_name += "{";
rr_node_arch_name += vtr::join(pin_names, ", ");
@ -259,9 +259,9 @@ std::string rr_node_arch_name(int inode) {
rr_node_arch_name += pin_names[0];
}
} else {
VTR_ASSERT(rr_node.type() == CHANX || rr_node.type() == CHANY);
VTR_ASSERT(rr_graph.node_type(inode) == CHANX || rr_graph.node_type(inode) == CHANY);
//Wire segment name
auto cost_index = rr_node.cost_index();
auto cost_index = rr_graph.node_cost_index(inode);
int seg_index = device_ctx.rr_indexed_data[cost_index].seg_index;
rr_node_arch_name += device_ctx.arch->Segments[seg_index].name;
@ -1951,13 +1951,11 @@ void print_switch_usage() {
switch_fanin_delay = new std::map<int, float>[device_ctx.num_arch_switches];
// a node can have multiple inward switches, so
// map key: switch index; map value: count (fanin)
std::map<int, int>* inward_switch_inf = new std::map<int, int>[device_ctx.rr_nodes.size()];
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
const t_rr_node& from_node = device_ctx.rr_nodes[inode];
int num_edges = from_node.num_edges();
for (int iedge = 0; iedge < num_edges; iedge++) {
int switch_index = from_node.edge_switch(iedge);
int to_node_index = from_node.edge_sink_node(iedge);
vtr::vector<RRNodeId, std::map<int, int>> inward_switch_inf(device_ctx.rr_graph.nodes().size());
for (const RRNodeId& from_node : device_ctx.rr_graph.nodes()) {
for (const RREdgeId& iedge : device_ctx.rr_graph.node_out_edges(from_node)) {
int switch_index = (int)size_t(device_ctx.rr_graph.edge_switch(iedge));
const RRNodeId& to_node_index = device_ctx.rr_graph.edge_sink_node(iedge);
// Assumption: suppose for a L4 wire (bi-directional): ----+----+----+----, it can be driven from any point (0, 1, 2, 3).
// physically, the switch driving from point 1 & 3 should be the same. But we will assign then different switch
// index; or there is no way to differentiate them after abstracting a 2D wire into a 1D node
@ -1967,7 +1965,7 @@ void print_switch_usage() {
inward_switch_inf[to_node_index][switch_index]++;
}
}
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) {
for (const RRNodeId& inode : device_ctx.rr_graph.nodes()) {
std::map<int, int>::iterator itr;
for (itr = inward_switch_inf[inode].begin(); itr != inward_switch_inf[inode].end(); itr++) {
int switch_index = itr->first;
@ -1977,7 +1975,7 @@ void print_switch_usage() {
if (status == -1) {
delete[] switch_fanin_count;
delete[] switch_fanin_delay;
delete[] inward_switch_inf;
inward_switch_inf.clear();
return;
}
if (switch_fanin_count[switch_index].count(fanin) == 0) {
@ -2003,7 +2001,7 @@ void print_switch_usage() {
VTR_LOG("\n==================================================\n\n");
delete[] switch_fanin_count;
delete[] switch_fanin_delay;
delete[] inward_switch_inf;
inward_switch_inf.clear();
}
/*

View File

@ -48,7 +48,7 @@ std::string block_type_pin_index_to_name(t_physical_tile_type_ptr type, int pin_
std::vector<std::string> block_type_class_index_to_pin_names(t_physical_tile_type_ptr type, int class_index);
//Returns a user-friendly architectural identifier for the specified RR node
std::string rr_node_arch_name(int inode);
std::string rr_node_arch_name(const RRNodeId& inode);
/**************************************************************
* Intra-Logic Block Utility Functions