Start deploying RRGraph in the placement engine

This commit is contained in:
tangxifan 2020-01-31 19:55:32 -07:00
parent 5e2559dc14
commit 35c136d8a4
7 changed files with 56 additions and 33 deletions

View File

@ -40,8 +40,9 @@
#include "vtr_flat_map.h" #include "vtr_flat_map.h"
#include "vtr_cache.h" #include "vtr_cache.h"
/* Header for rr_graph related definition */ /* Xifan Tang - Header for rr_graph related definition */
#include "rr_graph_types.h" #include "rr_graph_types.h"
#include "rr_graph_obj.h"
/******************************************************************************* /*******************************************************************************
* Global data types and constants * Global data types and constants
@ -1137,6 +1138,10 @@ struct t_trace {
t_trace* next; t_trace* next;
int index; int index;
short iswitch; short iswitch;
/* Xifan Tang - RRGraph unique ids */
RRNodeId node_id;
RRSwitchId switch_id;
}; };
/* Extra information about each rr_node needed only during routing (i.e. * /* Extra information about each rr_node needed only during routing (i.e. *
@ -1160,6 +1165,9 @@ struct t_trace {
* occ: The current occupancy of the associated rr node */ * occ: The current occupancy of the associated rr node */
struct t_rr_node_route_inf { struct t_rr_node_route_inf {
int prev_node; int prev_node;
/* Xifan Tang - prev_node for RRGraph object */
RRNodeId prev_node_id;
t_edge_size prev_edge; t_edge_size prev_edge;
float pres_cost; float pres_cost;

View File

@ -880,10 +880,10 @@ void init_draw_coords(float width_val) {
return; //do not initialize only if --disp off and --save_graphics off return; //do not initialize only if --disp off and --save_graphics off
/* Each time routing is on screen, need to reallocate the color of each * /* Each time routing is on screen, need to reallocate the color of each *
* rr_node, as the number of rr_nodes may change. */ * rr_node, as the number of rr_nodes may change. */
if (device_ctx.rr_nodes.size() != 0) { if (device_ctx.rr_graph.nodes().size() != 0) {
draw_state->draw_rr_node = (t_draw_rr_node*)vtr::realloc(draw_state->draw_rr_node, draw_state->draw_rr_node = (t_draw_rr_node*)vtr::realloc(draw_state->draw_rr_node,
(device_ctx.rr_nodes.size()) * sizeof(t_draw_rr_node)); (device_ctx.rr_graph.nodes().size()) * sizeof(t_draw_rr_node));
for (size_t i = 0; i < device_ctx.rr_nodes.size(); i++) { for (size_t i = 0; i < device_ctx.rr_graph.nodes().size(); i++) {
draw_state->draw_rr_node[i].color = DEFAULT_RR_NODE_COLOR; draw_state->draw_rr_node[i].color = DEFAULT_RR_NODE_COLOR;
draw_state->draw_rr_node[i].node_highlighted = false; draw_state->draw_rr_node[i].node_highlighted = false;
} }

View File

@ -304,16 +304,16 @@ static float route_connection_delay(
for (int driver_ptc : best_driver_ptcs) { for (int driver_ptc : best_driver_ptcs) {
VTR_ASSERT(driver_ptc != OPEN); VTR_ASSERT(driver_ptc != OPEN);
int source_rr_node = get_rr_node_index(device_ctx.rr_node_indices, source_x, source_y, SOURCE, driver_ptc); RRNodeId source_rr_node = device_ctx.rr_graph.find_node(source_x, source_y, SOURCE, driver_ptc);
VTR_ASSERT(source_rr_node != OPEN); VTR_ASSERT(source_rr_node != RRNodeId::INVALID());
for (int sink_ptc : best_sink_ptcs) { for (int sink_ptc : best_sink_ptcs) {
VTR_ASSERT(sink_ptc != OPEN); VTR_ASSERT(sink_ptc != OPEN);
int sink_rr_node = get_rr_node_index(device_ctx.rr_node_indices, sink_x, sink_y, SINK, sink_ptc); RRNodeId sink_rr_node = device_ctx.rr_graph.find_node(sink_x, sink_y, SINK, sink_ptc);
VTR_ASSERT(sink_rr_node != OPEN); VTR_ASSERT(sink_rr_node != RRNodeId::INVALID());
if (!measure_directconnect && directconnect_exists(source_rr_node, sink_rr_node)) { if (!measure_directconnect && directconnect_exists(source_rr_node, sink_rr_node)) {
//Skip if we shouldn't measure direct connects and a direct connect exists //Skip if we shouldn't measure direct connects and a direct connect exists
@ -322,7 +322,7 @@ static float route_connection_delay(
{ {
successfully_routed = route_profiler.calculate_delay( successfully_routed = route_profiler.calculate_delay(
source_rr_node, sink_rr_node, size_t(source_rr_node), size_t(sink_rr_node),
router_opts, router_opts,
&net_delay_value); &net_delay_value);
} }
@ -933,29 +933,29 @@ void OverrideDelayModel::compute_override_delay_model(
} }
} }
bool directconnect_exists(int src_rr_node, int sink_rr_node) { bool directconnect_exists(RRNodeId src_rr_node, RRNodeId sink_rr_node) {
//Returns true if there is a directconnect between the two RR nodes //Returns true if there is a directconnect between the two RR nodes
// //
//This is checked by looking for a SOURCE -> OPIN -> IPIN -> SINK path //This is checked by looking for a SOURCE -> OPIN -> IPIN -> SINK path
//which starts at src_rr_node and ends at sink_rr_node //which starts at src_rr_node and ends at sink_rr_node
auto& device_ctx = g_vpr_ctx.device(); auto& device_ctx = g_vpr_ctx.device();
auto& rr_nodes = device_ctx.rr_nodes; auto& rr_graph = device_ctx.rr_graph;
VTR_ASSERT(rr_nodes[src_rr_node].type() == SOURCE && rr_nodes[sink_rr_node].type() == SINK); VTR_ASSERT(rr_graph.node_type(src_rr_node) == SOURCE && rr_graph.node_type(sink_rr_node) == SINK);
//TODO: This is a constant depth search, but still may be too slow //TODO: This is a constant depth search, but still may be too slow
for (t_edge_size i_src_edge = 0; i_src_edge < rr_nodes[src_rr_node].num_edges(); ++i_src_edge) { for (const RREdgeId& src_edge : rr_graph.node_out_edges(src_rr_node)) {
int opin_rr_node = rr_nodes[src_rr_node].edge_sink_node(i_src_edge); RRNodeId opin_rr_node = rr_graph.edge_sink_node(src_edge);
if (rr_nodes[opin_rr_node].type() != OPIN) continue; if (rr_graph.node_type(opin_rr_node) != OPIN) continue;
for (t_edge_size i_opin_edge = 0; i_opin_edge < rr_nodes[opin_rr_node].num_edges(); ++i_opin_edge) { for (const RREdgeId& opin_edge : rr_graph.node_out_edges(opin_rr_node)) {
int ipin_rr_node = rr_nodes[opin_rr_node].edge_sink_node(i_opin_edge); RRNodeId ipin_rr_node = rr_graph.edge_sink_node(opin_edge);
if (rr_nodes[ipin_rr_node].type() != IPIN) continue; if (rr_graph.node_type(ipin_rr_node) != IPIN) continue;
for (t_edge_size i_ipin_edge = 0; i_ipin_edge < rr_nodes[ipin_rr_node].num_edges(); ++i_ipin_edge) { for (const RREdgeId& ipin_edge : rr_graph.node_out_edges(ipin_rr_node)) {
if (sink_rr_node == rr_nodes[ipin_rr_node].edge_sink_node(i_ipin_edge)) { if (sink_rr_node == rr_graph.edge_sink_node(ipin_edge)) {
return true; return true;
} }
} }

View File

@ -1,6 +1,7 @@
#ifndef TIMING_PLACE_LOOKUP_H #ifndef TIMING_PLACE_LOOKUP_H
#define TIMING_PLACE_LOOKUP_H #define TIMING_PLACE_LOOKUP_H
#include "place_delay_model.h" #include "place_delay_model.h"
#include "rr_graph_obj.h"
std::unique_ptr<PlaceDelayModel> compute_place_delay_model(const t_placer_opts& placer_opts, std::unique_ptr<PlaceDelayModel> compute_place_delay_model(const t_placer_opts& placer_opts,
const t_router_opts& router_opts, const t_router_opts& router_opts,
@ -11,6 +12,6 @@ std::unique_ptr<PlaceDelayModel> compute_place_delay_model(const t_placer_opts&
const int num_directs); const int num_directs);
std::vector<int> get_best_classes(enum e_pin_type pintype, t_physical_tile_type_ptr type); std::vector<int> get_best_classes(enum e_pin_type pintype, t_physical_tile_type_ptr type);
bool directconnect_exists(int src_rr_node, int sink_rr_node); bool directconnect_exists(RRNodeId src_rr_node, RRNodeId sink_rr_node);
#endif #endif

View File

@ -336,7 +336,7 @@ bool feasible_routing() {
auto& device_ctx = g_vpr_ctx.device(); auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing(); auto& route_ctx = g_vpr_ctx.routing();
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) { for (size_t inode = 0; inode < device_ctx.rr_graph.nodes().size(); inode++) {
if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_nodes[inode].capacity()) { if (route_ctx.rr_node_route_inf[inode].occ() > device_ctx.rr_nodes[inode].capacity()) {
return (false); return (false);
} }
@ -351,7 +351,7 @@ std::vector<int> collect_congested_rr_nodes() {
auto& route_ctx = g_vpr_ctx.routing(); auto& route_ctx = g_vpr_ctx.routing();
std::vector<int> congested_rr_nodes; std::vector<int> congested_rr_nodes;
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) { for (size_t inode = 0; inode < device_ctx.rr_graph.nodes().size(); inode++) {
short occ = route_ctx.rr_node_route_inf[inode].occ(); short occ = route_ctx.rr_node_route_inf[inode].occ();
short capacity = device_ctx.rr_nodes[inode].capacity(); short capacity = device_ctx.rr_nodes[inode].capacity();
@ -362,14 +362,14 @@ std::vector<int> collect_congested_rr_nodes() {
return congested_rr_nodes; return congested_rr_nodes;
} }
/* Returns a vector from [0..device_ctx.rr_nodes.size()-1] containing the set /* Returns a vector from [0..device_ctx.rr_graph.nodes().size()-1] containing the set
* of nets using each RR node */ * of nets using each RR node */
std::vector<std::set<ClusterNetId>> collect_rr_node_nets() { std::vector<std::set<ClusterNetId>> collect_rr_node_nets() {
auto& device_ctx = g_vpr_ctx.device(); auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.routing(); auto& route_ctx = g_vpr_ctx.routing();
auto& cluster_ctx = g_vpr_ctx.clustering(); auto& cluster_ctx = g_vpr_ctx.clustering();
std::vector<std::set<ClusterNetId>> rr_node_nets(device_ctx.rr_nodes.size()); std::vector<std::set<ClusterNetId>> rr_node_nets(device_ctx.rr_graph.nodes().size());
for (ClusterNetId inet : cluster_ctx.clb_nlist.nets()) { for (ClusterNetId inet : cluster_ctx.clb_nlist.nets()) {
t_trace* trace_elem = route_ctx.trace[inet].head; t_trace* trace_elem = route_ctx.trace[inet].head;
while (trace_elem) { while (trace_elem) {
@ -449,7 +449,7 @@ void pathfinder_update_cost(float pres_fac, float acc_fac) {
auto& device_ctx = g_vpr_ctx.device(); auto& device_ctx = g_vpr_ctx.device();
auto& route_ctx = g_vpr_ctx.mutable_routing(); auto& route_ctx = g_vpr_ctx.mutable_routing();
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) { for (size_t inode = 0; inode < device_ctx.rr_graph.nodes().size(); inode++) {
occ = route_ctx.rr_node_route_inf[inode].occ(); occ = route_ctx.rr_node_route_inf[inode].occ();
capacity = device_ctx.rr_nodes[inode].capacity(); capacity = device_ctx.rr_nodes[inode].capacity();
@ -1003,7 +1003,7 @@ void alloc_and_load_rr_node_route_structs() {
auto& route_ctx = g_vpr_ctx.mutable_routing(); auto& route_ctx = g_vpr_ctx.mutable_routing();
auto& device_ctx = g_vpr_ctx.device(); auto& device_ctx = g_vpr_ctx.device();
route_ctx.rr_node_route_inf.resize(device_ctx.rr_nodes.size()); route_ctx.rr_node_route_inf.resize(device_ctx.rr_graph.nodes().size());
reset_rr_node_route_structs(); reset_rr_node_route_structs();
} }
@ -1014,10 +1014,11 @@ void reset_rr_node_route_structs() {
auto& route_ctx = g_vpr_ctx.mutable_routing(); auto& route_ctx = g_vpr_ctx.mutable_routing();
auto& device_ctx = g_vpr_ctx.device(); auto& device_ctx = g_vpr_ctx.device();
VTR_ASSERT(route_ctx.rr_node_route_inf.size() == size_t(device_ctx.rr_nodes.size())); VTR_ASSERT(route_ctx.rr_node_route_inf.size() == size_t(device_ctx.rr_graph.nodes().size()));
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) { for (size_t inode = 0; inode < device_ctx.rr_graph.nodes().size(); inode++) {
route_ctx.rr_node_route_inf[inode].prev_node = NO_PREVIOUS; route_ctx.rr_node_route_inf[inode].prev_node = NO_PREVIOUS;
route_ctx.rr_node_route_inf[inode].prev_node_id = RRNodeId::INVALID();
route_ctx.rr_node_route_inf[inode].prev_edge = NO_PREVIOUS; route_ctx.rr_node_route_inf[inode].prev_edge = NO_PREVIOUS;
route_ctx.rr_node_route_inf[inode].pres_cost = 1.0; route_ctx.rr_node_route_inf[inode].pres_cost = 1.0;
route_ctx.rr_node_route_inf[inode].acc_cost = 1.0; route_ctx.rr_node_route_inf[inode].acc_cost = 1.0;
@ -1826,7 +1827,7 @@ void print_invalid_routing_info() {
} }
} }
for (size_t inode = 0; inode < device_ctx.rr_nodes.size(); inode++) { for (size_t inode = 0; inode < device_ctx.rr_graph.nodes().size(); inode++) {
int occ = route_ctx.rr_node_route_inf[inode].occ(); int occ = route_ctx.rr_node_route_inf[inode].occ();
int cap = device_ctx.rr_nodes[inode].capacity(); int cap = device_ctx.rr_nodes[inode].capacity();
if (occ > cap) { if (occ > cap) {

View File

@ -27,7 +27,7 @@
/* Array below allows mapping from any rr_node to any rt_node currently in /* Array below allows mapping from any rr_node to any rt_node currently in
* the rt_tree. */ * the rt_tree. */
static std::vector<t_rt_node*> rr_node_to_rt_node; /* [0..device_ctx.rr_nodes.size()-1] */ static std::vector<t_rt_node*> rr_node_to_rt_node; /* [0..device_ctx.rr_graph.nodes().size()-1] */
/* Frees lists for fast addition and deletion of nodes and edges. */ /* Frees lists for fast addition and deletion of nodes and edges. */
@ -73,7 +73,7 @@ bool alloc_route_tree_timing_structs(bool exists_ok) {
auto& device_ctx = g_vpr_ctx.device(); auto& device_ctx = g_vpr_ctx.device();
bool route_tree_structs_are_allocated = (rr_node_to_rt_node.size() == size_t(device_ctx.rr_nodes.size()) bool route_tree_structs_are_allocated = (rr_node_to_rt_node.size() == size_t(device_ctx.rr_graph.nodes().size())
|| rt_node_free_list != nullptr); || rt_node_free_list != nullptr);
if (route_tree_structs_are_allocated) { if (route_tree_structs_are_allocated) {
if (exists_ok) { if (exists_ok) {
@ -84,7 +84,7 @@ bool alloc_route_tree_timing_structs(bool exists_ok) {
} }
} }
rr_node_to_rt_node = std::vector<t_rt_node*>(device_ctx.rr_nodes.size(), nullptr); rr_node_to_rt_node = std::vector<t_rt_node*>(device_ctx.rr_graph.nodes().size(), nullptr);
return true; return true;
} }

View File

@ -1,4 +1,5 @@
#pragma once #pragma once
#include "rr_graph_obj.h"
/************** Types and defines exported by route_tree_timing.c ************/ /************** Types and defines exported by route_tree_timing.c ************/
struct t_rt_node; struct t_rt_node;
@ -11,6 +12,10 @@ struct t_rt_node;
struct t_linked_rt_edge { struct t_linked_rt_edge {
t_rt_node* child; t_rt_node* child;
short iswitch; short iswitch;
/* Xifan Tang - RRGraph switch */
RRSwitchId iswitch_id;
t_linked_rt_edge* next; t_linked_rt_edge* next;
}; };
@ -41,8 +46,16 @@ struct t_rt_node {
} u; } u;
t_rt_node* parent_node; t_rt_node* parent_node;
short parent_switch; short parent_switch;
/* Xifan Tang - RRGraph switch */
RRSwitchId parent_switch_id;
bool re_expand; bool re_expand;
int inode; int inode;
/* Xifan Tang - RRGraph node */
RRNodeId inode_id;
float C_downstream; float C_downstream;
float R_upstream; float R_upstream;
float Tdel; float Tdel;