Merge branch 'refactoring' into dev
commit 91338994c8

@@ -69,7 +69,7 @@ std::string PbTypeAnnotation::circuit_model_name() const {
   return circuit_model_name_;
 }
 
-int PbTypeAnnotation::physical_pb_type_index_factor() const {
+float PbTypeAnnotation::physical_pb_type_index_factor() const {
   return physical_pb_type_index_factor_;
 }
 
@@ -164,7 +164,7 @@ void PbTypeAnnotation::set_circuit_model_name(const std::string& name) {
   circuit_model_name_ = name;
 }
 
-void PbTypeAnnotation::set_physical_pb_type_index_factor(const int& value) {
+void PbTypeAnnotation::set_physical_pb_type_index_factor(const float& value) {
   VTR_ASSERT(true == is_operating_pb_type());
   physical_pb_type_index_factor_ = value;
 }

@@ -45,7 +45,7 @@ class PbTypeAnnotation {
   std::string idle_mode_name() const;
   std::vector<size_t> mode_bits() const;
   std::string circuit_model_name() const;
-  int physical_pb_type_index_factor() const;
+  float physical_pb_type_index_factor() const;
   int physical_pb_type_index_offset() const;
   std::vector<std::string> port_names() const;
   BasicPort physical_pb_type_port(const std::string& port_name) const;
@@ -63,7 +63,7 @@ class PbTypeAnnotation {
   void set_idle_mode_name(const std::string& name);
   void set_mode_bits(const std::vector<size_t>& mode_bits);
   void set_circuit_model_name(const std::string& name);
-  void set_physical_pb_type_index_factor(const int& value);
+  void set_physical_pb_type_index_factor(const float& value);
   void set_physical_pb_type_index_offset(const int& value);
   void add_pb_type_port_pair(const std::string& operating_pb_port_name,
                              const BasicPort& physical_pb_port);
@@ -119,7 +119,7 @@ class PbTypeAnnotation {
    * operating pb_type adder[5] with a full path clb.fle[arith].adder[5]
    * to physical pb_type adder[10] with a full path clb.fle[physical].adder[10]
    */
-  int physical_pb_type_index_factor_;
+  float physical_pb_type_index_factor_;
 
   /* The offset aims to align the indices for pb_type between operating and physical modes,
    * especially when an operating mode contains multiple pb_type (num_pb>1)

@@ -156,7 +156,7 @@ void read_xml_pb_type_annotation(pugi::xml_node& xml_pb_type,
 
   /* If this is an operating pb_type, index factor and offset may be optional needed */
   if (true == pb_type_annotation.is_operating_pb_type()) {
-    pb_type_annotation.set_physical_pb_type_index_factor(get_attribute(xml_pb_type, "physical_pb_type_index_factor", loc_data, pugiutil::ReqOpt::OPTIONAL).as_int(1));
+    pb_type_annotation.set_physical_pb_type_index_factor(get_attribute(xml_pb_type, "physical_pb_type_index_factor", loc_data, pugiutil::ReqOpt::OPTIONAL).as_float(1.));
     pb_type_annotation.set_physical_pb_type_index_offset(get_attribute(xml_pb_type, "physical_pb_type_index_offset", loc_data, pugiutil::ReqOpt::OPTIONAL).as_int(0));
   }
 

@@ -405,6 +405,7 @@ void rec_build_vpr_physical_pb_graph_node_annotation(t_pb_graph_node* pb_graph_n
     * (size_t)vpr_device_annotation.pb_graph_node_unique_index(pb_graph_node)
+    + vpr_device_annotation.physical_pb_type_index_offset(pb_graph_node->pb_type)
     );
 
   t_pb_graph_node* physical_pb_graph_node = vpr_device_annotation.pb_graph_node(physical_pb_type, physical_pb_graph_node_id);
   VTR_ASSERT(nullptr != physical_pb_graph_node);
   vpr_device_annotation.add_physical_pb_graph_node(pb_graph_node, physical_pb_graph_node);

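A minimal standalone sketch (not part of this commit) of the index mapping the two hunks above rely on: physical index = factor * operating index + offset, with the factor now a float. The adder[5] -> adder[10] pair comes from the pb_type annotation comment in the header hunk; the truncation via static_cast is an assumption, since the actual conversion happens outside the quoted lines.

    #include <cstddef>
    #include <cstdio>

    // Illustrative helper name; mirrors how the annotation factor/offset compose.
    static size_t physical_pb_index(float factor, size_t operating_index, int offset) {
      return static_cast<size_t>(factor * operating_index) + offset;
    }

    int main() {
      // clb.fle[arith].adder[5] maps to clb.fle[physical].adder[10] with factor 2.0, offset 0.
      std::printf("%zu\n", physical_pb_index(2.0f, 5, 0)); // prints 10
      return 0;
    }
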
@@ -41,6 +41,14 @@ AtomNetlist::TruthTable VprClusteringAnnotation::truth_table(t_pb* pb) const {
   return block_truth_tables_.at(pb);
 }
 
+PhysicalPb VprClusteringAnnotation::physical_pb(const ClusterBlockId& block_id) const {
+  if (physical_pbs_.end() == physical_pbs_.find(block_id)) {
+    return PhysicalPb();
+  }
+
+  return physical_pbs_.at(block_id);
+}
+
 /************************************************************************
  * Public mutators
  ***********************************************************************/
@@ -67,4 +75,16 @@ void VprClusteringAnnotation::adapt_truth_table(t_pb* pb,
   block_truth_tables_[pb] = tt;
 }
 
+void VprClusteringAnnotation::add_physical_pb(const ClusterBlockId& block_id,
+                                              const PhysicalPb& physical_pb) {
+  /* Warn any override attempt */
+  if (physical_pbs_.end() != physical_pbs_.find(block_id)) {
+    VTR_LOG_WARN("Override the physical pb for clustered block %lu in clustering context annotation!\n",
+                 size_t(block_id));
+  }
+
+  physical_pbs_[block_id] = physical_pb;
+}
+
+
 } /* End namespace openfpga*/

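A standalone analogue (not part of the commit) of the accessor/mutator pair added above: a missing block id yields a default-constructed value, and an override only produces a warning. Type and variable names here are stand-ins for the OpenFPGA classes.

    #include <cstdio>
    #include <map>
    #include <string>

    struct PhysicalPbStub { std::string name; };       // stand-in for PhysicalPb
    static std::map<int, PhysicalPbStub> physical_pbs; // keyed by a block-id stand-in

    static PhysicalPbStub physical_pb(int block_id) {
      if (physical_pbs.end() == physical_pbs.find(block_id)) {
        return PhysicalPbStub();                       // mirrors `return PhysicalPb();`
      }
      return physical_pbs.at(block_id);
    }

    static void add_physical_pb(int block_id, const PhysicalPbStub& pb) {
      if (physical_pbs.end() != physical_pbs.find(block_id)) {
        std::printf("Override the physical pb for clustered block %d\n", block_id);
      }
      physical_pbs[block_id] = pb;
    }

    int main() {
      add_physical_pb(3, {"clb[3]"});
      add_physical_pb(3, {"clb[3]-v2"});                       // triggers the override warning
      std::printf("%s\n", physical_pb(3).name.c_str());
      std::printf("missing id is empty: %d\n", physical_pb(7).name.empty());
      return 0;
    }
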
@@ -9,6 +9,8 @@
 /* Header from vpr library */
 #include "clustered_netlist.h"
 
+#include "physical_pb.h"
+
 /* Begin namespace openfpga */
 namespace openfpga {
 
@@ -33,14 +35,19 @@ class VprClusteringAnnotation {
   ClusterNetId net(const ClusterBlockId& block_id, const int& pin_index) const;
   bool is_truth_table_adapted(t_pb* pb) const;
   AtomNetlist::TruthTable truth_table(t_pb* pb) const;
+  PhysicalPb physical_pb(const ClusterBlockId& block_id) const;
  public: /* Public mutators */
   void rename_net(const ClusterBlockId& block_id, const int& pin_index,
                   const ClusterNetId& net_id);
   void adapt_truth_table(t_pb* pb, const AtomNetlist::TruthTable& tt);
+  void add_physical_pb(const ClusterBlockId& block_id, const PhysicalPb& physical_pb);
  private: /* Internal data */
   /* Pair a regular pb_type to its physical pb_type */
   std::map<ClusterBlockId, std::map<int, ClusterNetId>> net_names_;
   std::map<t_pb*, AtomNetlist::TruthTable> block_truth_tables_;
+
+  /* Link clustered blocks to physical pb (mapping results) */
+  std::map<ClusterBlockId, PhysicalPb> physical_pbs_;
 };
 
 } /* End namespace openfpga*/

@@ -165,12 +165,12 @@ t_pb_graph_node* VprDeviceAnnotation::physical_pb_graph_node(t_pb_graph_node* pb
   return physical_pb_graph_nodes_.at(pb_graph_node);
 }
 
-int VprDeviceAnnotation::physical_pb_type_index_factor(t_pb_type* pb_type) const {
+float VprDeviceAnnotation::physical_pb_type_index_factor(t_pb_type* pb_type) const {
   /* Ensure that the pb_type is in the list */
-  std::map<t_pb_type*, int>::const_iterator it = physical_pb_type_index_factors_.find(pb_type);
+  std::map<t_pb_type*, float>::const_iterator it = physical_pb_type_index_factors_.find(pb_type);
   if (it == physical_pb_type_index_factors_.end()) {
     /* Default value is 1 */
-    return 1;
+    return 1.;
   }
   return physical_pb_type_index_factors_.at(pb_type);
 }
@@ -206,9 +206,9 @@ int VprDeviceAnnotation::physical_pb_pin_offset(t_port* pb_port) const {
 }
 
 
-t_pb_graph_pin* VprDeviceAnnotation::physical_pb_graph_pin(t_pb_graph_pin* pb_graph_pin) const {
+t_pb_graph_pin* VprDeviceAnnotation::physical_pb_graph_pin(const t_pb_graph_pin* pb_graph_pin) const {
   /* Ensure that the pb_type is in the list */
-  std::map<t_pb_graph_pin*, t_pb_graph_pin*>::const_iterator it = physical_pb_graph_pins_.find(pb_graph_pin);
+  std::map<const t_pb_graph_pin*, t_pb_graph_pin*>::const_iterator it = physical_pb_graph_pins_.find(pb_graph_pin);
   if (it == physical_pb_graph_pins_.end()) {
     return nullptr;
   }
@@ -374,11 +374,11 @@ void VprDeviceAnnotation::add_physical_pb_graph_node(t_pb_graph_node* operating_
   physical_pb_graph_nodes_[operating_pb_graph_node] = physical_pb_graph_node;
 }
 
-void VprDeviceAnnotation::add_physical_pb_type_index_factor(t_pb_type* pb_type, const int& factor) {
+void VprDeviceAnnotation::add_physical_pb_type_index_factor(t_pb_type* pb_type, const float& factor) {
   /* Warn any override attempt */
-  std::map<t_pb_type*, int>::const_iterator it = physical_pb_type_index_factors_.find(pb_type);
+  std::map<t_pb_type*, float>::const_iterator it = physical_pb_type_index_factors_.find(pb_type);
   if (it != physical_pb_type_index_factors_.end()) {
-    VTR_LOG_WARN("Override the annotation between operating pb_type '%s' and it physical pb_type index factor '%d'!\n",
+    VTR_LOG_WARN("Override the annotation between operating pb_type '%s' and it physical pb_type index factor '%f'!\n",
                  pb_type->name, factor);
   }
 
@@ -409,10 +409,10 @@ void VprDeviceAnnotation::add_physical_pb_pin_rotate_offset(t_port* pb_port, con
   physical_pb_pin_offsets_[pb_port] = 0;
 }
 
-void VprDeviceAnnotation::add_physical_pb_graph_pin(t_pb_graph_pin* operating_pb_graph_pin,
+void VprDeviceAnnotation::add_physical_pb_graph_pin(const t_pb_graph_pin* operating_pb_graph_pin,
                                                     t_pb_graph_pin* physical_pb_graph_pin) {
   /* Warn any override attempt */
-  std::map<t_pb_graph_pin*, t_pb_graph_pin*>::const_iterator it = physical_pb_graph_pins_.find(operating_pb_graph_pin);
+  std::map<const t_pb_graph_pin*, t_pb_graph_pin*>::const_iterator it = physical_pb_graph_pins_.find(operating_pb_graph_pin);
   if (it != physical_pb_graph_pins_.end()) {
     VTR_LOG_WARN("Override the annotation between operating pb_graph_pin '%s' and it physical pb_graph_pin '%s'!\n",
                  operating_pb_graph_pin->port->name, physical_pb_graph_pin->port->name);

@@ -57,7 +57,7 @@ class VprDeviceAnnotation {
   /* Get the pointer to a pb_graph node using an unique index */
   t_pb_graph_node* pb_graph_node(t_pb_type* pb_type, const PbGraphNodeId& unique_index) const;
   t_pb_graph_node* physical_pb_graph_node(t_pb_graph_node* pb_graph_node) const;
-  int physical_pb_type_index_factor(t_pb_type* pb_type) const;
+  float physical_pb_type_index_factor(t_pb_type* pb_type) const;
   int physical_pb_type_index_offset(t_pb_type* pb_type) const;
 
   int physical_pb_pin_rotate_offset(t_port* pb_port) const;
@@ -70,7 +70,7 @@ class VprDeviceAnnotation {
    * The accumulated offset will be reset to 0 when it exceeds the msb() of the physical port
    */
   int physical_pb_pin_offset(t_port* pb_port) const;
-  t_pb_graph_pin* physical_pb_graph_pin(t_pb_graph_pin* pb_graph_pin) const;
+  t_pb_graph_pin* physical_pb_graph_pin(const t_pb_graph_pin* pb_graph_pin) const;
   CircuitModelId rr_switch_circuit_model(const RRSwitchId& rr_switch) const;
   CircuitModelId rr_segment_circuit_model(const RRSegmentId& rr_segment) const;
   ArchDirectId direct_annotation(const size_t& direct) const;
@@ -88,10 +88,10 @@ class VprDeviceAnnotation {
   void add_pb_graph_node_unique_index(t_pb_graph_node* pb_graph_node);
   void add_physical_pb_graph_node(t_pb_graph_node* operating_pb_graph_node,
                                   t_pb_graph_node* physical_pb_graph_node);
-  void add_physical_pb_type_index_factor(t_pb_type* pb_type, const int& factor);
+  void add_physical_pb_type_index_factor(t_pb_type* pb_type, const float& factor);
   void add_physical_pb_type_index_offset(t_pb_type* pb_type, const int& offset);
   void add_physical_pb_pin_rotate_offset(t_port* pb_port, const int& offset);
-  void add_physical_pb_graph_pin(t_pb_graph_pin* operating_pb_graph_pin, t_pb_graph_pin* physical_pb_graph_pin);
+  void add_physical_pb_graph_pin(const t_pb_graph_pin* operating_pb_graph_pin, t_pb_graph_pin* physical_pb_graph_pin);
   void add_rr_switch_circuit_model(const RRSwitchId& rr_switch, const CircuitModelId& circuit_model);
   void add_rr_segment_circuit_model(const RRSegmentId& rr_segment, const CircuitModelId& circuit_model);
   void add_direct_annotation(const size_t& direct, const ArchDirectId& arch_direct_id);
@@ -99,7 +99,7 @@ class VprDeviceAnnotation {
  private: /* Internal data */
   /* Pair a regular pb_type to its physical pb_type */
   std::map<t_pb_type*, t_pb_type*> physical_pb_types_;
-  std::map<t_pb_type*, int> physical_pb_type_index_factors_;
+  std::map<t_pb_type*, float> physical_pb_type_index_factors_;
   std::map<t_pb_type*, int> physical_pb_type_index_offsets_;
 
   /* Pair a physical mode for a pb_type
@@ -169,7 +169,7 @@ class VprDeviceAnnotation {
   std::map<t_pb_graph_node*, t_pb_graph_node*> physical_pb_graph_nodes_;
 
   /* Pair a pb_graph_pin to a physical pb_graph_pin */
-  std::map<t_pb_graph_pin*, t_pb_graph_pin*> physical_pb_graph_pins_;
+  std::map<const t_pb_graph_pin*, t_pb_graph_pin*> physical_pb_graph_pins_;
 
   /* Pair a Routing Resource Switch (rr_switch) to a circuit model */
   std::map<RRSwitchId, CircuitModelId> rr_switch_circuit_models_;

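A small standalone sketch (not part of the commit) of why the map key and accessor above switch to `const t_pb_graph_pin*`: callers holding only a const pointer can then be used for lookup without a const_cast. The `Pin` type below is a stand-in.

    #include <map>

    struct Pin {};

    std::map<const Pin*, Pin*> pin_map;   // mirrors physical_pb_graph_pins_

    Pin* lookup(const Pin* key) {         // mirrors physical_pb_graph_pin(const t_pb_graph_pin*)
      auto it = pin_map.find(key);        // a const Pin* is accepted directly as the key
      return (it == pin_map.end()) ? nullptr : it->second;
    }

    int main() {
      Pin a, b;
      pin_map[&a] = &b;
      const Pin* ca = &a;                 // a const pointer is now enough to query
      return lookup(ca) == &b ? 0 : 1;
    }
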
@@ -15,7 +15,6 @@
 #include "annotate_rr_graph.h"
 #include "mux_library_builder.h"
 #include "build_tile_direct.h"
-#include "build_physical_lb_rr_graph.h"
 #include "openfpga_link_arch.h"
 
 /* Include global variables of VPR */
@@ -112,11 +111,6 @@ void link_arch(OpenfpgaContext& openfpga_ctx,
   /* Build tile direct annotation */
   openfpga_ctx.mutable_tile_direct() = build_device_tile_direct(g_vpr_ctx.device(),
                                                                 openfpga_ctx.arch().arch_direct);
-
-
-  build_physical_lb_rr_graphs(g_vpr_ctx.device(),
-                              openfpga_ctx.mutable_vpr_device_annotation(),
-                              cmd_context.option_enable(cmd, opt_verbose));
 }
 
 } /* end namespace openfpga */

@@ -60,14 +60,19 @@ void update_cluster_pin_with_post_routing_results(const DeviceContext& device_ct
                                                   const vtr::Point<size_t>& grid_coord,
                                                   const ClusterBlockId& blk_id,
                                                   const e_side& border_side,
+                                                  const size_t& z,
                                                   const bool& verbose) {
   /* Handle each pin */
   auto logical_block = clustering_ctx.clb_nlist.block_type(blk_id);
   auto physical_tile = pick_best_physical_type(logical_block);
 
   for (int j = 0; j < logical_block->pb_type->num_pins; j++) {
-    /* Get the ptc num for the pin in rr_graph */
-    int physical_pin = get_physical_pin(physical_tile, logical_block, j);
+    /* Get the ptc num for the pin in rr_graph, we need t consider the z offset here
+     * z offset is the location in the multiple-logic-tile tile
+     * Get physical pin does not consider THIS!!!!
+     */
+    int physical_pin = z * logical_block->pb_type->num_pins
+                     + get_physical_pin(physical_tile, logical_block, j);
     auto pin_class = physical_tile->pin_class[physical_pin];
     auto class_inf = physical_tile->class_inf[pin_class];
 
@@ -122,7 +127,7 @@ void update_cluster_pin_with_post_routing_results(const DeviceContext& device_ct
 
     /* If matched, we finish here */
     if (routing_net_id == cluster_net_id) {
-      continue;
+      continue;
     }
     /* Add to net modification */
     vpr_clustering_annotation.rename_net(blk_id, j, routing_net_id);
@@ -179,8 +184,10 @@ void update_pb_pin_with_post_routing_results(const DeviceContext& device_ctx,
     /* We know the entrance to grid info and mapping results, do the fix-up for this block */
     vtr::Point<size_t> grid_coord(x, y);
     update_cluster_pin_with_post_routing_results(device_ctx, clustering_ctx,
-                                                 vpr_routing_annotation, vpr_clustering_annotation,
+                                                 vpr_routing_annotation,
+                                                 vpr_clustering_annotation,
                                                  grid_coord, cluster_blk_id, NUM_SIDES,
+                                                 placement_ctx.block_locs[cluster_blk_id].loc.z,
                                                  verbose);
   }
 }
@@ -227,8 +234,10 @@ void update_pb_pin_with_post_routing_results(const DeviceContext& device_ctx,
     }
     /* Update on I/O grid */
     update_cluster_pin_with_post_routing_results(device_ctx, clustering_ctx,
-                                                 vpr_routing_annotation, vpr_clustering_annotation,
+                                                 vpr_routing_annotation,
+                                                 vpr_clustering_annotation,
                                                  io_coord, cluster_blk_id, io_side,
+                                                 placement_ctx.block_locs[cluster_blk_id].loc.z,
                                                  verbose);
   }
 }

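A minimal sketch (not part of the commit) of why the z offset added above matters when one physical tile hosts several logical blocks (capacity > 1). The formula matches the new lines: physical_pin = z * num_pins + pin index within the block; the numeric values are made up for illustration.

    #include <cstdio>

    int main() {
      const int num_pins = 48;               // hypothetical pins per logical block
      const int j = 5;                       // same pin index inside each block
      for (int z = 0; z < 2; ++z) {          // two blocks stacked in the same tile
        int physical_pin = z * num_pins + j; // 5 for z = 0, 53 for z = 1
        std::printf("z=%d -> physical_pin=%d\n", z, physical_pin);
      }
      return 0;
    }
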
@@ -24,9 +24,10 @@ void repack(OpenfpgaContext& openfpga_ctx,
  CommandOptionId opt_verbose = cmd.option("verbose");

  pack_physical_pbs(g_vpr_ctx.device(),
                    openfpga_ctx.vpr_device_annotation(),
                    g_vpr_ctx.atom(),
                    g_vpr_ctx.clustering(),
                    openfpga_ctx.mutable_vpr_device_annotation(),
                    openfpga_ctx.mutable_vpr_clustering_annotation(),
                    openfpga_ctx.vpr_routing_annotation(),
                    cmd_context.option_enable(cmd, opt_verbose));
 }

@@ -1053,7 +1053,7 @@ void build_physical_tile_module(ModuleManager& module_manager,
                             sram_orgz_type, circuit_lib.design_tech_type(sram_model));
   }
 
-  VTR_LOG("Done\n");
+  VTR_LOGV(verbose, "Done\n");
 }
 
 /*****************************************************************************

@@ -10,6 +10,7 @@
 #include "pb_type_utils.h"
 
 #include "build_physical_lb_rr_graph.h"
+#include "check_lb_rr_graph.h"
 
 /* begin namespace openfpga */
 namespace openfpga {
@@ -395,6 +396,16 @@ void build_physical_lb_rr_graphs(const DeviceContext& device_ctx,
                lb_type.pb_graph_head->pb_type->name);
 
     const LbRRGraph& lb_rr_graph = build_lb_type_physical_lb_rr_graph(lb_type.pb_graph_head, const_cast<const VprDeviceAnnotation&>(device_annotation), verbose);
+    /* Check the rr_graph */
+    if (false == lb_rr_graph.validate()) {
+      exit(1);
+    }
+    if (false == check_lb_rr_graph(lb_rr_graph)) {
+      exit(1);
+    }
+    VTR_LOGV(verbose,
+             "Check routing resource graph for logical tile passed\n");
+
     device_annotation.add_physical_lb_rr_graph(lb_type.pb_graph_head, lb_rr_graph);
   }
 

@@ -0,0 +1,206 @@
#include <map>

#include "vtr_log.h"

#include "lb_rr_graph_utils.h"
#include "check_lb_rr_graph.h"

/* begin namespace openfpga */
namespace openfpga {

/***********************************************************************
 * This function aims at checking any duplicated edges (with same EdgeId)
 * of a given node.
 * We will walkthrough the input edges of a node and see if there is any duplication
 **********************************************************************/
static bool check_lb_rr_graph_node_duplicated_edges(const LbRRGraph& lb_rr_graph,
                                                    const LbRRNodeId& node) {
  bool no_duplication = true;

  /* Create a map for each input edge */
  std::map<const t_mode*, std::map<LbRREdgeId, size_t>> edge_counter;

  /* Check each input edges */
  for (const auto edge : lb_rr_graph.node_in_edges(node)) {
    if (nullptr == lb_rr_graph.edge_mode(edge)) {
      continue;
    }
    auto result = edge_counter[lb_rr_graph.edge_mode(edge)].insert(std::pair<LbRREdgeId, size_t>(edge, 1));
    if (false == result.second) {
      result.first->second++;
    }
  }

  for (auto& edge_mode : edge_counter) {
    for (auto& elem : edge_mode.second) {
      if (elem.second > 1) {
        /* Reach here it means we find some duplicated edges and report errors */
        /* Print a warning! */
        VTR_LOG_WARN("Node %d has duplicated input edges (id = %d)!\n",
                     size_t(node), size_t(elem.first));
        print_lb_rr_node(lb_rr_graph, node);
        no_duplication = false;
      }
    }
  }

  return no_duplication;
}

/***********************************************************************
 * Check the whole Routing Resource Graph
 * identify and report any duplicated edges between two nodes
 **********************************************************************/
static bool check_lb_rr_graph_duplicated_edges(const LbRRGraph& lb_rr_graph) {
  bool no_duplication = true;
  /* For each node:
   * Search input edges, see there are two edges with same id or address
   */
  for (const auto& node : lb_rr_graph.nodes()) {
    if (false == check_lb_rr_graph_node_duplicated_edges(lb_rr_graph, node)) {
      no_duplication = false;
    }
  }

  return no_duplication;
}

/***********************************************************************
 * Identify and report any dangling node (nodes without any fan-in or fan-out)
 * in the LbRRGraph
 **********************************************************************/
static bool check_lb_rr_graph_dangling_nodes(const LbRRGraph& lb_rr_graph) {
  bool no_dangling = true;
  /* For each node:
   * check if the number of input edges and output edges are both 0
   * If so, this is a dangling nodes and report
   */
  for (auto node : lb_rr_graph.nodes()) {
    if ((0 == lb_rr_graph.node_in_edges(node).size())
        && (0 == lb_rr_graph.node_out_edges(node).size())) {
      /* Print a warning! */
      VTR_LOG_WARN("Node %s is dangling (zero fan-in and zero fan-out)!\n",
                   node);
      VTR_LOG_WARN("Node details for debugging:\n");
      print_lb_rr_node(lb_rr_graph, node);
      no_dangling = false;
    }
  }

  return no_dangling;
}

/***********************************************************************
 * check if all the source nodes are in the right condition:
 * 1. zero fan-in and non-zero fanout
 **********************************************************************/
static bool check_lb_rr_graph_source_nodes(const LbRRGraph& lb_rr_graph) {
  bool invalid_sources = false;
  /* For each node:
   * check if the number of input edges and output edges are both 0
   * If so, this is a dangling nodes and report
   */
  for (auto node : lb_rr_graph.nodes()) {
    /* Pass nodes whose types are not LB_SOURCE */
    if (LB_SOURCE != lb_rr_graph.node_type(node)) {
      continue;
    }
    if ((0 != lb_rr_graph.node_in_edges(node).size())
        || (0 == lb_rr_graph.node_out_edges(node).size())) {
      /* Print a warning! */
      VTR_LOG_WARN("Source node %d is invalid (should have zero fan-in and non-zero fan-out)!\n",
                   size_t(node));
      VTR_LOG_WARN("Node details for debugging:\n");
      print_lb_rr_node(lb_rr_graph, node);
      invalid_sources = true;
    }
  }

  return !invalid_sources;
}

/***********************************************************************
 * check if all the sink nodes are in the right condition:
 * 1. non-zero fan-in and zero fanout
 **********************************************************************/
static bool check_lb_rr_graph_sink_nodes(const LbRRGraph& lb_rr_graph) {
  bool invalid_sinks = false;
  /* For each node:
   * check if the number of input edges and output edges are both 0
   * If so, this is a dangling nodes and report
   */
  for (auto node : lb_rr_graph.nodes()) {
    /* Pass nodes whose types are not LB_SINK */
    if (LB_SINK != lb_rr_graph.node_type(node)) {
      continue;
    }
    if ((0 == lb_rr_graph.node_in_edges(node).size())
        || (0 != lb_rr_graph.node_out_edges(node).size())) {
      /* Print a warning! */
      VTR_LOG_WARN("Sink node %s is invalid (should have non-zero fan-in and zero fan-out)!\n",
                   node);
      VTR_LOG_WARN("Node details for debugging:\n");
      print_lb_rr_node(lb_rr_graph, node);
      invalid_sinks = true;
    }
  }

  return !invalid_sinks;
}

/***********************************************************************
 * This is an advanced checker for LbRRGraph object
 * Note that the checker try to report as many problems as it can.
 * The problems may cause routing efficiency or even failures, depending
 * on routing algorithms.
 * It is strongly suggested to run this sanitizer before conducting
 * routing algorithms
 *
 * For those who will develop customized lb_rr_graphs and routers:
 * On the other hand, it is suggested that developers to create their
 * own checking function for the lb_rr_graph, to guarantee their routers
 * will work properly.
 **********************************************************************/
bool check_lb_rr_graph(const LbRRGraph& lb_rr_graph) {
  size_t num_err = 0;

  if (false == check_lb_rr_graph_duplicated_edges(lb_rr_graph)) {
    VTR_LOG_WARN("Fail in checking duplicated edges !\n");
    num_err++;
  }

  if (false == check_lb_rr_graph_dangling_nodes(lb_rr_graph)) {
    VTR_LOG_WARN("Fail in checking dangling nodes !\n");
    num_err++;
  }

  if (false == check_lb_rr_graph_source_nodes(lb_rr_graph)) {
    VTR_LOG_WARN("Fail in checking source nodes!\n");
    num_err++;
  }

  if (false == check_lb_rr_graph_sink_nodes(lb_rr_graph)) {
    VTR_LOG_WARN("Fail in checking sink nodes!\n");
    num_err++;
  }

  if (false == check_lb_rr_graph_source_nodes(lb_rr_graph)) {
    VTR_LOG_WARN("Fail in checking source nodes!\n");
    num_err++;
  }

  if (false == check_lb_rr_graph_sink_nodes(lb_rr_graph)) {
    VTR_LOG_WARN("Fail in checking sink nodes!\n");
    num_err++;
  }

  /* Error out if there is any fatal errors found */
  if (0 < num_err) {
    VTR_LOG_WARN("Checked Logical tile Routing Resource graph with %d errors !\n",
                 num_err);
  }

  return (0 == num_err);
}

} /* end namespace openfpga */

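A standalone analogue (not part of the commit) of the duplicate-edge counting idiom used in the checker above: std::map::insert() returns {iterator, false} when the key already exists, and the counter is then bumped. Plain ints stand in for LbRREdgeId.

    #include <cstdio>
    #include <map>

    int main() {
      std::map<int, size_t> edge_counter;
      const int edges[] = {4, 7, 4};                 // edge 4 appears twice
      for (int e : edges) {
        auto result = edge_counter.insert({e, 1});
        if (!result.second) {
          result.first->second++;                    // existing key: count the duplicate
        }
      }
      for (const auto& kv : edge_counter) {
        if (kv.second > 1) {
          std::printf("edge %d is duplicated (%zu occurrences)\n", kv.first, kv.second);
        }
      }
      return 0;
    }
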
@@ -0,0 +1,21 @@
#ifndef CHECK_LB_RR_GRAPH_H
#define CHECK_LB_RR_GRAPH_H

/********************************************************************
 * Include header files that are required by function declaration
 *******************************************************************/
#include "lb_rr_graph.h"


/********************************************************************
 * Function declaration
 *******************************************************************/

/* begin namespace openfpga */
namespace openfpga {

bool check_lb_rr_graph(const LbRRGraph& rr_graph);

} /* end namespace openfpga */

#endif

@ -0,0 +1,873 @@
|
|||
/******************************************************************************
|
||||
* Memember functions for data structure LbRouter
|
||||
******************************************************************************/
|
||||
#include "vtr_assert.h"
|
||||
#include "vtr_log.h"
|
||||
|
||||
#include "physical_types.h"
|
||||
#include "pb_type_graph.h"
|
||||
#include "vpr_error.h"
|
||||
|
||||
#include "pb_type_utils.h"
|
||||
#include "lb_rr_graph_utils.h"
|
||||
#include "lb_router.h"
|
||||
|
||||
/* begin namespace openfpga */
|
||||
namespace openfpga {
|
||||
|
||||
/**************************************************
|
||||
* Public Constructors
|
||||
*************************************************/
|
||||
LbRouter::LbRouter(const LbRRGraph& lb_rr_graph, t_logical_block_type_ptr lb_type) {
|
||||
routing_status_.resize(lb_rr_graph.nodes().size());
|
||||
explored_node_tb_.resize(lb_rr_graph.nodes().size());
|
||||
explore_id_index_ = 1;
|
||||
|
||||
lb_type_ = lb_type;
|
||||
|
||||
/* Default routing parameters */
|
||||
params_.max_iterations = 50;
|
||||
params_.pres_fac = 1;
|
||||
params_.pres_fac_mult = 2;
|
||||
params_.hist_fac = 0.3;
|
||||
|
||||
is_routed_ = false;
|
||||
|
||||
pres_con_fac_ = 1;
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
* Public Accessors
|
||||
*************************************************/
|
||||
std::vector<LbRRNodeId> LbRouter::find_congested_rr_nodes(const LbRRGraph& lb_rr_graph) const {
|
||||
/* Validate if the rr_graph is the one we used to initialize the router */
|
||||
VTR_ASSERT(true == matched_lb_rr_graph(lb_rr_graph));
|
||||
|
||||
std::vector<LbRRNodeId> congested_rr_nodes;
|
||||
|
||||
for (const LbRRNodeId& inode : lb_rr_graph.nodes()) {
|
||||
if (routing_status_[inode].occ > lb_rr_graph.node_capacity(inode)) {
|
||||
congested_rr_nodes.push_back(inode);
|
||||
}
|
||||
}
|
||||
|
||||
return congested_rr_nodes;
|
||||
}
|
||||
|
||||
bool LbRouter::is_routed() const {
|
||||
return is_routed_;
|
||||
}
|
||||
|
||||
std::vector<LbRRNodeId> LbRouter::net_routed_nodes(const NetId& net) const {
|
||||
VTR_ASSERT(true == is_routed());
|
||||
VTR_ASSERT(true == valid_net_id(net));
|
||||
|
||||
std::vector<LbRRNodeId> routed_nodes;
|
||||
|
||||
t_trace* rt_tree = lb_net_rt_trees_[net];
|
||||
if (nullptr == rt_tree) {
|
||||
return routed_nodes;
|
||||
}
|
||||
/* Walk through the routing tree of the net */
|
||||
rec_collect_trace_nodes(rt_tree, routed_nodes);
|
||||
|
||||
return routed_nodes;
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
* Private accessors
|
||||
*************************************************/
|
||||
bool LbRouter::is_route_success(const LbRRGraph& lb_rr_graph) const {
|
||||
/* Validate if the rr_graph is the one we used to initialize the router */
|
||||
VTR_ASSERT(true == matched_lb_rr_graph(lb_rr_graph));
|
||||
|
||||
for (const LbRRNodeId& inode : lb_rr_graph.nodes()) {
|
||||
if (routing_status_[inode].occ > lb_rr_graph.node_capacity(inode)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
LbRouter::t_trace* LbRouter::find_node_in_rt(t_trace* rt, const LbRRNodeId& rt_index) {
|
||||
t_trace* cur;
|
||||
if (rt->current_node == rt_index) {
|
||||
return rt;
|
||||
} else {
|
||||
for (unsigned int i = 0; i < rt->next_nodes.size(); i++) {
|
||||
cur = find_node_in_rt(&rt->next_nodes[i], rt_index);
|
||||
if (cur != nullptr) {
|
||||
return cur;
|
||||
}
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool LbRouter::route_has_conflict(const LbRRGraph& lb_rr_graph, t_trace* rt) const {
|
||||
t_mode* cur_mode = nullptr;
|
||||
for (unsigned int i = 0; i < rt->next_nodes.size(); i++) {
|
||||
std::vector<LbRREdgeId> edges = lb_rr_graph.find_edge(rt->current_node, rt->next_nodes[i].current_node);
|
||||
VTR_ASSERT(1 == edges.size());
|
||||
t_mode* new_mode = lb_rr_graph.edge_mode(edges[0]);
|
||||
if (cur_mode != nullptr && cur_mode != new_mode) {
|
||||
return true;
|
||||
}
|
||||
if (route_has_conflict(lb_rr_graph, &rt->next_nodes[i]) == true) {
|
||||
return true;
|
||||
}
|
||||
cur_mode = new_mode;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void LbRouter::rec_collect_trace_nodes(const t_trace* trace, std::vector<LbRRNodeId>& routed_nodes) const {
|
||||
if (routed_nodes.end() == std::find(routed_nodes.begin(), routed_nodes.end(), trace->current_node)) {
|
||||
routed_nodes.push_back(trace->current_node);
|
||||
}
|
||||
|
||||
for (const t_trace& next : trace->next_nodes) {
|
||||
rec_collect_trace_nodes(&next, routed_nodes);
|
||||
}
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
* Public mutators
|
||||
*************************************************/
|
||||
LbRouter::NetId LbRouter::create_net_to_route(const LbRRNodeId& source, const std::vector<LbRRNodeId>& terminals) {
|
||||
/* Create an new id */
|
||||
NetId net = NetId(lb_net_ids_.size());
|
||||
lb_net_ids_.push_back(net);
|
||||
|
||||
/* Allocate other attributes */
|
||||
lb_net_atom_net_ids_.push_back(AtomNetId::INVALID());
|
||||
lb_net_atom_pins_.emplace_back();
|
||||
|
||||
std::vector<LbRRNodeId> net_terminals = terminals;
|
||||
net_terminals.insert(net_terminals.begin(), source);
|
||||
|
||||
lb_net_terminals_.push_back(net_terminals);
|
||||
lb_net_rt_trees_.push_back(nullptr);
|
||||
|
||||
return net;
|
||||
}
|
||||
|
||||
void LbRouter::add_net_atom_net_id(const NetId& net, const AtomNetId& atom_net) {
|
||||
VTR_ASSERT(true == valid_net_id(net));
|
||||
lb_net_atom_net_ids_[net] = atom_net;
|
||||
}
|
||||
|
||||
void LbRouter::add_net_atom_pins(const NetId& net, const AtomPinId& src_pin, const std::vector<AtomPinId>& terminal_pins) {
|
||||
VTR_ASSERT(true == valid_net_id(net));
|
||||
lb_net_atom_pins_[net] = terminal_pins;
|
||||
lb_net_atom_pins_[net].insert(lb_net_atom_pins_[net].begin(), src_pin);
|
||||
}
|
||||
|
||||
void LbRouter::set_physical_pb_modes(const LbRRGraph& lb_rr_graph,
|
||||
const VprDeviceAnnotation& device_annotation) {
|
||||
/* Go through each node in the routing resource graph
|
||||
* Find the physical mode of each pb_graph_pin that is binded to the node
|
||||
* For input pins, the physical mode is a mode of its parent pb_type
|
||||
* For output pins, the physical mode is a mode of the parent pb_type of its parent
|
||||
*/
|
||||
for (const LbRRNodeId& node : lb_rr_graph.nodes()) {
|
||||
t_pb_graph_pin* pb_pin = lb_rr_graph.node_pb_graph_pin(node);
|
||||
if (nullptr == pb_pin) {
|
||||
routing_status_[node].mode = nullptr;
|
||||
} else {
|
||||
if (IN_PORT == pb_pin->port->type) {
|
||||
routing_status_[node].mode = device_annotation.physical_mode(pb_pin->parent_node->pb_type);
|
||||
} else {
|
||||
VTR_ASSERT(OUT_PORT == pb_pin->port->type);
|
||||
/* For top-level pb_graph node, the physical mode is nullptr */
|
||||
if (true == pb_pin->parent_node->is_root()) {
|
||||
routing_status_[node].mode = nullptr;
|
||||
} else {
|
||||
routing_status_[node].mode = device_annotation.physical_mode(pb_pin->parent_node->parent_pb_graph_node->pb_type);
|
||||
/* TODO: need to think about how to handle INOUT ports !!! */
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool LbRouter::try_route(const LbRRGraph& lb_rr_graph,
|
||||
const AtomNetlist& atom_nlist,
|
||||
const int& verbosity) {
|
||||
/* Validate if the rr_graph is the one we used to initialize the router */
|
||||
VTR_ASSERT(true == matched_lb_rr_graph(lb_rr_graph));
|
||||
|
||||
/* Ensure each net to be routed is valid */
|
||||
for (const NetId& net : lb_net_ids_) {
|
||||
VTR_ASSERT(true == check_net(lb_rr_graph, atom_nlist, net));
|
||||
}
|
||||
|
||||
is_routed_ = false;
|
||||
|
||||
bool is_impossible = false;
|
||||
|
||||
mode_status_.is_mode_conflict = false;
|
||||
mode_status_.try_expand_all_modes = false;
|
||||
|
||||
t_expansion_node exp_node;
|
||||
|
||||
reset_explored_node_tb();
|
||||
|
||||
/* Reset current routing */
|
||||
reset_net_rt();
|
||||
reset_routing_status();
|
||||
|
||||
std::unordered_map<const t_pb_graph_node*, const t_mode*> mode_map;
|
||||
|
||||
/* Iteratively remove congestion until a successful route is found.
|
||||
* Cap the total number of iterations tried so that if a solution does not exist, then the router won't run indefinitely */
|
||||
pres_con_fac_ = params_.pres_fac;
|
||||
for (int iter = 0; iter < params_.max_iterations && !is_routed_ && !is_impossible; iter++) {
|
||||
unsigned int inet;
|
||||
/* Iterate across all nets internal to logic block */
|
||||
for (inet = 0; inet < lb_net_ids_.size() && !is_impossible; inet++) {
|
||||
NetId idx = NetId(inet);
|
||||
if (is_skip_route_net(lb_rr_graph, lb_net_rt_trees_[idx])) {
|
||||
continue;
|
||||
}
|
||||
|
||||
commit_remove_rt(lb_rr_graph, lb_net_rt_trees_[idx], RT_REMOVE, mode_map);
|
||||
free_net_rt(lb_net_rt_trees_[idx]);
|
||||
lb_net_rt_trees_[idx] = nullptr;
|
||||
add_source_to_rt(idx);
|
||||
|
||||
/* Route each sink of net */
|
||||
for (unsigned int itarget = 1; itarget < lb_net_terminals_[idx].size() && !is_impossible; itarget++) {
|
||||
pq_.clear();
|
||||
/* Get lowest cost next node, repeat until a path is found or if it is impossible to route */
|
||||
|
||||
expand_rt(idx, idx);
|
||||
|
||||
is_impossible = try_expand_nodes(atom_nlist, lb_rr_graph, idx, exp_node, itarget, mode_status_.expand_all_modes, verbosity);
|
||||
|
||||
if (is_impossible && !mode_status_.expand_all_modes) {
|
||||
mode_status_.try_expand_all_modes = true;
|
||||
mode_status_.expand_all_modes = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if (exp_node.node_index == lb_net_terminals_[idx][itarget]) {
|
||||
/* Net terminal is routed, add this to the route tree, clear data structures, and keep going */
|
||||
is_impossible = add_to_rt(lb_net_rt_trees_[idx], exp_node.node_index, idx);
|
||||
}
|
||||
|
||||
if (is_impossible) {
|
||||
VTR_LOG("Routing was impossible!\n");
|
||||
} else if (mode_status_.expand_all_modes) {
|
||||
is_impossible = route_has_conflict(lb_rr_graph, lb_net_rt_trees_[idx]);
|
||||
if (is_impossible) {
|
||||
VTR_LOG("Routing was impossible due to modes!\n");
|
||||
}
|
||||
}
|
||||
|
||||
explore_id_index_++;
|
||||
if (explore_id_index_ > 2000000000) {
|
||||
/* overflow protection */
|
||||
for (const LbRRNodeId& id : lb_rr_graph.nodes()) {
|
||||
explored_node_tb_[id].explored_id = OPEN;
|
||||
explored_node_tb_[id].enqueue_id = OPEN;
|
||||
explore_id_index_ = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_impossible) {
|
||||
commit_remove_rt(lb_rr_graph, lb_net_rt_trees_[idx], RT_COMMIT, mode_map);
|
||||
if (mode_status_.is_mode_conflict) {
|
||||
is_impossible = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_impossible) {
|
||||
is_routed_ = is_route_success(lb_rr_graph);
|
||||
} else {
|
||||
--inet;
|
||||
VTR_LOGV(verbosity < 3,
|
||||
"Net %lu '%s' is impossible to route within proposed %s cluster\n",
|
||||
inet,
|
||||
atom_nlist.net_name(lb_net_atom_net_ids_[NetId(inet)]).c_str(),
|
||||
lb_type_->name);
|
||||
VTR_LOGV(verbosity < 3,
|
||||
"\tNet source pin '%s'\n",
|
||||
lb_rr_graph.node_pb_graph_pin(lb_net_terminals_[NetId(inet)][0])->to_string().c_str());
|
||||
VTR_LOGV(verbosity < 3,
|
||||
"\tNet sink pins:\n");
|
||||
for (size_t isink = 1; isink < lb_net_terminals_[NetId(inet)].size(); ++isink) {
|
||||
VTR_LOGV(verbosity < 3,
|
||||
"\t\t%s\n",
|
||||
lb_rr_graph.node_pb_graph_pin(lb_net_terminals_[NetId(inet)][isink])->to_string().c_str());
|
||||
}
|
||||
VTR_LOGV(verbosity < 3,
|
||||
"Please check your architecture XML to see if it is routable\n");
|
||||
|
||||
is_routed_ = false;
|
||||
}
|
||||
pres_con_fac_ *= params_.pres_fac_mult;
|
||||
}
|
||||
|
||||
/* TODO:
|
||||
* Let user to decide to how proceed upon the routing results:
|
||||
* - route success: save the results through public accessors to lb_nets_
|
||||
* print the route results to files
|
||||
* - route fail: report all the congestion nodes
|
||||
*/
|
||||
return is_routed_;
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
* Private mutators
|
||||
*************************************************/
|
||||
void LbRouter::fix_duplicate_equivalent_pins(const AtomContext& atom_ctx,
|
||||
const LbRRGraph& lb_rr_graph) {
|
||||
for (const NetId& ilb_net : lb_net_ids_) {
|
||||
//Collect all the sink terminals indicies which target a particular node
|
||||
std::map<LbRRNodeId, std::vector<int>> duplicate_terminals;
|
||||
for (size_t iterm = 1; iterm < lb_net_terminals_[ilb_net].size(); ++iterm) {
|
||||
LbRRNodeId node = lb_net_terminals_[ilb_net][iterm];
|
||||
|
||||
duplicate_terminals[node].push_back(iterm);
|
||||
}
|
||||
|
||||
for (auto kv : duplicate_terminals) {
|
||||
if (kv.second.size() < 2) continue; //Only process duplicates
|
||||
|
||||
//Remap all the duplicate terminals so they target the pin instead of the sink
|
||||
for (size_t idup_term = 0; idup_term < kv.second.size(); ++idup_term) {
|
||||
int iterm = kv.second[idup_term]; //The index in terminals which is duplicated
|
||||
|
||||
VTR_ASSERT(lb_net_atom_pins_[ilb_net].size() == lb_net_terminals_[ilb_net].size());
|
||||
AtomPinId atom_pin = lb_net_atom_pins_[ilb_net][iterm];
|
||||
VTR_ASSERT(atom_pin);
|
||||
|
||||
const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_ctx.nlist, atom_ctx.lookup, atom_pin);
|
||||
VTR_ASSERT(pb_graph_pin);
|
||||
|
||||
if (pb_graph_pin->port->equivalent == PortEquivalence::NONE) continue; //Only need to remap equivalent ports
|
||||
|
||||
//Remap this terminal to an explicit pin instead of the common sink
|
||||
LbRRNodeId pin_index = lb_rr_graph.find_node(LB_INTERMEDIATE, pb_graph_pin);
|
||||
VTR_ASSERT(true == lb_rr_graph.valid_node_id(pin_index));
|
||||
|
||||
VTR_LOG_WARN(
|
||||
"Found duplicate nets connected to logically equivalent pins. "
|
||||
"Remapping intra lb net %d (atom net %zu '%s') from common sink "
|
||||
"pb_route %d to fixed pin pb_route %d\n",
|
||||
size_t(ilb_net), size_t(lb_net_atom_net_ids_[ilb_net]), atom_ctx.nlist.net_name(lb_net_atom_net_ids_[ilb_net]).c_str(),
|
||||
kv.first, size_t(pin_index));
|
||||
|
||||
VTR_ASSERT(1 == lb_rr_graph.node_out_edges(pin_index, &(pb_graph_pin->parent_node->pb_type->modes[0])).size());
|
||||
LbRRNodeId sink_index = lb_rr_graph.edge_sink_node(lb_rr_graph.node_out_edges(pin_index, &(pb_graph_pin->parent_node->pb_type->modes[0]))[0]);
|
||||
VTR_ASSERT(LB_SINK == lb_rr_graph.node_type(sink_index));
|
||||
VTR_ASSERT_MSG(sink_index == lb_net_terminals_[ilb_net][iterm], "Remapped pin must be connected to original sink");
|
||||
|
||||
//Change the target
|
||||
lb_net_terminals_[ilb_net][iterm] = pin_index;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check one edge for mode conflict.
|
||||
bool LbRouter::check_edge_for_route_conflicts(std::unordered_map<const t_pb_graph_node*, const t_mode*>& mode_map,
|
||||
const t_pb_graph_pin* driver_pin,
|
||||
const t_pb_graph_pin* pin) {
|
||||
if (driver_pin == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Only check pins that are OUT_PORTs.
|
||||
if (pin == nullptr || pin->port == nullptr || pin->port->type != OUT_PORT) {
|
||||
return false;
|
||||
}
|
||||
VTR_ASSERT(!pin->port->is_clock);
|
||||
|
||||
auto* pb_graph_node = pin->parent_node;
|
||||
VTR_ASSERT(pb_graph_node->pb_type == pin->port->parent_pb_type);
|
||||
|
||||
const t_pb_graph_edge* edge = get_edge_between_pins(driver_pin, pin);
|
||||
VTR_ASSERT(edge != nullptr);
|
||||
|
||||
auto mode_of_edge = edge->interconnect->parent_mode_index;
|
||||
auto* mode = &pb_graph_node->pb_type->modes[mode_of_edge];
|
||||
|
||||
auto result = mode_map.insert(std::make_pair(pb_graph_node, mode));
|
||||
if (!result.second) {
|
||||
if (result.first->second != mode) {
|
||||
VTR_LOG("Differing modes for block. Got %s mode, while previously was %s for interconnect %s.\n",
|
||||
mode->name, result.first->second->name,
|
||||
edge->interconnect->name);
|
||||
// The illegal mode is added to the pb_graph_node as it resulted in a conflict during atom-to-atom routing. This mode cannot be used in the consequent cluster
|
||||
// generation try.
|
||||
auto it = illegal_modes_.find(pb_graph_node);
|
||||
if (it == illegal_modes_.end()) {
|
||||
illegal_modes_[pb_graph_node].push_back(result.first->second);
|
||||
} else {
|
||||
if (std::find(illegal_modes_.at(pb_graph_node).begin(), illegal_modes_.at(pb_graph_node).end(), result.first->second) == illegal_modes_.at(pb_graph_node).end()) {
|
||||
it->second.push_back(result.first->second);
|
||||
}
|
||||
}
|
||||
|
||||
// If the number of illegal modes equals the number of available mode for a specific pb_graph_node it means that no cluster can be generated. This resuts
|
||||
// in a fatal error.
|
||||
if ((int)illegal_modes_.at(pb_graph_node).size() >= pb_graph_node->pb_type->num_modes) {
|
||||
VPR_FATAL_ERROR(VPR_ERROR_PACK, "There are no more available modes to be used. Routing Failed!");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void LbRouter::commit_remove_rt(const LbRRGraph& lb_rr_graph,
|
||||
t_trace* rt,
|
||||
const e_commit_remove& op,
|
||||
std::unordered_map<const t_pb_graph_node*, const t_mode*>& mode_map) {
|
||||
int incr;
|
||||
|
||||
if (nullptr == rt) {
|
||||
return;
|
||||
}
|
||||
|
||||
LbRRNodeId inode = rt->current_node;
|
||||
|
||||
/* Determine if node is being used or removed */
|
||||
if (op == RT_COMMIT) {
|
||||
incr = 1;
|
||||
if (routing_status_[inode].occ >= lb_rr_graph.node_capacity(inode)) {
|
||||
routing_status_[inode].historical_usage += (routing_status_[inode].occ - lb_rr_graph.node_capacity(inode) + 1); /* store historical overuse */
|
||||
}
|
||||
} else {
|
||||
incr = -1;
|
||||
explored_node_tb_[inode].inet = NetId::INVALID();
|
||||
}
|
||||
|
||||
routing_status_[inode].occ += incr;
|
||||
VTR_ASSERT(routing_status_[inode].occ >= 0);
|
||||
|
||||
t_pb_graph_pin* driver_pin = lb_rr_graph.node_pb_graph_pin(inode);
|
||||
|
||||
/* Recursively update route tree */
|
||||
for (unsigned int i = 0; i < rt->next_nodes.size(); i++) {
|
||||
// Check to see if there is no mode conflict between previous nets.
|
||||
// A conflict is present if there are differing modes between a pb_graph_node
|
||||
// and its children.
|
||||
if (op == RT_COMMIT && mode_status_.try_expand_all_modes) {
|
||||
const LbRRNodeId& node = rt->next_nodes[i].current_node;
|
||||
t_pb_graph_pin* pin = lb_rr_graph.node_pb_graph_pin(node);
|
||||
|
||||
if (check_edge_for_route_conflicts(mode_map, driver_pin, pin)) {
|
||||
mode_status_.is_mode_conflict = true;
|
||||
}
|
||||
}
|
||||
|
||||
commit_remove_rt(lb_rr_graph, &rt->next_nodes[i], op, mode_map);
|
||||
}
|
||||
}
|
||||
|
||||
bool LbRouter::is_skip_route_net(const LbRRGraph& lb_rr_graph,
|
||||
t_trace* rt) {
|
||||
/* Validate if the rr_graph is the one we used to initialize the router */
|
||||
VTR_ASSERT(true == matched_lb_rr_graph(lb_rr_graph));
|
||||
|
||||
if (rt == nullptr) {
|
||||
return false; /* Net is not routed, therefore must route net */
|
||||
}
|
||||
|
||||
LbRRNodeId inode = rt->current_node;
|
||||
|
||||
/* Determine if node is overused */
|
||||
if (routing_status_[inode].occ > lb_rr_graph.node_capacity(inode)) {
|
||||
/* Conflict between this net and another net at this node, reroute net */
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Recursively check that rest of route tree does not have a conflict */
|
||||
for (unsigned int i = 0; i < rt->next_nodes.size(); i++) {
|
||||
if (!is_skip_route_net(lb_rr_graph, &rt->next_nodes[i])) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/* No conflict, this net's current route is legal, skip routing this net */
|
||||
return true;
|
||||
}
|
||||
|
||||
bool LbRouter::add_to_rt(t_trace* rt, const LbRRNodeId& node_index, const NetId& irt_net) {
|
||||
std::vector<LbRRNodeId> trace_forward;
|
||||
t_trace* link_node;
|
||||
t_trace curr_node;
|
||||
|
||||
/* Store path all the way back to route tree */
|
||||
LbRRNodeId rt_index = node_index;
|
||||
while (explored_node_tb_[rt_index].inet != irt_net) {
|
||||
trace_forward.push_back(rt_index);
|
||||
rt_index = explored_node_tb_[rt_index].prev_index;
|
||||
VTR_ASSERT(rt_index != LbRRNodeId::INVALID());
|
||||
}
|
||||
|
||||
/* Find rt_index on the route tree */
|
||||
link_node = find_node_in_rt(rt, rt_index);
|
||||
if (link_node == nullptr) {
|
||||
VTR_LOG("Link node is nullptr. Routing impossible");
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Add path to root tree */
|
||||
LbRRNodeId trace_index;
|
||||
while (!trace_forward.empty()) {
|
||||
trace_index = trace_forward.back();
|
||||
curr_node.current_node = trace_index;
|
||||
link_node->next_nodes.push_back(curr_node);
|
||||
link_node = &link_node->next_nodes.back();
|
||||
trace_forward.pop_back();
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void LbRouter::add_source_to_rt(const NetId& inet) {
|
||||
/* TODO: Validate net id */
|
||||
VTR_ASSERT(nullptr == lb_net_rt_trees_[inet]);
|
||||
lb_net_rt_trees_[inet] = new t_trace;
|
||||
lb_net_rt_trees_[inet]->current_node = lb_net_terminals_[inet][0];
|
||||
}
|
||||
|
||||
void LbRouter::expand_rt_rec(t_trace* rt,
|
||||
const LbRRNodeId& prev_index,
|
||||
const NetId& irt_net,
|
||||
const int& explore_id_index) {
|
||||
t_expansion_node enode;
|
||||
|
||||
/* Perhaps should use a cost other than zero */
|
||||
enode.cost = 0;
|
||||
enode.node_index = rt->current_node;
|
||||
enode.prev_index = prev_index;
|
||||
pq_.push(enode);
|
||||
explored_node_tb_[enode.node_index].inet = irt_net;
|
||||
explored_node_tb_[enode.node_index].explored_id = OPEN;
|
||||
explored_node_tb_[enode.node_index].enqueue_id = explore_id_index;
|
||||
explored_node_tb_[enode.node_index].enqueue_cost = 0;
|
||||
explored_node_tb_[enode.node_index].prev_index = prev_index;
|
||||
|
||||
for (unsigned int i = 0; i < rt->next_nodes.size(); i++) {
|
||||
expand_rt_rec(&rt->next_nodes[i], rt->current_node, irt_net, explore_id_index);
|
||||
}
|
||||
}
|
||||
|
||||
void LbRouter::expand_rt(const NetId& inet,
|
||||
const NetId& irt_net) {
|
||||
VTR_ASSERT(pq_.empty());
|
||||
|
||||
expand_rt_rec(lb_net_rt_trees_[inet], LbRRNodeId::INVALID(), irt_net, explore_id_index_);
|
||||
}
|
||||
|
||||
void LbRouter::expand_edges(const LbRRGraph& lb_rr_graph,
|
||||
t_mode* mode,
|
||||
const LbRRNodeId& cur_inode,
|
||||
float cur_cost,
|
||||
int net_fanout) {
|
||||
/* Validate if the rr_graph is the one we used to initialize the router */
|
||||
VTR_ASSERT(true == matched_lb_rr_graph(lb_rr_graph));
|
||||
|
||||
t_expansion_node enode;
|
||||
int usage;
|
||||
float incr_cost;
|
||||
|
||||
for (const LbRREdgeId& iedge : lb_rr_graph.node_out_edges(cur_inode, mode)) {
|
||||
/* Init new expansion node */
|
||||
enode.prev_index = cur_inode;
|
||||
enode.node_index = lb_rr_graph.edge_sink_node(iedge);
|
||||
enode.cost = cur_cost;
|
||||
|
||||
/* Determine incremental cost of using expansion node */
|
||||
usage = routing_status_[enode.node_index].occ + 1 - lb_rr_graph.node_capacity(enode.node_index);
|
||||
incr_cost = lb_rr_graph.node_intrinsic_cost(enode.node_index);
|
||||
incr_cost += lb_rr_graph.edge_intrinsic_cost(iedge);
|
||||
incr_cost += params_.hist_fac * routing_status_[enode.node_index].historical_usage;
|
||||
if (usage > 0) {
|
||||
incr_cost *= (usage * pres_con_fac_);
|
||||
}
|
||||
|
||||
/* Adjust cost so that higher fanout nets prefer higher fanout routing nodes while lower fanout nets prefer lower fanout routing nodes */
|
||||
float fanout_factor = 1.0;
|
||||
t_mode* next_mode = routing_status_[enode.node_index].mode;
|
||||
/* Assume first mode if a mode hasn't been forced. */
|
||||
if (nullptr == next_mode) {
|
||||
/* If the node is mapped to a nullptr pb_graph_pin, this is a special SINK. Use nullptr mode */
|
||||
if (nullptr == lb_rr_graph.node_pb_graph_pin(enode.node_index)) {
|
||||
next_mode = nullptr;
|
||||
} else if (true == is_primitive_pb_type(lb_rr_graph.node_pb_graph_pin(enode.node_index)->parent_node->pb_type)) {
|
||||
/* For primitive node, we give nullptr as default */
|
||||
next_mode = nullptr;
|
||||
} else {
|
||||
next_mode = &(lb_rr_graph.node_pb_graph_pin(enode.node_index)->parent_node->pb_type->modes[0]);
|
||||
}
|
||||
}
|
||||
if (lb_rr_graph.node_out_edges(enode.node_index, next_mode).size() > 1) {
|
||||
fanout_factor = 0.85 + (0.25 / net_fanout);
|
||||
} else {
|
||||
fanout_factor = 1.15 - (0.25 / net_fanout);
|
||||
}
|
||||
|
||||
incr_cost *= fanout_factor;
|
||||
enode.cost = cur_cost + incr_cost;
|
||||
|
||||
/* Add to queue if cost is lower than lowest cost path to this enode */
|
||||
if (explored_node_tb_[enode.node_index].enqueue_id == explore_id_index_) {
|
||||
if (enode.cost < explored_node_tb_[enode.node_index].enqueue_cost) {
|
||||
pq_.push(enode);
|
||||
}
|
||||
} else {
|
||||
explored_node_tb_[enode.node_index].enqueue_id = explore_id_index_;
|
||||
explored_node_tb_[enode.node_index].enqueue_cost = enode.cost;
|
||||
pq_.push(enode);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void LbRouter::expand_node(const LbRRGraph& lb_rr_graph,
|
||||
const t_expansion_node& exp_node,
|
||||
const int& net_fanout) {
|
||||
/* Validate if the rr_graph is the one we used to initialize the router */
|
||||
VTR_ASSERT(true == matched_lb_rr_graph(lb_rr_graph));
|
||||
|
||||
t_expansion_node enode;
|
||||
|
||||
LbRRNodeId cur_node = exp_node.node_index;
|
||||
float cur_cost = exp_node.cost;
|
||||
t_mode* mode = routing_status_[cur_node].mode;
|
||||
if (nullptr == mode) {
|
||||
if (nullptr == lb_rr_graph.node_pb_graph_pin(cur_node)) {
|
||||
mode = nullptr;
|
||||
} else if (true == is_primitive_pb_type(lb_rr_graph.node_pb_graph_pin(cur_node)->parent_node->pb_type)) {
|
||||
mode = nullptr;
|
||||
} else {
|
||||
mode = &(lb_rr_graph.node_pb_graph_pin(cur_node)->parent_node->pb_type->modes[0]);
|
||||
}
|
||||
}
|
||||
|
||||
expand_edges(lb_rr_graph, mode, cur_node, cur_cost, net_fanout);
|
||||
}
|
||||
|
||||
void LbRouter::expand_node_all_modes(const LbRRGraph& lb_rr_graph,
|
||||
const t_expansion_node& exp_node,
|
||||
const int& net_fanout) {
|
||||
/* Validate if the rr_graph is the one we used to initialize the router */
|
||||
VTR_ASSERT(true == matched_lb_rr_graph(lb_rr_graph));
|
||||
|
||||
LbRRNodeId cur_inode = exp_node.node_index;
|
||||
float cur_cost = exp_node.cost;
|
||||
t_mode* cur_mode = routing_status_[cur_inode].mode;
|
||||
auto* pin = lb_rr_graph.node_pb_graph_pin(cur_inode);
|
||||
|
||||
for (const LbRREdgeId& edge : lb_rr_graph.node_out_edges(cur_inode)) {
|
||||
t_mode* mode = lb_rr_graph.edge_mode(edge);
|
||||
/* If a mode has been forced, only add edges from that mode, otherwise add edges from all modes. */
|
||||
if (cur_mode != nullptr && mode != cur_mode) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Check whether a mode is illegal. If it is then the node will not be expanded */
|
||||
bool is_illegal = false;
|
||||
if (pin != nullptr) {
|
||||
auto* pb_graph_node = pin->parent_node;
|
||||
if (0 == illegal_modes_.count(pb_graph_node)) {
|
||||
continue;
|
||||
}
|
||||
for (auto illegal_mode : illegal_modes_.at(pb_graph_node)) {
|
||||
if (mode == illegal_mode) {
|
||||
is_illegal = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (is_illegal == true) {
|
||||
continue;
|
||||
}
|
||||
expand_edges(lb_rr_graph, mode, cur_inode, cur_cost, net_fanout);
|
||||
}
|
||||
}
|
||||
|
||||
bool LbRouter::try_expand_nodes(const AtomNetlist& atom_nlist,
|
||||
const LbRRGraph& lb_rr_graph,
|
||||
const NetId& lb_net,
|
||||
t_expansion_node& exp_node,
|
||||
const int& itarget,
|
||||
const bool& try_other_modes,
|
||||
const int& verbosity) {
|
||||
bool is_impossible = false;
|
||||
|
||||
do {
|
||||
if (pq_.empty()) {
|
||||
/* No connection possible */
|
||||
is_impossible = true;
|
||||
|
||||
if (verbosity > 3) {
|
||||
//Print detailed debug info
|
||||
AtomNetId net_id = lb_net_atom_net_ids_[lb_net];
|
||||
AtomPinId driver_pin = lb_net_atom_pins_[lb_net][0];
|
||||
AtomPinId sink_pin = lb_net_atom_pins_[lb_net][itarget];
|
||||
LbRRNodeId driver_rr_node = lb_net_terminals_[lb_net][0];
|
||||
LbRRNodeId sink_rr_node = lb_net_terminals_[lb_net][itarget];
|
||||
|
||||
VTR_LOG("\t\t\tNo possible routing path from %s to %s: needed for net '%s' from net pin '%s'",
|
||||
describe_lb_rr_node(lb_rr_graph, driver_rr_node).c_str(),
|
||||
describe_lb_rr_node(lb_rr_graph, sink_rr_node).c_str(),
|
||||
atom_nlist.net_name(net_id).c_str(),
|
||||
atom_nlist.pin_name(driver_pin).c_str());
|
||||
VTR_LOGV(sink_pin, " to net pin '%s'", atom_nlist.pin_name(sink_pin).c_str());
|
||||
VTR_LOG("\n");
|
||||
}
|
||||
} else {
|
||||
exp_node = pq_.top();
|
||||
pq_.pop();
|
||||
LbRRNodeId exp_inode = exp_node.node_index;
|
||||
|
||||
if (explored_node_tb_[exp_inode].explored_id != explore_id_index_) {
|
||||
/* First time node is popped implies path to this node is the lowest cost.
|
||||
* If the node is popped a second time, then the path to that node is higher
|
||||
* than this path so ignore.
|
||||
*/
|
||||
explored_node_tb_[exp_inode].explored_id = explore_id_index_;
|
||||
explored_node_tb_[exp_inode].prev_index = exp_node.prev_index;
|
||||
if (exp_inode != lb_net_terminals_[lb_net][itarget]) {
|
||||
if (!try_other_modes) {
|
||||
expand_node(lb_rr_graph, exp_node, lb_net_terminals_[lb_net].size() - 1);
|
||||
} else {
|
||||
expand_node_all_modes(lb_rr_graph, exp_node, lb_net_terminals_[lb_net].size() - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (exp_node.node_index != lb_net_terminals_[lb_net][itarget] && !is_impossible);
|
||||
|
||||
return is_impossible;
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
* Private validators
|
||||
*************************************************/
|
||||
bool LbRouter::matched_lb_rr_graph(const LbRRGraph& lb_rr_graph) const {
|
||||
return ( (routing_status_.size() == lb_rr_graph.nodes().size())
|
||||
&& (explored_node_tb_.size() == lb_rr_graph.nodes().size()) );
|
||||
}
|
||||
|
||||
bool LbRouter::valid_net_id(const NetId& net_id) const {
|
||||
return ( size_t(net_id) < lb_net_ids_.size() ) && ( net_id == lb_net_ids_[net_id] );
|
||||
}
|
||||
|
||||
bool LbRouter::check_net(const LbRRGraph& lb_rr_graph,
|
||||
const AtomNetlist& atom_nlist,
|
||||
const NetId& net) const {
|
||||
if (false == atom_nlist.valid_net_id(lb_net_atom_net_ids_[net])) {
|
||||
return false;
|
||||
}
|
||||
if (lb_net_atom_pins_[net].size() != lb_net_terminals_[net].size()) {
|
||||
VTR_LOGF_ERROR(__FILE__, __LINE__,
|
||||
"Net '%lu' has unmatched atom pins and terminals.\n",
|
||||
size_t(net));
|
||||
return false;
|
||||
}
|
||||
  /* We must have 1 source and at least 1 sink, i.e., no less than 2 terminals */
if (2 > lb_net_terminals_[net].size()) {
|
||||
VTR_LOGF_ERROR(__FILE__, __LINE__,
|
||||
"Net '%lu' has only %lu terminal.\n",
|
||||
size_t(net), lb_net_terminals_[net].size());
|
||||
return false;
|
||||
}
|
||||
/* Each node must be valid */
|
||||
for (const LbRRNodeId& node : lb_net_terminals_[net]) {
|
||||
if (false == lb_rr_graph.valid_node_id(node)) {
|
||||
VTR_LOGF_ERROR(__FILE__, __LINE__,
|
||||
"Net '%lu' has invalid terminal node in lb_rr_graph.\n",
|
||||
size_t(net));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
/* Each atom pin must be valid */
|
||||
for (const AtomPinId& pin : lb_net_atom_pins_[net]) {
|
||||
if (false == atom_nlist.valid_pin_id(pin)) {
|
||||
VTR_LOGF_ERROR(__FILE__, __LINE__,
|
||||
"Net '%lu' has invalid atom pin.\n",
|
||||
size_t(net));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
* Private Initializer and cleaner
|
||||
*************************************************/
|
||||
void LbRouter::reset_explored_node_tb() {
|
||||
for (t_explored_node_stats& explored_node : explored_node_tb_) {
|
||||
explored_node.prev_index = LbRRNodeId::INVALID();
|
||||
explored_node.explored_id = OPEN;
|
||||
explored_node.inet = NetId::INVALID();
|
||||
explored_node.enqueue_id = OPEN;
|
||||
explored_node.enqueue_cost = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void LbRouter::reset_net_rt() {
|
||||
for (const NetId& inet : lb_net_ids_) {
|
||||
free_net_rt(lb_net_rt_trees_[inet]);
|
||||
lb_net_rt_trees_[inet] = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void LbRouter::reset_routing_status() {
|
||||
for (t_routing_status& status : routing_status_) {
|
||||
status.historical_usage = 0;
|
||||
status.occ = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void LbRouter::clear_nets() {
|
||||
/* TODO: Trace should no longer use pointers */
|
||||
reset_net_rt();
|
||||
|
||||
lb_net_ids_.clear();
|
||||
lb_net_atom_net_ids_.clear();
|
||||
lb_net_atom_pins_.clear();
|
||||
lb_net_terminals_.clear();
|
||||
lb_net_rt_trees_.clear();
|
||||
}
|
||||
|
||||
void LbRouter::free_net_rt(t_trace* lb_trace) {
|
||||
if (lb_trace != nullptr) {
|
||||
for (unsigned int i = 0; i < lb_trace->next_nodes.size(); i++) {
|
||||
free_lb_trace(&lb_trace->next_nodes[i]);
|
||||
}
|
||||
lb_trace->next_nodes.clear();
|
||||
delete lb_trace;
|
||||
}
|
||||
}
|
||||
|
||||
void LbRouter::free_lb_trace(t_trace* lb_trace) {
|
||||
if (lb_trace != nullptr) {
|
||||
for (unsigned int i = 0; i < lb_trace->next_nodes.size(); i++) {
|
||||
free_lb_trace(&lb_trace->next_nodes[i]);
|
||||
}
|
||||
lb_trace->next_nodes.clear();
|
||||
}
|
||||
}
|
||||
|
||||
void LbRouter::reset_illegal_modes() {
|
||||
illegal_modes_.clear();
|
||||
}
|
||||
|
||||
|
||||
} /* end namespace openfpga */
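A minimal usage sketch of the LbRouter public API declared in lb_router.h (added below in this commit); it is illustrative only, the helper name is hypothetical, and the routing resource graph, logical block type, atom netlist, node ids and pin ids are assumed to be supplied by the surrounding repacking flow.

#include "atom_netlist.h"
#include "lb_router.h"

namespace openfpga {

/* Illustrative sketch: route a single net on a pre-built logical block routing resource graph */
bool route_one_net_sketch(const LbRRGraph& lb_rr_graph,
                          t_logical_block_type_ptr lb_type,
                          const AtomNetlist& atom_nlist,
                          const AtomNetId& atom_net_id,
                          const AtomPinId& driver_pin,
                          const std::vector<AtomPinId>& terminal_pins,
                          const LbRRNodeId& source_node,
                          const std::vector<LbRRNodeId>& sink_nodes) {
  /* The router must be constructed with the same lb_rr_graph it will later route on */
  LbRouter lb_router(lb_rr_graph, lb_type);

  /* Register the net: its terminal nodes, the atom net it represents and its atom pins */
  LbRouter::NetId lb_net = lb_router.create_net_to_route(source_node, sink_nodes);
  lb_router.add_net_atom_net_id(lb_net, atom_net_id);
  lb_router.add_net_atom_pins(lb_net, driver_pin, terminal_pins);

  /* Run the router on the same graph; the return value reports routing success */
  return lb_router.try_route(lb_rr_graph, atom_nlist, /*verbosity=*/0);
}

} /* end namespace openfpga */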
@@ -0,0 +1,336 @@
#ifndef LB_ROUTER_H
|
||||
#define LB_ROUTER_H
|
||||
|
||||
/********************************************************************
|
||||
* Include header files that are required by function declaration
|
||||
*******************************************************************/
|
||||
#include <map>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
#include <queue>
|
||||
|
||||
#include "vtr_vector.h"
|
||||
#include "vtr_strong_id.h"
|
||||
|
||||
#include "physical_types.h"
|
||||
#include "vpr_context.h"
|
||||
|
||||
#include "vpr_device_annotation.h"
|
||||
#include "lb_rr_graph.h"
|
||||
|
||||
/********************************************************************
|
||||
* Function declaration
|
||||
*******************************************************************/
|
||||
|
||||
/* begin namespace openfpga */
|
||||
namespace openfpga {
|
||||
|
||||
class LbRouter {
|
||||
public: /* Strong ids */
|
||||
struct net_id_tag;
|
||||
typedef vtr::StrongId<net_id_tag> NetId;
|
||||
public: /* Intra-Logic Block Routing Data Structures (by instance) */
|
||||
/**************************************************************************
|
||||
* Describes the status of a logic cluster_ctx.blocks routing resource node
|
||||
* for a given logic cluster_ctx.blocks instance
|
||||
***************************************************************************/
|
||||
struct t_routing_status {
|
||||
int occ; /* Number of nets currently using this lb_rr_node */
|
||||
t_mode* mode; /* Mode that this rr_node is set to */
|
||||
|
||||
int historical_usage; /* Historical usage of using this node */
|
||||
|
||||
t_routing_status() {
|
||||
occ = 0;
|
||||
mode = nullptr;
|
||||
historical_usage = 0;
|
||||
}
|
||||
};
|
||||
|
||||
/**************************************************************************
|
||||
* Data structure forming the route tree of a net within one logic cluster_ctx.blocks.
|
||||
* A net is implemented using routing resource nodes.
|
||||
* The t_lb_trace data structure records one of the nodes used by the net and the connections
|
||||
* to other nodes
|
||||
***************************************************************************/
|
||||
struct t_trace {
|
||||
LbRRNodeId current_node; /* current t_lb_type_rr_node used by net */
|
||||
    std::vector<t_trace> next_nodes;   /* traces of the next nodes driven by the current node */
};
|
||||
|
||||
/**************************************************************************
|
||||
* Stores tuning parameters used by intra-logic cluster_ctx.blocks router
|
||||
***************************************************************************/
|
||||
struct t_option {
|
||||
int max_iterations;
|
||||
float pres_fac;
|
||||
float pres_fac_mult;
|
||||
float hist_fac;
|
||||
};
|
||||
|
||||
/**************************************************************************
|
||||
* Node expanded by router
|
||||
***************************************************************************/
|
||||
struct t_expansion_node {
|
||||
LbRRNodeId node_index; /* Index of logic cluster_ctx.blocks rr node this expansion node represents */
|
||||
LbRRNodeId prev_index; /* Index of logic cluster_ctx.blocks rr node that drives this expansion node */
|
||||
float cost;
|
||||
|
||||
t_expansion_node() {
|
||||
node_index = LbRRNodeId::INVALID();
|
||||
prev_index = LbRRNodeId::INVALID();
|
||||
cost = 0;
|
||||
}
|
||||
};
|
||||
|
||||
class compare_expansion_node {
|
||||
public:
|
||||
      /* Returns true if e1's cost is higher than e2's, so the lowest-cost node is expanded first */
bool operator()(t_expansion_node& e1, t_expansion_node& e2) {
|
||||
if (e1.cost > e2.cost) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
/**************************************************************************
|
||||
* Stores explored nodes by router
|
||||
***************************************************************************/
|
||||
struct t_explored_node_stats {
|
||||
    LbRRNodeId prev_index;   /* Previous node that drives this one */
int explored_id; /* ID used to determine if this node has been explored */
|
||||
NetId inet; /* net index of route tree */
|
||||
    int enqueue_id;          /* ID used to determine if this node has been pushed on exploration priority queue */
    float enqueue_cost;      /* cost of node pushed on exploration priority queue */
|
||||
t_explored_node_stats() {
|
||||
prev_index = LbRRNodeId::INVALID();
|
||||
explored_id = OPEN;
|
||||
enqueue_id = OPEN;
|
||||
inet = NetId::INVALID();
|
||||
enqueue_cost = 0;
|
||||
}
|
||||
};
|
||||
|
||||
/**************************************************************************
|
||||
* Stores status of mode selection during clustering
|
||||
***************************************************************************/
|
||||
struct t_mode_selection_status {
|
||||
bool is_mode_conflict = false;
|
||||
bool try_expand_all_modes = false;
|
||||
bool expand_all_modes = false;
|
||||
|
||||
bool is_mode_issue() {
|
||||
return is_mode_conflict || try_expand_all_modes;
|
||||
}
|
||||
};
|
||||
|
||||
// TODO: check if this hacky class memory reserve thing is still necessary, if not, then delete
|
||||
/* Packing uses a priority queue that requires a large number of elements.
|
||||
* This backdoor
|
||||
* allows me to use a priority queue where I can pre-allocate the # of elements
|
||||
* in the underlying container
|
||||
* for efficiency reasons. Note: Must use vector with this
|
||||
*/
|
||||
template<class T, class U, class V>
|
||||
class reservable_pq : public std::priority_queue<T, U, V> {
|
||||
public:
|
||||
typedef typename std::priority_queue<T>::size_type size_type;
|
||||
reservable_pq(size_type capacity = 0) {
|
||||
reserve(capacity);
|
||||
cur_cap = capacity;
|
||||
}
|
||||
void reserve(size_type capacity) {
|
||||
this->c.reserve(capacity);
|
||||
cur_cap = capacity;
|
||||
}
|
||||
void clear() {
|
||||
this->c.clear();
|
||||
this->c.reserve(cur_cap);
|
||||
}
|
||||
|
||||
private:
|
||||
size_type cur_cap;
|
||||
};
|
||||
|
||||
enum e_commit_remove {
|
||||
RT_COMMIT,
|
||||
RT_REMOVE
|
||||
};
|
||||
|
||||
public : /* Public constructors */
|
||||
LbRouter(const LbRRGraph& lb_rr_graph, t_logical_block_type_ptr lb_type);
|
||||
|
||||
public : /* Public accessors */
|
||||
/**
|
||||
   * Find all the routing resource nodes that are over-used, i.e., used beyond their capacity
   * This function is called to collect such nodes so that the router can reroute the affected nets
*/
|
||||
std::vector<LbRRNodeId> find_congested_rr_nodes(const LbRRGraph& lb_rr_graph) const;
|
||||
|
||||
  /* Show if a valid routing solution has been found or not */
bool is_routed() const;
|
||||
|
||||
/**
|
||||
* Get the routing results for a Net
|
||||
*/
|
||||
std::vector<LbRRNodeId> net_routed_nodes(const NetId& net) const;
|
||||
|
||||
public : /* Public mutators */
|
||||
/**
|
||||
* Add net to be routed
|
||||
*/
|
||||
NetId create_net_to_route(const LbRRNodeId& source, const std::vector<LbRRNodeId>& terminals);
|
||||
void add_net_atom_net_id(const NetId& net, const AtomNetId& atom_net);
|
||||
void add_net_atom_pins(const NetId& net, const AtomPinId& src_pin, const std::vector<AtomPinId>& terminal_pins);
|
||||
|
||||
/* TODO: Initialize all the modes in routing status with the mode set in pb
|
||||
   * This function is used for general purpose packing
*/
|
||||
|
||||
/* Set all the modes in routing status with the physical mode defined in device annotation
|
||||
* This method is used only in repacking for physical logical blocks
|
||||
* Do NOT use it during the general purpose packing
|
||||
*/
|
||||
void set_physical_pb_modes(const LbRRGraph& lb_rr_graph,
|
||||
const VprDeviceAnnotation& device_annotation);
|
||||
|
||||
/**
|
||||
* Perform routing algorithm on a given logical tile routing resource graph
|
||||
   * Note: the lb_rr_graph must be the same one used to initialize the router!!!
*/
|
||||
bool try_route(const LbRRGraph& lb_rr_graph,
|
||||
const AtomNetlist& atom_nlist,
|
||||
const int& verbosity);
|
||||
|
||||
private : /* Private accessors */
|
||||
/**
|
||||
* Report if the routing is successfully done on a logical block routing resource graph
|
||||
*/
|
||||
bool is_route_success(const LbRRGraph& lb_rr_graph) const;
|
||||
|
||||
/**
|
||||
* Try to find a node in the routing traces recursively
|
||||
* If not found, will return an empty pointer
|
||||
*/
|
||||
t_trace* find_node_in_rt(t_trace* rt, const LbRRNodeId& rt_index);
|
||||
|
||||
bool route_has_conflict(const LbRRGraph& lb_rr_graph, t_trace* rt) const;
|
||||
|
||||
/* Recursively find all the nodes in the trace */
|
||||
void rec_collect_trace_nodes(const t_trace* trace, std::vector<LbRRNodeId>& routed_nodes) const;
|
||||
|
||||
private : /* Private mutators */
|
||||
/*It is possible that a net may connect multiple times to a logically equivalent set of primitive pins.
|
||||
*The cluster router will only route one connection for a particular net to the common sink of the
|
||||
*equivalent pins.
|
||||
*
|
||||
*To work around this, we fix all but one of these duplicate connections to route to specific pins,
|
||||
*(instead of the common sink). This ensures a legal routing is produced and that the duplicate pins
|
||||
*are not 'missing' in the clustered netlist.
|
||||
*/
|
||||
void fix_duplicate_equivalent_pins(const AtomContext& atom_ctx,
|
||||
const LbRRGraph& lb_rr_graph);
|
||||
bool check_edge_for_route_conflicts(std::unordered_map<const t_pb_graph_node*, const t_mode*>& mode_map,
|
||||
const t_pb_graph_pin* driver_pin,
|
||||
const t_pb_graph_pin* pin);
|
||||
void commit_remove_rt(const LbRRGraph& lb_rr_graph,
|
||||
t_trace* rt,
|
||||
const e_commit_remove& op,
|
||||
std::unordered_map<const t_pb_graph_node*, const t_mode*>& mode_map);
|
||||
bool is_skip_route_net(const LbRRGraph& lb_rr_graph, t_trace* rt);
|
||||
bool add_to_rt(t_trace* rt, const LbRRNodeId& node_index, const NetId& irt_net);
|
||||
void add_source_to_rt(const NetId& inet);
|
||||
void expand_rt_rec(t_trace* rt,
|
||||
const LbRRNodeId& prev_index,
|
||||
const NetId& irt_net,
|
||||
const int& explore_id_index);
|
||||
void expand_rt(const NetId& inet,
|
||||
const NetId& irt_net);
|
||||
void expand_edges(const LbRRGraph& lb_rr_graph,
|
||||
t_mode* mode,
|
||||
const LbRRNodeId& cur_inode,
|
||||
float cur_cost,
|
||||
int net_fanout);
|
||||
void expand_node(const LbRRGraph& lb_rr_graph,
|
||||
const t_expansion_node& exp_node,
|
||||
const int& net_fanout);
|
||||
void expand_node_all_modes(const LbRRGraph& lb_rr_graph,
|
||||
const t_expansion_node& exp_node,
|
||||
const int& net_fanout);
|
||||
bool try_expand_nodes(const AtomNetlist& atom_nlist,
|
||||
const LbRRGraph& lb_rr_graph,
|
||||
const NetId& lb_net,
|
||||
t_expansion_node& exp_node,
|
||||
const int& itarget,
|
||||
const bool& try_other_modes,
|
||||
const int& verbosity);
|
||||
|
||||
private : /* Private validators */
|
||||
/**
|
||||
* Validate if the rr_graph is the one we used to initialize the router
|
||||
*/
|
||||
bool matched_lb_rr_graph(const LbRRGraph& lb_rr_graph) const;
|
||||
|
||||
bool valid_net_id(const NetId& net_id) const;
|
||||
|
||||
/* Validate that all the nets have
|
||||
* - valid source, terminal nodes in lb routing resource graph
|
||||
* - valid atom net and pin ids in atom netlist
|
||||
*/
|
||||
bool check_net(const LbRRGraph& lb_rr_graph,
|
||||
const AtomNetlist& atom_nlist,
|
||||
const NetId& net) const;
|
||||
|
||||
private : /* Private initializer and cleaner */
|
||||
void reset_explored_node_tb();
|
||||
void reset_net_rt();
|
||||
void reset_routing_status();
|
||||
void reset_illegal_modes();
|
||||
|
||||
void clear_nets();
|
||||
void free_net_rt(t_trace* lb_trace);
|
||||
void free_lb_trace(t_trace* lb_trace);
|
||||
|
||||
private : /* Stores all data needed by intra-logic cluster_ctx.blocks router */
|
||||
/* Logical Netlist Info */
|
||||
vtr::vector<NetId, NetId> lb_net_ids_; /* Pointer to vector of intra logic cluster_ctx.blocks nets and their connections */
|
||||
vtr::vector<NetId, AtomNetId> lb_net_atom_net_ids_; /* index of atom net this intra_lb_net represents */
|
||||
vtr::vector<NetId, std::vector<AtomPinId>> lb_net_atom_pins_; /* AtomPin's associated with each terminal */
|
||||
vtr::vector<NetId, std::vector<LbRRNodeId>> lb_net_terminals_; /* endpoints of the intra_lb_net, 0th position is the source, all others are sinks */
|
||||
vtr::vector<NetId, t_trace*> lb_net_rt_trees_; /* Route tree head */
|
||||
|
||||
/* Logical-to-physical mapping info */
|
||||
vtr::vector<LbRRNodeId, t_routing_status> routing_status_; /* [0..lb_type_graph->size()-1] Stats for each logic cluster_ctx.blocks rr node instance */
|
||||
|
||||
/* Stores state info during Pathfinder iterative routing */
|
||||
vtr::vector<LbRRNodeId, t_explored_node_stats> explored_node_tb_; /* [0..lb_type_graph->size()-1] Stores mode exploration and traceback info for nodes */
|
||||
|
||||
int explore_id_index_; /* used in conjunction with node_traceback to determine whether or not a location has been explored. By using a unique identifier every route, I don't have to clear the previous route exploration */
|
||||
|
||||
/* Current type */
|
||||
t_logical_block_type_ptr lb_type_;
|
||||
|
||||
/* Parameters used by router */
|
||||
t_option params_;
|
||||
|
||||
/* Stores whether or not the current logical-to-physical mapping has a routed solution */
|
||||
bool is_routed_;
|
||||
|
||||
/* Stores the mode selection status when expanding the edges */
|
||||
t_mode_selection_status mode_status_;
|
||||
|
||||
/* Stores state info of the priority queue in expanding edges during route */
|
||||
reservable_pq<t_expansion_node, std::vector<t_expansion_node>, compare_expansion_node> pq_;
|
||||
|
||||
/* Store the illegal modes for each pb_graph_node that is involved in the routing resource graph */
|
||||
std::map<const t_pb_graph_node*, std::vector<const t_mode*>> illegal_modes_;
|
||||
|
||||
/* current congestion factor */
|
||||
float pres_con_fac_;
|
||||
};
|
||||
|
||||
} /* end namespace openfpga */
|
||||
|
||||
#endif
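A small sketch (not part of the diff): because compare_expansion_node above returns true when e1.cost > e2.cost, the expansion priority queue behaves as a min-heap on cost and the cheapest node is popped first. The helper below is hypothetical and only illustrates that ordering.

#include <queue>
#include <vector>
#include "lb_router.h"

namespace openfpga {

/* Illustrative sketch: returns the lowest-cost expansion node, i.e., the node the
 * router would expand first (assumes 'nodes' is non-empty) */
LbRouter::t_expansion_node cheapest_node_sketch(const std::vector<LbRouter::t_expansion_node>& nodes) {
  std::priority_queue<LbRouter::t_expansion_node,
                      std::vector<LbRouter::t_expansion_node>,
                      LbRouter::compare_expansion_node> pq(LbRouter::compare_expansion_node(), nodes);
  return pq.top();
}

} /* end namespace openfpga */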
@@ -0,0 +1,69 @@
/***************************************************************************************
 * This file includes most utilized functions for LbRouter
 ***************************************************************************************/

/* Headers from vtrutil library */
#include "vtr_assert.h"

#include "lb_router_utils.h"

/* begin namespace openfpga */
namespace openfpga {

/***************************************************************************************
 * Add a net to route to a logical block router
 * This function will automatically find the source and sink atom pins
 * based on the given atom net
 ***************************************************************************************/
LbRouter::NetId add_lb_router_net_to_route(LbRouter& lb_router,
                                           const LbRRGraph& lb_rr_graph,
                                           const LbRRNodeId& source_node,
                                           const std::vector<LbRRNodeId>& sink_nodes,
                                           const AtomContext& atom_ctx,
                                           const AtomNetId& atom_net_id) {
  VTR_ASSERT(0 < sink_nodes.size());

  LbRouter::NetId lb_net = lb_router.create_net_to_route(source_node, sink_nodes);

  VTR_ASSERT(AtomNetId::INVALID() != atom_net_id);
  lb_router.add_net_atom_net_id(lb_net, atom_net_id);

  std::vector<AtomPinId> terminal_pins;
  AtomPinId atom_pin_outside_pb = AtomPinId::INVALID();

  for (const LbRRNodeId& sink_node : sink_nodes) {
    t_pb_graph_pin* sink_pb_pin = lb_rr_graph.node_pb_graph_pin(sink_node);
    bool atom_pin_inside_pb = false;
    for (const AtomPinId& atom_pin : atom_ctx.nlist.net_sinks(atom_net_id)) {
      VTR_ASSERT(AtomPinId::INVALID() != atom_pin);
      if (sink_pb_pin == find_pb_graph_pin(atom_ctx.nlist, atom_ctx.lookup, atom_pin)) {
        terminal_pins.push_back(atom_pin);
        atom_pin_inside_pb = true;
        break;
      }
      if (AtomPinId::INVALID() == atom_pin_outside_pb) {
        atom_pin_outside_pb = atom_pin;
      }
    }
    /* Add an atom pin which is not inside the pb */
    if (false == atom_pin_inside_pb) {
      VTR_ASSERT(AtomPinId::INVALID() != atom_pin_outside_pb);
      terminal_pins.push_back(atom_pin_outside_pb);
    }
  }
  VTR_ASSERT(AtomPinId::INVALID() != atom_ctx.nlist.net_driver(atom_net_id));
  if (sink_nodes.size() != terminal_pins.size()) {
    VTR_LOGF_ERROR(__FILE__, __LINE__,
                   "Net '%s' has %lu sink nodes but %lu associated atom pins!\n",
                   atom_ctx.nlist.net_name(atom_net_id).c_str(),
                   sink_nodes.size(),
                   terminal_pins.size());
  }
  VTR_ASSERT(sink_nodes.size() == terminal_pins.size());

  lb_router.add_net_atom_pins(lb_net, atom_ctx.nlist.net_driver(atom_net_id), terminal_pins);

  return lb_net;
}

} /* end namespace openfpga */
@@ -0,0 +1,27 @@
#ifndef LB_ROUTER_UTILS_H
#define LB_ROUTER_UTILS_H

/********************************************************************
 * Include header files that are required by function declaration
 *******************************************************************/
#include "atom_netlist.h"
#include "lb_rr_graph.h"
#include "lb_router.h"

/********************************************************************
 * Function declaration
 *******************************************************************/

/* begin namespace openfpga */
namespace openfpga {

LbRouter::NetId add_lb_router_net_to_route(LbRouter& lb_router,
                                           const LbRRGraph& lb_rr_graph,
                                           const LbRRNodeId& source_node,
                                           const std::vector<LbRRNodeId>& sink_nodes,
                                           const AtomContext& atom_ctx,
                                           const AtomNetId& atom_net_id);

} /* end namespace openfpga */

#endif
@@ -9,6 +9,14 @@
/* begin namespace openfpga */
|
||||
namespace openfpga {
|
||||
|
||||
/**************************************************
|
||||
* Public Constructors
|
||||
*************************************************/
|
||||
LbRRGraph::LbRRGraph() {
|
||||
ext_source_node_ = LbRRNodeId::INVALID();
|
||||
ext_sink_node_ = LbRRNodeId::INVALID();
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
* Public Accessors: Aggregates
|
||||
*************************************************/
@@ -43,6 +51,11 @@ float LbRRGraph::node_intrinsic_cost(const LbRRNodeId& node) const {
return node_intrinsic_costs_[node];
|
||||
}
|
||||
|
||||
std::vector<LbRREdgeId> LbRRGraph::node_in_edges(const LbRRNodeId& node) const {
|
||||
VTR_ASSERT(true == valid_node_id(node));
|
||||
return node_in_edges_[node];
|
||||
}
|
||||
|
||||
std::vector<LbRREdgeId> LbRRGraph::node_in_edges(const LbRRNodeId& node, t_mode* mode) const {
|
||||
std::vector<LbRREdgeId> in_edges;
@@ -56,6 +69,11 @@ std::vector<LbRREdgeId> LbRRGraph::node_in_edges(const LbRRNodeId& node, t_mode*
return in_edges;
|
||||
}
|
||||
|
||||
std::vector<LbRREdgeId> LbRRGraph::node_out_edges(const LbRRNodeId& node) const {
|
||||
VTR_ASSERT(true == valid_node_id(node));
|
||||
return node_out_edges_[node];
|
||||
}
|
||||
|
||||
std::vector<LbRREdgeId> LbRRGraph::node_out_edges(const LbRRNodeId& node, t_mode* mode) const {
|
||||
std::vector<LbRREdgeId> out_edges;
@@ -69,7 +87,7 @@ std::vector<LbRREdgeId> LbRRGraph::node_out_edges(const LbRRNodeId& node, t_mode
return out_edges;
|
||||
}
|
||||
|
||||
LbRRNodeId LbRRGraph::find_node(const e_lb_rr_type& type, t_pb_graph_pin* pb_graph_pin) const {
|
||||
LbRRNodeId LbRRGraph::find_node(const e_lb_rr_type& type, const t_pb_graph_pin* pb_graph_pin) const {
|
||||
if (size_t(type) >= node_lookup_.size()) {
|
||||
return LbRRNodeId::INVALID();
|
||||
}
@@ -81,6 +99,24 @@ LbRRNodeId LbRRGraph::find_node(const e_lb_rr_type& type, t_pb_graph_pin* pb_gra
return node_lookup_[size_t(type)].at(pb_graph_pin);
|
||||
}
|
||||
|
||||
LbRRNodeId LbRRGraph::ext_source_node() const {
|
||||
return ext_source_node_;
|
||||
}
|
||||
|
||||
LbRRNodeId LbRRGraph::ext_sink_node() const {
|
||||
return ext_sink_node_;
|
||||
}
|
||||
|
||||
std::vector<LbRREdgeId> LbRRGraph::find_edge(const LbRRNodeId& src_node, const LbRRNodeId& sink_node) const {
|
||||
std::vector<LbRREdgeId> edges;
|
||||
for (const LbRREdgeId& edge : node_out_edges_[src_node]) {
|
||||
if (sink_node == edge_sink_node(edge)) {
|
||||
edges.push_back(edge);
|
||||
}
|
||||
}
|
||||
return edges;
|
||||
}
|
||||
|
||||
LbRRNodeId LbRRGraph::edge_src_node(const LbRREdgeId& edge) const {
|
||||
VTR_ASSERT(true == valid_edge_id(edge));
|
||||
return edge_src_nodes_[edge];
@@ -141,6 +177,20 @@ LbRRNodeId LbRRGraph::create_node(const e_lb_rr_type& type) {
return node;
|
||||
}
|
||||
|
||||
LbRRNodeId LbRRGraph::create_ext_source_node(const e_lb_rr_type& type) {
|
||||
LbRRNodeId ext_source_node = create_node(type);
|
||||
ext_source_node_ = ext_source_node;
|
||||
|
||||
return ext_source_node;
|
||||
}
|
||||
|
||||
LbRRNodeId LbRRGraph::create_ext_sink_node(const e_lb_rr_type& type) {
|
||||
LbRRNodeId ext_sink_node = create_node(type);
|
||||
ext_sink_node_ = ext_sink_node;
|
||||
|
||||
return ext_sink_node;
|
||||
}
|
||||
|
||||
void LbRRGraph::set_node_type(const LbRRNodeId& node, const e_lb_rr_type& type) {
|
||||
VTR_ASSERT(true == valid_node_id(node));
|
||||
node_types_[node] = type;
@@ -212,4 +262,199 @@ bool LbRRGraph::valid_edge_id(const LbRREdgeId& edge_id) const {
return ( size_t(edge_id) < edge_ids_.size() ) && ( edge_id == edge_ids_[edge_id] );
|
||||
}
|
||||
|
||||
/* This function run fundamental checks on internal data
|
||||
* Errors are thrown if fundamental checking fails
|
||||
*/
|
||||
bool LbRRGraph::validate() const {
|
||||
size_t num_err = 0;
|
||||
|
||||
/* Validate the sizes of nodes and node-related vectors
|
||||
* Validate the sizes of edges and edge-related vectors
|
||||
*/
|
||||
if (false == validate_sizes()) {
|
||||
VTR_LOG_WARN("Fail in validating node and edges sizes!\n");
|
||||
num_err++;
|
||||
}
|
||||
|
||||
/* Fundamental check */
|
||||
if (false == validate_nodes_edges()) {
|
||||
VTR_LOG_WARN("Fail in validating edges connected to each node!\n");
|
||||
num_err++;
|
||||
}
|
||||
|
||||
/* Error out if there is any fatal errors found */
|
||||
if (0 < num_err) {
|
||||
VTR_LOG_ERROR("Logical tile Routing Resource graph is not valid due to %d fatal errors !\n",
|
||||
num_err);
|
||||
}
|
||||
|
||||
return (0 == num_err);
|
||||
}
|
||||
|
||||
bool LbRRGraph::empty() const {
|
||||
return (0 == nodes().size()) && (0 == edges().size());
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Private validators/invalidators
|
||||
******************************************************************************/
|
||||
bool LbRRGraph::validate_node_sizes() const {
|
||||
size_t num_nodes = node_ids_.size();
|
||||
return node_ids_.size() == num_nodes
|
||||
&& node_types_.size() == num_nodes
|
||||
&& node_capacities_.size() == num_nodes
|
||||
&& node_pb_graph_pins_.size() == num_nodes
|
||||
&& node_intrinsic_costs_.size() == num_nodes
|
||||
&& node_in_edges_.size() == num_nodes
|
||||
&& node_out_edges_.size() == num_nodes;
|
||||
}
|
||||
|
||||
bool LbRRGraph::validate_edge_sizes() const {
|
||||
size_t num_edges = edge_ids_.size();
|
||||
return edge_src_nodes_.size() == num_edges
|
||||
&& edge_sink_nodes_.size() == num_edges
|
||||
&& edge_intrinsic_costs_.size() == num_edges
|
||||
&& edge_modes_.size() == num_edges;
|
||||
}
|
||||
|
||||
bool LbRRGraph::validate_sizes() const {
|
||||
return validate_node_sizes() && validate_edge_sizes();
|
||||
}
|
||||
|
||||
/* Check if a node is in the list of source_nodes of an edge */
bool LbRRGraph::validate_node_is_edge_src(const LbRRNodeId& node, const LbRREdgeId& edge) const {
|
||||
/* Assure a valid node id */
|
||||
VTR_ASSERT_SAFE(valid_node_id(node));
|
||||
/* assure a valid edge id */
|
||||
VTR_ASSERT_SAFE(valid_edge_id(edge));
|
||||
/* find if the node is the src */
|
||||
if (node == edge_src_node(edge)) {
|
||||
return true; /* confirmed source node*/
|
||||
} else {
|
||||
return false; /* not a source */
|
||||
}
|
||||
}
|
||||
|
||||
/* Check if a node is in the list of sink_nodes of an edge */
bool LbRRGraph::validate_node_is_edge_sink(const LbRRNodeId& node, const LbRREdgeId& edge) const {
|
||||
/* Assure a valid node id */
|
||||
VTR_ASSERT_SAFE(valid_node_id(node));
|
||||
/* assure a valid edge id */
|
||||
VTR_ASSERT_SAFE(valid_edge_id(edge));
|
||||
/* find if the node is the sink */
|
||||
if (node == edge_sink_node(edge)) {
|
||||
    return true; /* confirmed sink node */
  } else {
    return false; /* not a sink */
}
|
||||
}
|
||||
/* This function will check if a node has valid input edges
|
||||
* 1. Check the edge ids are valid
|
||||
* 2. Check the node is in the list of edge_sink_node
|
||||
*/
|
||||
bool LbRRGraph::validate_node_in_edges(const LbRRNodeId& node) const {
|
||||
bool all_valid = true;
|
||||
/* Assure a valid node id */
|
||||
VTR_ASSERT_SAFE(valid_node_id(node));
|
||||
/* Check each edge */
|
||||
for (auto edge : node_in_edges(node)) {
|
||||
/* assure a valid edge id */
|
||||
VTR_ASSERT_SAFE(valid_edge_id(edge));
|
||||
/* check the node is in the list of edge_sink_node */
|
||||
if (true == validate_node_is_edge_sink(node, edge)) {
|
||||
continue;
|
||||
}
|
||||
/* Reach here, it means there is something wrong!
|
||||
* Print a warning
|
||||
*/
|
||||
VTR_LOG_WARN("Edge %d is in the input edge list of node %d while the node is not in edge's sink node list!\n",
|
||||
size_t(edge), size_t(node));
|
||||
all_valid = false;
|
||||
}
|
||||
|
||||
return all_valid;
|
||||
}
|
||||
|
||||
/* This function will check if a node has valid output edges
|
||||
* 1. Check the edge ids are valid
|
||||
* 2. Check the node is in the list of edge_source_node
|
||||
*/
|
||||
bool LbRRGraph::validate_node_out_edges(const LbRRNodeId& node) const {
|
||||
bool all_valid = true;
|
||||
/* Assure a valid node id */
|
||||
VTR_ASSERT_SAFE(valid_node_id(node));
|
||||
/* Check each edge */
|
||||
for (auto edge : node_out_edges(node)) {
|
||||
/* assure a valid edge id */
|
||||
VTR_ASSERT_SAFE(valid_edge_id(edge));
|
||||
/* check the node is in the list of edge_sink_node */
|
||||
if (true == validate_node_is_edge_src(node, edge)) {
|
||||
continue;
|
||||
}
|
||||
/* Reach here, it means there is something wrong!
|
||||
* Print a warning
|
||||
*/
|
||||
VTR_LOG_WARN("Edge %d is in the output edge list of node %d while the node is not in edge's source node list!\n",
|
||||
size_t(edge), size_t(node));
|
||||
all_valid = false;
|
||||
}
|
||||
|
||||
return all_valid;
|
||||
}
|
||||
|
||||
|
||||
/* check if all the nodes' input edges are valid */
|
||||
bool LbRRGraph::validate_nodes_in_edges() const {
|
||||
bool all_valid = true;
|
||||
for (const LbRRNodeId& id : nodes()) {
|
||||
/* Try to find if this is an invalid id or not */
|
||||
if (!valid_node_id(id)) {
|
||||
/* Skip this id */
|
||||
continue;
|
||||
}
|
||||
if (true == validate_node_in_edges(id)) {
|
||||
continue;
|
||||
}
|
||||
/* Reach here, it means there is something wrong!
|
||||
* Print a warning
|
||||
*/
|
||||
all_valid = false;
|
||||
}
|
||||
return all_valid;
|
||||
}
|
||||
|
||||
/* check if all the nodes' output edges are valid */
|
||||
bool LbRRGraph::validate_nodes_out_edges() const {
|
||||
bool all_valid = true;
|
||||
for (const LbRRNodeId& id : nodes()) {
|
||||
/* Try to find if this is an invalid id or not */
|
||||
if (!valid_node_id(id)) {
|
||||
/* Skip this id */
|
||||
continue;
|
||||
}
|
||||
if (true == validate_node_out_edges(id)) {
|
||||
continue;
|
||||
}
|
||||
/* Reach here, it means there is something wrong!
|
||||
* Print a warning
|
||||
*/
|
||||
all_valid = false;
|
||||
}
|
||||
return all_valid;
|
||||
}
|
||||
|
||||
/* check all the edges of every node */
|
||||
bool LbRRGraph::validate_nodes_edges() const {
|
||||
bool all_valid = true;
|
||||
|
||||
if (false == validate_nodes_in_edges()) {
|
||||
all_valid = false;
|
||||
}
|
||||
if (false == validate_nodes_out_edges()) {
|
||||
all_valid = false;
|
||||
}
|
||||
|
||||
return all_valid;
|
||||
}
|
||||
|
||||
} /* end namespace openfpga */
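A minimal sketch (not part of the diff) of how the mutators and the public validate() routine above fit together when a graph is built by hand; the helper name is hypothetical, the e_lb_rr_type values follow the logical-block rr types used elsewhere in this commit, and the nullptr mode pointers are placeholders for modes that would normally come from the architecture.

#include "lb_rr_graph.h"

namespace openfpga {

/* Illustrative sketch: a three-node chain (external source -> pin -> external sink) */
bool build_tiny_lb_rr_graph_sketch() {
  LbRRGraph lb_rr_graph;

  LbRRNodeId src = lb_rr_graph.create_ext_source_node(LB_SOURCE);
  LbRRNodeId mid = lb_rr_graph.create_node(LB_INTERMEDIATE);
  LbRRNodeId snk = lb_rr_graph.create_ext_sink_node(LB_SINK);

  /* Edges carry an intrinsic cost and a mode; nullptr mode is only a placeholder here */
  LbRREdgeId e0 = lb_rr_graph.create_edge(src, mid, nullptr);
  lb_rr_graph.set_edge_intrinsic_cost(e0, 1.);
  LbRREdgeId e1 = lb_rr_graph.create_edge(mid, snk, nullptr);
  lb_rr_graph.set_edge_intrinsic_cost(e1, 1.);

  /* validate() runs the size and node-edge consistency checks shown above */
  return lb_rr_graph.validate();
}

} /* end namespace openfpga */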
@@ -168,6 +168,9 @@ class LbRRGraph {
typedef vtr::Range<node_iterator> node_range;
|
||||
typedef vtr::Range<edge_iterator> edge_range;
|
||||
|
||||
public: /* Constructors */
|
||||
LbRRGraph();
|
||||
|
||||
public: /* Accessors */
|
||||
/* Aggregates: create range-based loops for nodes/edges/switches/segments
|
||||
* To iterate over the nodes/edges/switches/segments in a RRGraph,
@@ -195,13 +198,21 @@ class LbRRGraph {
float node_intrinsic_cost(const LbRRNodeId& node) const;
|
||||
|
||||
/* Get a list of edge ids, which are incoming edges to a node */
|
||||
std::vector<LbRREdgeId> node_in_edges(const LbRRNodeId& node) const;
|
||||
std::vector<LbRREdgeId> node_in_edges(const LbRRNodeId& node, t_mode* mode) const;
|
||||
|
||||
/* Get a list of edge ids, which are outgoing edges from a node */
|
||||
std::vector<LbRREdgeId> node_out_edges(const LbRRNodeId& node) const;
|
||||
std::vector<LbRREdgeId> node_out_edges(const LbRRNodeId& node, t_mode* mode) const;
|
||||
|
||||
LbRRNodeId find_node(const e_lb_rr_type& type, t_pb_graph_pin* pb_graph_pin) const;
|
||||
/* General method to look up a node with type and only pb_graph_pin information */
|
||||
LbRRNodeId find_node(const e_lb_rr_type& type, const t_pb_graph_pin* pb_graph_pin) const;
|
||||
/* Method to find special node */
|
||||
LbRRNodeId ext_source_node() const;
|
||||
LbRRNodeId ext_sink_node() const;
|
||||
|
||||
/* General method to look up a edge with source and sink nodes */
|
||||
std::vector<LbRREdgeId> find_edge(const LbRRNodeId& src_node, const LbRRNodeId& sink_node) const;
|
||||
/* Get the source node which drives a edge */
|
||||
LbRRNodeId edge_src_node(const LbRREdgeId& edge) const;
|
||||
  /* Get the sink node which an edge ends to */
@@ -234,6 +245,10 @@ class LbRRGraph {
* set_node_xlow(node, 0);
|
||||
*/
|
||||
LbRRNodeId create_node(const e_lb_rr_type& type);
|
||||
|
||||
/* Create special nodes */
|
||||
LbRRNodeId create_ext_source_node(const e_lb_rr_type& type);
|
||||
LbRRNodeId create_ext_sink_node(const e_lb_rr_type& type);
|
||||
|
||||
/* Set node-level information */
|
||||
void set_node_type(const LbRRNodeId& node, const e_lb_rr_type& type);
@@ -251,13 +266,29 @@ class LbRRGraph {
LbRREdgeId create_edge(const LbRRNodeId& source, const LbRRNodeId& sink, t_mode* mode);
|
||||
void set_edge_intrinsic_cost(const LbRREdgeId& edge, const float& cost);
|
||||
|
||||
public: /* Validators */
|
||||
public: /* Public validators */
|
||||
  /* Validate if the node id does exist in the RRGraph */
bool valid_node_id(const LbRRNodeId& node) const;
|
||||
|
||||
  /* Validate if the edge id does exist in the RRGraph */
bool valid_edge_id(const LbRREdgeId& edge) const;
|
||||
|
||||
bool validate() const;
|
||||
|
||||
bool empty() const;
|
||||
|
||||
private: /* Private Validators */
|
||||
bool validate_node_sizes() const;
|
||||
bool validate_edge_sizes() const;
|
||||
bool validate_sizes() const;
|
||||
bool validate_node_is_edge_src(const LbRRNodeId& node, const LbRREdgeId& edge) const;
|
||||
bool validate_node_is_edge_sink(const LbRRNodeId& node, const LbRREdgeId& edge) const;
|
||||
bool validate_node_in_edges(const LbRRNodeId& node) const;
|
||||
bool validate_node_out_edges(const LbRRNodeId& node) const;
|
||||
bool validate_nodes_in_edges() const;
|
||||
bool validate_nodes_out_edges() const;
|
||||
bool validate_nodes_edges() const;
|
||||
|
||||
private: /* Internal Data */
|
||||
/* Node related data */
|
||||
vtr::vector<LbRRNodeId, LbRRNodeId> node_ids_;
@@ -287,8 +318,12 @@ class LbRRGraph {
  /* Fast look-up to search a node by its type and pb_graph_pin
* Indexing of fast look-up: [0..NUM_TYPES-1][t_pb_graph_pin*]
|
||||
*/
|
||||
typedef std::vector<std::map<t_pb_graph_pin*, LbRRNodeId>> NodeLookup;
|
||||
typedef std::vector<std::map<const t_pb_graph_pin*, LbRRNodeId>> NodeLookup;
|
||||
mutable NodeLookup node_lookup_;
|
||||
|
||||
/* Special node look-up */
|
||||
LbRRNodeId ext_source_node_;
|
||||
LbRRNodeId ext_sink_node_;
|
||||
};
|
||||
|
||||
} /* end namespace openfpga */
@@ -0,0 +1,73 @@
/***************************************************************************************
|
||||
* This file includes most utilized functions for LbRRGraph object
|
||||
***************************************************************************************/
|
||||
|
||||
/* Headers from vtrutil library */
|
||||
#include "vtr_log.h"
|
||||
#include "vtr_assert.h"
|
||||
#include "vtr_util.h"
|
||||
|
||||
#include "lb_rr_graph_utils.h"
|
||||
|
||||
/* begin namespace openfpga */
|
||||
namespace openfpga {
|
||||
|
||||
/***************************************************************************************
|
||||
* Generate a string to describe a node in a logical tile rr_graph
|
||||
* in the context of logical tile
|
||||
***************************************************************************************/
|
||||
std::string describe_lb_rr_node(const LbRRGraph& lb_rr_graph,
|
||||
const LbRRNodeId& inode) {
|
||||
std::string description;
|
||||
|
||||
const t_pb_graph_pin* pb_graph_pin = lb_rr_graph.node_pb_graph_pin(inode);
|
||||
|
||||
if (pb_graph_pin) {
|
||||
description += "'" + pb_graph_pin->to_string(false) + "'";
|
||||
} else if (inode == lb_rr_graph.ext_source_node()) {
|
||||
VTR_ASSERT(LB_SOURCE == lb_rr_graph.node_type(inode));
|
||||
description = "cluster-external source (LB_SOURCE)";
|
||||
} else if (inode == lb_rr_graph.ext_sink_node()) {
|
||||
VTR_ASSERT(LB_SINK == lb_rr_graph.node_type(inode));
|
||||
description = "cluster-external sink (LB_SINK)";
|
||||
} else if (LB_SINK == lb_rr_graph.node_type(inode)) {
|
||||
description = "cluster-internal sink (LB_SINK accessible via architecture pins: ";
|
||||
|
||||
//To account for equivalent pins multiple pins may route to a single sink.
|
||||
    //As a result we need to find all the nodes which connect to this sink in order
//to give user-friendly pin names
|
||||
std::vector<std::string> pin_descriptions;
|
||||
for (const LbRREdgeId edge : lb_rr_graph.node_in_edges(inode)) {
|
||||
const LbRRNodeId pin_rr_idx = lb_rr_graph.edge_src_node(edge);
|
||||
const t_pb_graph_pin* pin_pb_gpin = lb_rr_graph.node_pb_graph_pin(pin_rr_idx);
|
||||
pin_descriptions.push_back(pin_pb_gpin->to_string());
|
||||
}
|
||||
|
||||
description += vtr::join(pin_descriptions, ", ");
|
||||
description += ")";
|
||||
|
||||
} else if (LB_SOURCE == lb_rr_graph.node_type(inode)) {
|
||||
description = "cluster-internal source (LB_SOURCE)";
|
||||
} else if (LB_INTERMEDIATE == lb_rr_graph.node_type(inode)) {
|
||||
description = "cluster-internal intermediate?";
|
||||
} else {
|
||||
description = "<unknown lb_type_rr_node>";
|
||||
}
|
||||
|
||||
return description;
|
||||
}
|
||||
|
||||
/* This function aims to print basic information about a node */
|
||||
void print_lb_rr_node(const LbRRGraph& lb_rr_graph,
|
||||
const LbRRNodeId& node) {
|
||||
VTR_LOG("Node id: %d\n", size_t(node));
|
||||
VTR_LOG("Node type: %s\n", lb_rr_type_str[lb_rr_graph.node_type(node)]);
|
||||
VTR_LOG("Node capacity: %d\n", lb_rr_graph.node_capacity(node));
|
||||
VTR_LOG("Node pb_graph_pin: %s\n", lb_rr_graph.node_pb_graph_pin(node)->to_string().c_str());
|
||||
VTR_LOG("Node intrinsic_cost: %f\n", lb_rr_graph.node_intrinsic_cost(node));
|
||||
VTR_LOG("Node num in_edges: %ld\n", lb_rr_graph.node_in_edges(node).size());
|
||||
VTR_LOG("Node num out_edges: %ld\n", lb_rr_graph.node_out_edges(node).size());
|
||||
}
|
||||
|
||||
|
||||
} /* end namespace openfpga */
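A short sketch (not part of the diff) combining the describe_lb_rr_node() helper above with LbRouter::find_congested_rr_nodes() to report over-used nodes after a routing attempt; the helper name is hypothetical and the router/graph objects are assumed to come from the surrounding flow.

#include "vtr_log.h"
#include "lb_router.h"
#include "lb_rr_graph_utils.h"

namespace openfpga {

/* Illustrative sketch: print a human-readable line per congested routing resource node */
void report_congestion_sketch(const LbRouter& lb_router,
                              const LbRRGraph& lb_rr_graph) {
  for (const LbRRNodeId& inode : lb_router.find_congested_rr_nodes(lb_rr_graph)) {
    VTR_LOG("Congested lb_rr_graph node: %s\n",
            describe_lb_rr_node(lb_rr_graph, inode).c_str());
  }
}

} /* end namespace openfpga */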
@@ -0,0 +1,25 @@
#ifndef LB_RR_GRAPH_UTILS_H
#define LB_RR_GRAPH_UTILS_H

/********************************************************************
 * Include header files that are required by function declaration
 *******************************************************************/
#include <string>
#include "lb_rr_graph.h"

/********************************************************************
 * Function declaration
 *******************************************************************/

/* begin namespace openfpga */
namespace openfpga {

std::string describe_lb_rr_node(const LbRRGraph& lb_rr_graph,
                                const LbRRNodeId& inode);

void print_lb_rr_node(const LbRRGraph& lb_rr_graph,
                      const LbRRNodeId& node);

} /* end namespace openfpga */

#endif
@@ -0,0 +1,138 @@
/******************************************************************************
|
||||
 * Member functions for data structure PhysicalPb
******************************************************************************/
|
||||
#include "vtr_assert.h"
|
||||
#include "vtr_log.h"
|
||||
|
||||
#include "physical_pb.h"
|
||||
|
||||
/* begin namespace openfpga */
|
||||
namespace openfpga {
|
||||
|
||||
/**************************************************
|
||||
* Public Accessors
|
||||
*************************************************/
|
||||
PhysicalPb::physical_pb_range PhysicalPb::pbs() const {
|
||||
return vtr::make_range(pb_ids_.begin(), pb_ids_.end());
|
||||
}
|
||||
|
||||
std::string PhysicalPb::name(const PhysicalPbId& pb) const {
|
||||
VTR_ASSERT(true == valid_pb_id(pb));
|
||||
return names_[pb];
|
||||
}
|
||||
|
||||
/* Find the physical pb id with a given pb_graph_node, return invalid if not found */
PhysicalPbId PhysicalPb::find_pb(const t_pb_graph_node* pb_graph_node) const {
|
||||
if (type2id_map_.find(pb_graph_node) != type2id_map_.end()) {
|
||||
/* Find it, return the id */
|
||||
return type2id_map_.at(pb_graph_node);
|
||||
}
|
||||
/* Not found, return an invalid id */
|
||||
return PhysicalPbId::INVALID();
|
||||
}
|
||||
|
||||
PhysicalPbId PhysicalPb::parent(const PhysicalPbId& pb) const {
|
||||
VTR_ASSERT(true == valid_pb_id(pb));
|
||||
return parent_pbs_[pb];
|
||||
}
|
||||
|
||||
AtomNetId PhysicalPb::pb_graph_pin_atom_net(const PhysicalPbId& pb,
|
||||
const t_pb_graph_pin* pb_graph_pin) const {
|
||||
VTR_ASSERT(true == valid_pb_id(pb));
|
||||
if (pin_atom_nets_[pb].find(pb_graph_pin) != pin_atom_nets_[pb].end()) {
|
||||
/* Find it, return the id */
|
||||
return pin_atom_nets_[pb].at(pb_graph_pin);
|
||||
}
|
||||
/* Not found, return an invalid id */
|
||||
return AtomNetId::INVALID();
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
 * Public Mutators
******************************************************************************/
|
||||
PhysicalPbId PhysicalPb::create_pb(const t_pb_graph_node* pb_graph_node) {
|
||||
  /* Find if the pb_graph_node has been used. If so, return an invalid id */
std::map<const t_pb_graph_node*, PhysicalPbId>::iterator it = type2id_map_.find(pb_graph_node);
|
||||
if (it != type2id_map_.end()) {
|
||||
return PhysicalPbId::INVALID();
|
||||
}
|
||||
|
||||
  /* Create a new id */
PhysicalPbId pb = PhysicalPbId(pb_ids_.size());
|
||||
pb_ids_.push_back(pb);
|
||||
|
||||
/* Allocate other attributes */
|
||||
names_.emplace_back();
|
||||
pb_graph_nodes_.push_back(pb_graph_node);
|
||||
atom_blocks_.emplace_back();
|
||||
pin_atom_nets_.emplace_back();
|
||||
|
||||
child_pbs_.emplace_back();
|
||||
parent_pbs_.emplace_back();
|
||||
|
||||
mode_bits_.emplace_back();
|
||||
|
||||
  /* Register in the type2id map */
type2id_map_[pb_graph_node] = pb;
|
||||
|
||||
return pb;
|
||||
}
|
||||
|
||||
void PhysicalPb::add_child(const PhysicalPbId& parent,
|
||||
const PhysicalPbId& child,
|
||||
const t_pb_type* child_type) {
|
||||
VTR_ASSERT(true == valid_pb_id(parent));
|
||||
VTR_ASSERT(true == valid_pb_id(child));
|
||||
|
||||
child_pbs_[parent][child_type].push_back(child);
|
||||
|
||||
if (PhysicalPbId::INVALID() != parent_pbs_[child]) {
|
||||
VTR_LOGF_WARN(__FILE__, __LINE__,
|
||||
"Overwrite parent '%s' for physical pb '%s' with a new one '%s'!\n",
|
||||
pb_graph_nodes_[parent_pbs_[child]]->hierarchical_type_name().c_str(),
|
||||
pb_graph_nodes_[child]->hierarchical_type_name().c_str(),
|
||||
pb_graph_nodes_[parent]->hierarchical_type_name().c_str());
|
||||
}
|
||||
parent_pbs_[child] = parent;
|
||||
}
|
||||
|
||||
void PhysicalPb::set_mode_bits(const PhysicalPbId& pb,
|
||||
const std::vector<size_t>& mode_bits) {
|
||||
VTR_ASSERT(true == valid_pb_id(pb));
|
||||
|
||||
mode_bits_[pb] = mode_bits;
|
||||
}
|
||||
|
||||
void PhysicalPb::add_atom_block(const PhysicalPbId& pb,
|
||||
const AtomBlockId& atom_block) {
|
||||
VTR_ASSERT(true == valid_pb_id(pb));
|
||||
|
||||
atom_blocks_[pb].push_back(atom_block);
|
||||
}
|
||||
|
||||
void PhysicalPb::set_pb_graph_pin_atom_net(const PhysicalPbId& pb,
|
||||
const t_pb_graph_pin* pb_graph_pin,
|
||||
const AtomNetId& atom_net) {
|
||||
VTR_ASSERT(true == valid_pb_id(pb));
|
||||
if (pin_atom_nets_[pb].end() != pin_atom_nets_[pb].find(pb_graph_pin)) {
|
||||
VTR_LOG_WARN("Overwrite pb_graph_pin '%s[%d]' atom net '%lu' with '%lu'\n",
|
||||
pb_graph_pin->port->name, pb_graph_pin->pin_number,
|
||||
size_t(pin_atom_nets_[pb][pb_graph_pin]),
|
||||
size_t(atom_net));
|
||||
}
|
||||
|
||||
pin_atom_nets_[pb][pb_graph_pin] = atom_net;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
 * Public validators/invalidators
******************************************************************************/
|
||||
bool PhysicalPb::valid_pb_id(const PhysicalPbId& pb_id) const {
|
||||
return ( size_t(pb_id) < pb_ids_.size() ) && ( pb_id == pb_ids_[pb_id] );
|
||||
}
|
||||
|
||||
bool PhysicalPb::empty() const {
|
||||
return 0 == pb_ids_.size();
|
||||
}
|
||||
|
||||
} /* end namespace openfpga */
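A minimal sketch (not part of the diff) of how a caller is expected to populate a PhysicalPb while walking a pb_graph; the helper name is hypothetical, the pb_graph_node/pb_type pointers and the atom block id are assumed to come from the VPR contexts, and the mode bits are placeholders.

#include <vector>
#include "vtr_assert.h"
#include "physical_pb.h"

namespace openfpga {

/* Illustrative sketch: create a parent/child pair of physical pbs and annotate the child */
void populate_physical_pb_sketch(PhysicalPb& phy_pb,
                                 const t_pb_graph_node* root_pb_graph_node,
                                 const t_pb_graph_node* child_pb_graph_node,
                                 const t_pb_type* child_pb_type,
                                 const AtomBlockId& atom_blk) {
  /* One PhysicalPbId per pb_graph_node; create_pb() returns an invalid id for duplicates */
  PhysicalPbId root_pb = phy_pb.create_pb(root_pb_graph_node);
  PhysicalPbId child_pb = phy_pb.create_pb(child_pb_graph_node);

  /* Link the hierarchy and annotate the child with mapping results */
  phy_pb.add_child(root_pb, child_pb, child_pb_type);
  phy_pb.add_atom_block(child_pb, atom_blk);
  phy_pb.set_mode_bits(child_pb, std::vector<size_t>(1, 0)); /* placeholder mode bits */

  /* Later look-ups go through the pb_graph_node fast look-up */
  VTR_ASSERT(child_pb == phy_pb.find_pb(child_pb_graph_node));
}

} /* end namespace openfpga */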
@@ -0,0 +1,81 @@
#ifndef PHYSICAL_PB_H
|
||||
#define PHYSICAL_PB_H
|
||||
|
||||
/********************************************************************
|
||||
* Include header files required by the data structure definition
|
||||
*******************************************************************/
|
||||
/* Headers from vtrutil library */
|
||||
#include "vtr_geometry.h"
|
||||
#include "vtr_vector.h"
|
||||
|
||||
/* Headers from readarch library */
|
||||
#include "physical_types.h"
|
||||
|
||||
/* Headers from vpr library */
|
||||
#include "atom_netlist_fwd.h"
|
||||
|
||||
#include "physical_pb_fwd.h"
|
||||
|
||||
/* Begin namespace openfpga */
|
||||
namespace openfpga {
|
||||
|
||||
/********************************************************************
|
||||
* PhysicalPb object aims to store the mapped result for a programmable
|
||||
* logical block like the VPR data structure t_pb does.
|
||||
* Differently, it is tailored for the physical implementation of a
|
||||
* programmable block.
|
||||
* - It does not contain multi-mode for each child physical_pb while
|
||||
* VPR t_pb does have multi-mode. This is because that the hardware
|
||||
* implementation is unique
|
||||
* - It contains mode-selection bits for each primitive physical_pb
|
||||
* This is used to help bitstream generator to configure a primitive
|
||||
* circuit in the correct mode
|
||||
* - A primitive LUT can be mapped to various truth tables.
|
||||
* This is true for any fracturable LUTs.
|
||||
*******************************************************************/
|
||||
class PhysicalPb {
|
||||
public: /* Types and ranges */
|
||||
typedef vtr::vector<PhysicalPbId, PhysicalPbId>::const_iterator physical_pb_iterator;
|
||||
typedef vtr::Range<physical_pb_iterator> physical_pb_range;
|
||||
public: /* Public aggregators */
|
||||
physical_pb_range pbs() const;
|
||||
std::string name(const PhysicalPbId& pb) const;
|
||||
    PhysicalPbId find_pb(const t_pb_graph_node* pb_graph_node) const;
PhysicalPbId parent(const PhysicalPbId& pb) const;
|
||||
AtomNetId pb_graph_pin_atom_net(const PhysicalPbId& pb,
|
||||
const t_pb_graph_pin* pb_graph_pin) const;
|
||||
public: /* Public mutators */
|
||||
PhysicalPbId create_pb(const t_pb_graph_node* pb_graph_node);
|
||||
void add_child(const PhysicalPbId& parent,
|
||||
const PhysicalPbId& child,
|
||||
const t_pb_type* child_type);
|
||||
void add_atom_block(const PhysicalPbId& pb,
|
||||
const AtomBlockId& atom_block);
|
||||
void set_mode_bits(const PhysicalPbId& pb,
|
||||
const std::vector<size_t>& mode_bits);
|
||||
void set_pb_graph_pin_atom_net(const PhysicalPbId& pb,
|
||||
const t_pb_graph_pin* pb_graph_pin,
|
||||
const AtomNetId& atom_net);
|
||||
public: /* Public validators/invalidators */
|
||||
bool valid_pb_id(const PhysicalPbId& pb_id) const;
|
||||
bool empty() const;
|
||||
private: /* Internal Data */
|
||||
vtr::vector<PhysicalPbId, PhysicalPbId> pb_ids_;
|
||||
vtr::vector<PhysicalPbId, const t_pb_graph_node*> pb_graph_nodes_;
|
||||
vtr::vector<PhysicalPbId, std::string> names_;
|
||||
vtr::vector<PhysicalPbId, std::vector<AtomBlockId>> atom_blocks_;
|
||||
vtr::vector<PhysicalPbId, std::map<const t_pb_graph_pin*, AtomNetId>> pin_atom_nets_;
|
||||
|
||||
/* Child pbs are organized as [0..num_child_pb_types-1][0..child_pb_type->num_pb-1] */
|
||||
vtr::vector<PhysicalPbId, std::map<const t_pb_type*, std::vector<PhysicalPbId>>> child_pbs_;
|
||||
vtr::vector<PhysicalPbId, PhysicalPbId> parent_pbs_;
|
||||
|
||||
vtr::vector<PhysicalPbId, std::vector<size_t>> mode_bits_;
|
||||
|
||||
/* Fast lookup */
|
||||
std::map<const t_pb_graph_node*, PhysicalPbId> type2id_map_;
|
||||
};
|
||||
|
||||
} /* End namespace openfpga*/
|
||||
|
||||
#endif
@@ -0,0 +1,23 @@
/**************************************************
 * This file includes only declarations for
 * the data structures for PhysicalPb
 * Please refer to physical_pb.h for more details
 *************************************************/
#ifndef PHYSICAL_PB_FWD_H
#define PHYSICAL_PB_FWD_H

#include "vtr_strong_id.h"

/* begin namespace openfpga */
namespace openfpga {

/* Strong Ids for PhysicalPb */
struct physical_pb_id_tag;

typedef vtr::StrongId<physical_pb_id_tag> PhysicalPbId;

class PhysicalPb;

} /* end namespace openfpga */

#endif
@@ -7,26 +7,344 @@
#include "vtr_assert.h"
|
||||
#include "vtr_time.h"
|
||||
|
||||
/* Headers from vpr library */
|
||||
#include "vpr_utils.h"
|
||||
|
||||
#include "pb_type_utils.h"
|
||||
#include "build_physical_lb_rr_graph.h"
|
||||
#include "lb_router.h"
|
||||
#include "lb_router_utils.h"
|
||||
#include "physical_pb_utils.h"
|
||||
#include "repack.h"
|
||||
|
||||
/* begin namespace openfpga */
|
||||
namespace openfpga {
|
||||
|
||||
/***************************************************************************************
|
||||
 * Try to find the pb_graph pins which are mapped to a given atom net id in the context of pb route
***************************************************************************************/
|
||||
static
|
||||
std::vector<t_pb_graph_pin*> find_routed_pb_graph_pins_atom_net(const t_pb* pb,
|
||||
const AtomNetId& atom_net_id,
|
||||
t_pb_graph_pin** pb_graph_pin_lookup_from_index) {
|
||||
std::vector<t_pb_graph_pin*> sink_pb_pins;
|
||||
|
||||
/* Find the sink nodes from top-level node */
|
||||
for (int pin = 0; pin < pb->pb_graph_node->total_pb_pins; ++pin) {
|
||||
/* Bypass unused pins */
|
||||
if ((0 == pb->pb_route.count(pin)) || (AtomNetId::INVALID() == pb->pb_route[pin].atom_net_id)) {
|
||||
continue;
|
||||
}
|
||||
/* Get the driver pb pin id, it must be valid */
|
||||
if (atom_net_id != pb->pb_route[pin].atom_net_id) {
|
||||
continue;
|
||||
}
|
||||
/* Check each sink nodes, if pin belongs to an input of a primitive pb_graph_node, it is what we want */
|
||||
for (const int& sink_pb_pin_id : pb->pb_route[pin].sink_pb_pin_ids) {
|
||||
t_pb_graph_pin* sink_pb_pin = pb_graph_pin_lookup_from_index[sink_pb_pin_id];
|
||||
VTR_ASSERT(nullptr != sink_pb_pin);
|
||||
/* We care only
|
||||
* - input pins of primitive nodes
|
||||
* - output pins of top node
|
||||
*/
|
||||
if ( (true == is_primitive_pb_type(sink_pb_pin->parent_node->pb_type))
|
||||
&& (IN_PORT == sink_pb_pin->port->type)) {
|
||||
sink_pb_pins.push_back(sink_pb_pin);
|
||||
}
|
||||
|
||||
if ( (true == sink_pb_pin->parent_node->is_root())
|
||||
&& (OUT_PORT == sink_pb_pin->port->type)) {
|
||||
sink_pb_pins.push_back(sink_pb_pin);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sink_pb_pins;
|
||||
}
|
||||
|
||||
/***************************************************************************************
|
||||
* Find the corresponding nodes in a logical block routing resource graph
|
||||
* with a given list of sink pb_graph pins
|
||||
* Note that these sink pins belong to operating pb_graph_node,
|
||||
* we will find the associated physical pb_graph_node as well as physical pins
|
||||
* and then spot the nodes in lb_rr_graph
|
||||
***************************************************************************************/
|
||||
static
|
||||
std::vector<LbRRNodeId> find_lb_net_physical_sink_lb_rr_nodes(const LbRRGraph& lb_rr_graph,
|
||||
const std::vector<t_pb_graph_pin*>& sink_pins,
|
||||
const VprDeviceAnnotation& device_annotation) {
|
||||
std::vector<LbRRNodeId> sink_nodes;
|
||||
|
||||
for (t_pb_graph_pin* sink_pin : sink_pins) {
|
||||
/* Find the physical pin */
|
||||
t_pb_graph_pin* physical_sink_pin = nullptr;
|
||||
if (true == sink_pin->parent_node->is_root()) {
|
||||
physical_sink_pin = sink_pin;
|
||||
} else {
|
||||
physical_sink_pin = device_annotation.physical_pb_graph_pin(sink_pin);
|
||||
}
|
||||
|
||||
    /* If this is the root node, the physical pin is itself */
if (nullptr == physical_sink_pin) {
|
||||
VTR_LOG("Fail to find a physical pin for operating pin '%s'!\n",
|
||||
sink_pin->to_string().c_str());
|
||||
}
|
||||
VTR_ASSERT(nullptr != physical_sink_pin);
|
||||
LbRRNodeId sink_lb_rr_node = lb_rr_graph.find_node(LB_INTERMEDIATE, physical_sink_pin);
|
||||
if (true != lb_rr_graph.valid_node_id(sink_lb_rr_node)) {
|
||||
VTR_LOG("Try to find the lb_rr_node for pb_graph_pin '%s'\n",
|
||||
physical_sink_pin->to_string().c_str());
|
||||
}
|
||||
VTR_ASSERT(true == lb_rr_graph.valid_node_id(sink_lb_rr_node));
|
||||
sink_nodes.push_back(sink_lb_rr_node);
|
||||
}
|
||||
|
||||
return sink_nodes;
|
||||
}
|
||||
|
||||
/***************************************************************************************
|
||||
* Create nets to be routed, including the source nodes and terminals
|
||||
* And add them to the logical block router
|
||||
***************************************************************************************/
|
||||
static
|
||||
void add_lb_router_nets(LbRouter& lb_router,
|
||||
t_logical_block_type_ptr lb_type,
|
||||
const LbRRGraph& lb_rr_graph,
|
||||
const AtomContext& atom_ctx,
|
||||
const VprDeviceAnnotation& device_annotation,
|
||||
const ClusteringContext& clustering_ctx,
|
||||
const VprClusteringAnnotation& clustering_annotation,
|
||||
const ClusterBlockId& block_id,
|
||||
const bool& verbose) {
|
||||
size_t net_counter = 0;
|
||||
|
||||
  /* Two spots to find source nodes for each net
* - nets that appear in the inputs of a clustered block
|
||||
* Note that these nets may be moved to another input of the same cluster block
|
||||
* we will locate the final pin and consider its corresponding routing resource node as source
|
||||
* - nets that appear in the outputs of a primitive pb_graph_node
|
||||
* Note that these primitive pb_graph node are operating pb_graph_node
|
||||
* while we are considering physical pb_graph node
|
||||
* Therefore, we will find the outputs of physical pb_graph_node corresponding to the operating one
|
||||
   * and then consider the associated routing resource node as source
*/
|
||||
t_pb* pb = clustering_ctx.clb_nlist.block_pb(block_id);
|
||||
VTR_ASSERT(true == pb->pb_graph_node->is_root());
|
||||
|
||||
/* Build the fast look-up between pb_pin_id and pb_graph_pin pointer */
|
||||
t_pb_graph_pin** pb_graph_pin_lookup_from_index = alloc_and_load_pb_graph_pin_lookup_from_index(lb_type);
|
||||
|
||||
/* Find the source nodes for the nets mapped to inputs of a clustered block */
|
||||
for (int j = 0; j < lb_type->pb_type->num_pins; j++) {
|
||||
/* Find the net mapped to this pin in clustering results*/
|
||||
ClusterNetId cluster_net_id = clustering_ctx.clb_nlist.block_net(block_id, j);
|
||||
/* Get the actual net id because it may be renamed during routing */
|
||||
if (true == clustering_annotation.is_net_renamed(block_id, j)) {
|
||||
cluster_net_id = clustering_annotation.net(block_id, j);
|
||||
}
|
||||
/* Bypass unmapped pins */
|
||||
if (ClusterNetId::INVALID() == cluster_net_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Get the source pb_graph pin and find the rr_node in logical block routing resource graph */
|
||||
const t_pb_graph_pin* source_pb_pin = get_pb_graph_node_pin_from_block_pin(block_id, j);
|
||||
VTR_ASSERT(source_pb_pin->parent_node == pb->pb_graph_node);
|
||||
/* Bypass output pins */
|
||||
if (OUT_PORT == source_pb_pin->port->type) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* The outputs of pb_graph_node is INTERMEDIATE node in the routing resource graph,
|
||||
* they are all connected to a common source node
|
||||
*/
|
||||
LbRRNodeId source_lb_rr_node = lb_rr_graph.find_node(LB_INTERMEDIATE, source_pb_pin);
|
||||
VTR_ASSERT(true == lb_rr_graph.valid_node_id(source_lb_rr_node));
|
||||
|
||||
AtomNetId atom_net_id = atom_ctx.lookup.atom_net(cluster_net_id);
|
||||
VTR_ASSERT(AtomNetId::INVALID() != atom_net_id);
|
||||
|
||||
/* Find all the sink pins in the pb_route, we walk through the input pins and find the pin */
|
||||
std::vector<t_pb_graph_pin*> sink_pb_graph_pins = find_routed_pb_graph_pins_atom_net(pb, atom_net_id, pb_graph_pin_lookup_from_index);
|
||||
std::vector<LbRRNodeId> sink_lb_rr_nodes = find_lb_net_physical_sink_lb_rr_nodes(lb_rr_graph, sink_pb_graph_pins, device_annotation);
|
||||
VTR_ASSERT(sink_lb_rr_nodes.size() == sink_pb_graph_pins.size());
|
||||
|
||||
/* Add the net */
|
||||
add_lb_router_net_to_route(lb_router, lb_rr_graph,
|
||||
source_lb_rr_node, sink_lb_rr_nodes,
|
||||
atom_ctx, atom_net_id);
|
||||
net_counter++;
|
||||
}
|
||||
|
||||
/* Find the source nodes for the nets mapped to outputs of primitive pb_graph_node */
|
||||
for (int pin = 0; pin < pb->pb_graph_node->total_pb_pins; ++pin) {
|
||||
/* Bypass unused pins */
|
||||
if ((0 == pb->pb_route.count(pin)) || (AtomNetId::INVALID() == pb->pb_route[pin].atom_net_id)) {
|
||||
continue;
|
||||
}
|
||||
/* Get the driver pb pin id, it must be valid */
|
||||
int source_pb_pin_id = pb->pb_route[pin].driver_pb_pin_id;
|
||||
if (OPEN == source_pb_pin_id) {
|
||||
continue;
|
||||
}
|
||||
VTR_ASSERT(OPEN != source_pb_pin_id && source_pb_pin_id < pb->pb_graph_node->total_pb_pins);
|
||||
/* Find the corresponding pb_graph_pin and its physical pb_graph_pin */
|
||||
t_pb_graph_pin* source_pb_pin = pb_graph_pin_lookup_from_index[source_pb_pin_id];
|
||||
/* Skip the pin from top-level pb_graph_node, they have been handled already */
|
||||
if (source_pb_pin->parent_node == pb->pb_graph_node) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* The pin must be an output of a primitive pb_graph_node */
|
||||
if (OUT_PORT != source_pb_pin->port->type) {
|
||||
continue;
|
||||
}
|
||||
if (true != is_primitive_pb_type(source_pb_pin->parent_node->pb_type)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* The outputs of pb_graph_node is SOURCE node in the routing resource graph */
|
||||
t_pb_graph_pin* physical_source_pb_pin = device_annotation.physical_pb_graph_pin(source_pb_pin);
|
||||
LbRRNodeId source_lb_rr_node = lb_rr_graph.find_node(LB_SOURCE, physical_source_pb_pin);
|
||||
VTR_ASSERT(true == lb_rr_graph.valid_node_id(source_lb_rr_node));
|
||||
|
||||
AtomNetId atom_net_id = pb->pb_route[pin].atom_net_id;
|
||||
VTR_ASSERT(AtomNetId::INVALID() != atom_net_id);
|
||||
|
||||
/* Find all the sink pins in the pb_route */
|
||||
std::vector<t_pb_graph_pin*> sink_pb_graph_pins = find_routed_pb_graph_pins_atom_net(pb, atom_net_id, pb_graph_pin_lookup_from_index);
|
||||
std::vector<LbRRNodeId> sink_lb_rr_nodes = find_lb_net_physical_sink_lb_rr_nodes(lb_rr_graph, sink_pb_graph_pins, device_annotation);
|
||||
VTR_ASSERT(sink_lb_rr_nodes.size() == sink_pb_graph_pins.size());
|
||||
|
||||
/* Add the net */
|
||||
add_lb_router_net_to_route(lb_router, lb_rr_graph,
|
||||
source_lb_rr_node, sink_lb_rr_nodes,
|
||||
atom_ctx, atom_net_id);
|
||||
net_counter++;
|
||||
}
|
||||
|
||||
/* Free */
|
||||
free_pb_graph_pin_lookup_from_index(pb_graph_pin_lookup_from_index);
|
||||
|
||||
VTR_LOGV(verbose,
|
||||
"Added %lu nets to be routed.\n",
|
||||
net_counter);
|
||||
}

/***************************************************************************************
 * Repack a clustered block in the physical mode
 * This function will do:
 * - Find the lb_rr_graph that is affiliated to the clustered block
 *   and initialize the logical tile router
 * - Create nets to be routed, including the source nodes and terminals
 *   This should consider the net remapping in the clustering_annotation
 * - Run the router to finish the repacking
 * - Output routing results to data structure PhysicalPb and store it in clustering annotation
 ***************************************************************************************/
static
void repack_cluster(const AtomContext& atom_ctx,
                    const ClusteringContext& clustering_ctx,
                    const VprDeviceAnnotation& device_annotation,
                    VprClusteringAnnotation& clustering_annotation,
                    const ClusterBlockId& block_id,
                    const bool& verbose) {
  /* Get the pb graph that the current clustered block is mapped to */
  t_logical_block_type_ptr lb_type = clustering_ctx.clb_nlist.block_type(block_id);
  t_pb_graph_node* pb_graph_head = lb_type->pb_graph_head;
  VTR_ASSERT(nullptr != pb_graph_head);

  /* We should get a non-empty graph */
  const LbRRGraph& lb_rr_graph = device_annotation.physical_lb_rr_graph(pb_graph_head);
  VTR_ASSERT(!lb_rr_graph.empty());

  VTR_LOG("Repack clustered block '%s'...",
          clustering_ctx.clb_nlist.block_name(block_id).c_str());
  VTR_LOGV(verbose, "\n");

  /* Initialize the router */
  LbRouter lb_router(lb_rr_graph, lb_type);

  /* Add nets to be routed with source and terminals */
  add_lb_router_nets(lb_router, lb_type, lb_rr_graph, atom_ctx, device_annotation,
                     clustering_ctx, const_cast<const VprClusteringAnnotation&>(clustering_annotation),
                     block_id, verbose);

  /* Initialize the modes to expand routing trees with the physical modes in device annotation
   * This is a must-do before running the router for the purpose of repacking!!!
   */
  lb_router.set_physical_pb_modes(lb_rr_graph, device_annotation);

  /* Run the router */
  bool route_success = lb_router.try_route(lb_rr_graph, atom_ctx.nlist, verbose);

  if (false == route_success) {
    VTR_LOGV(verbose, "Reroute failed\n");
    exit(1);
  }
  VTR_ASSERT(true == route_success);
  VTR_LOGV(verbose, "Reroute succeed\n");

  /* Annotate routing results to physical pb */
  PhysicalPb phy_pb;
  alloc_physical_pb_from_pb_graph(phy_pb, pb_graph_head, device_annotation);
  rec_update_physical_pb_from_operating_pb(phy_pb,
                                           clustering_ctx.clb_nlist.block_pb(block_id),
                                           clustering_ctx.clb_nlist.block_pb(block_id)->pb_route,
                                           atom_ctx,
                                           device_annotation);
  /* TODO: save routing results */
  VTR_LOGV(verbose, "Saved results in physical pb\n");

  /* Add the pb to clustering context */
  clustering_annotation.add_physical_pb(block_id, phy_pb);

  VTR_LOG("Done\n");
}

/***************************************************************************************
 * Repack each clustered block in the clustering context
 ***************************************************************************************/
static
void repack_clusters(const AtomContext& atom_ctx,
                     const ClusteringContext& clustering_ctx,
                     const VprDeviceAnnotation& device_annotation,
                     VprClusteringAnnotation& clustering_annotation,
                     const bool& verbose) {
  vtr::ScopedStartFinishTimer timer("Repack clustered blocks to physical implementation of logical tile");

  for (auto blk_id : clustering_ctx.clb_nlist.blocks()) {
    repack_cluster(atom_ctx, clustering_ctx,
                   device_annotation, clustering_annotation,
                   blk_id, verbose);
  }
}

/***************************************************************************************
 * Top-level function to pack physical pb_graph
 * This function will do:
 * - create physical lb_rr_graph for each pb_graph considering physical modes only
 *   the lb_rr_graph willbe added to device annotation
 *   the lb_rr_graph will be added to device annotation
 * - annotate nets to be routed for each clustered block from operating modes of pb_graph
 *   to physical modes of pb_graph
 * - rerun the routing for each clustered block
 * - store the packing results to clustering annotation
 ***************************************************************************************/
void pack_physical_pbs(const DeviceContext& device_ctx,
                       const VprDeviceAnnotation& device_annotation,
                       const AtomContext& atom_ctx,
                       const ClusteringContext& clustering_ctx,
                       VprDeviceAnnotation& device_annotation,
                       VprClusteringAnnotation& clustering_annotation,
                       const VprRoutingAnnotation& routing_annotation,
                       const bool& verbose) {

  /* build the routing resource graph for each logical tile */
  build_physical_lb_rr_graphs(device_ctx,
                              device_annotation,
                              verbose);

  /* Call the LbRouter to re-pack each clustered block to physical implementation */
  repack_clusters(atom_ctx, clustering_ctx,
                  const_cast<const VprDeviceAnnotation&>(device_annotation), clustering_annotation,
                  verbose);
}

} /* end namespace openfpga */


@@ -17,9 +17,10 @@
namespace openfpga {

void pack_physical_pbs(const DeviceContext& device_ctx,
                       const VprDeviceAnnotation& device_annotation,
                       const AtomContext& atom_ctx,
                       const ClusteringContext& clustering_ctx,
                       VprDeviceAnnotation& device_annotation,
                       VprClusteringAnnotation& clustering_annotation,
                       const VprRoutingAnnotation& routing_annotation,
                       const bool& verbose);

} /* end namespace openfpga */


@@ -25,9 +25,17 @@ namespace openfpga {
 ************************************************************************/
bool is_primitive_pb_type(t_pb_type* pb_type) {
  if (LUT_CLASS == pb_type->class_type) {
    /* The first mode of LUT is wire, the second is the regular LUT */
    VTR_ASSERT(std::string("wire") == std::string(pb_type->modes[0].name));
    VTR_ASSERT(std::string(pb_type->name) == std::string(pb_type->modes[1].name));
    /* The only primitive LUT we recognize is the one whose
     * first mode is wire and whose second mode is the regular LUT.
     * VPR constructed two modes under a regular LUT, and these children
     * are labelled as LUT_CLASS as well. OpenFPGA does not consider
     * them as primitive as they are for CAD usage only
     */
    if (0 == pb_type->num_modes) {
      return false;
    }
    VTR_ASSERT( (std::string("wire") == std::string(pb_type->modes[0].name))
             && (std::string(pb_type->name) == std::string(pb_type->modes[1].name)));
    return true;
  }
  return 0 == pb_type->num_modes;
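
/* Illustrative sketch of the revised check (assuming a fracturable LUT6 pb_type as in the
 * test architecture below; the variable names here are hypothetical placeholders):
 *   t_pb_type* frac_lut6_pb;    // LUT_CLASS with modes {"wire", "frac_lut6"}
 *   t_pb_type* cad_only_lut_pb; // LUT_CLASS child auto-created by VPR, num_modes == 0
 *   is_primitive_pb_type(frac_lut6_pb);    // true: treated as a primitive by OpenFPGA
 *   is_primitive_pb_type(cad_only_lut_pb); // false: CAD-only child, not a primitive
 */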


@@ -0,0 +1,272 @@
/************************************************************************
 * Functions to perform fundamental operations on the physical pb
 * data structures
 ***********************************************************************/
/* Headers from vtrutil library */
#include "vtr_assert.h"
#include "vtr_log.h"

#include "openfpga_naming.h"
#include "pb_type_utils.h"
#include "physical_pb_utils.h"

/* begin namespace openfpga */
namespace openfpga {

/************************************************************************
 * Allocate an empty physical pb graph based on pb_graph
 * This function should start with an empty physical pb object!!!
 * Suggest to check this before executing this function
 *   VTR_ASSERT(true == phy_pb.empty());
 ***********************************************************************/
static
void rec_alloc_physical_pb_from_pb_graph(PhysicalPb& phy_pb,
                                         const t_pb_graph_node* pb_graph_node,
                                         const VprDeviceAnnotation& device_annotation) {
  t_pb_type* pb_type = pb_graph_node->pb_type;

  t_mode* physical_mode = device_annotation.physical_mode(pb_type);

  PhysicalPbId cur_phy_pb_id = phy_pb.create_pb(pb_graph_node);
  VTR_ASSERT(true == phy_pb.valid_pb_id(cur_phy_pb_id));

  /* Finish for primitive node */
  if (true == is_primitive_pb_type(pb_type)) {
    return;
  }

  /* Find the physical mode */
  VTR_ASSERT(nullptr != physical_mode);

  /* Go to the leaf nodes first. This aims to build all the primitive nodes first
   * and then we build the parents and create links
   */
  for (int ipb = 0; ipb < physical_mode->num_pb_type_children; ++ipb) {
    for (int jpb = 0; jpb < physical_mode->pb_type_children[ipb].num_pb; ++jpb) {
      rec_alloc_physical_pb_from_pb_graph(phy_pb,
                                          &(pb_graph_node->child_pb_graph_nodes[physical_mode->index][ipb][jpb]),
                                          device_annotation);
    }
  }
}

/************************************************************************
 * Build all the relationships between parent and children
 * inside a physical pb graph
 * This function must be executed after rec_alloc_physical_pb_from_pb_graph()!!!
 ***********************************************************************/
static
void rec_build_physical_pb_children_from_pb_graph(PhysicalPb& phy_pb,
                                                  const t_pb_graph_node* pb_graph_node,
                                                  const VprDeviceAnnotation& device_annotation) {
  t_pb_type* pb_type = pb_graph_node->pb_type;

  /* Finish for primitive node */
  if (true == is_primitive_pb_type(pb_type)) {
    return;
  }

  t_mode* physical_mode = device_annotation.physical_mode(pb_type);
  VTR_ASSERT(nullptr != physical_mode);

  /* Please use the openfpga naming function so that you can build the link to module manager */
  PhysicalPbId parent_pb_id = phy_pb.find_pb(pb_graph_node);
  VTR_ASSERT(true == phy_pb.valid_pb_id(parent_pb_id));

  /* Add all the children */
  for (int ipb = 0; ipb < physical_mode->num_pb_type_children; ++ipb) {
    for (int jpb = 0; jpb < physical_mode->pb_type_children[ipb].num_pb; ++jpb) {
      PhysicalPbId child_pb_id = phy_pb.find_pb(&(pb_graph_node->child_pb_graph_nodes[physical_mode->index][ipb][jpb]));
      VTR_ASSERT(true == phy_pb.valid_pb_id(child_pb_id));
      phy_pb.add_child(parent_pb_id, child_pb_id, &(physical_mode->pb_type_children[ipb]));
    }
  }

  /* Go to the leaf nodes first. This aims to build all the primitive nodes first
   * and then we build the parents and create links
   */
  for (int ipb = 0; ipb < physical_mode->num_pb_type_children; ++ipb) {
    for (int jpb = 0; jpb < physical_mode->pb_type_children[ipb].num_pb; ++jpb) {
      rec_build_physical_pb_children_from_pb_graph(phy_pb,
                                                   &(pb_graph_node->child_pb_graph_nodes[physical_mode->index][ipb][jpb]),
                                                   device_annotation);
    }
  }
}

/************************************************************************
 * Allocate an empty physical pb graph based on pb_graph
 * This function should start with an empty physical pb object!!!
 * Suggest to check this before executing this function
 *   VTR_ASSERT(true == phy_pb.empty());
 ***********************************************************************/
void alloc_physical_pb_from_pb_graph(PhysicalPb& phy_pb,
                                     const t_pb_graph_node* pb_graph_head,
                                     const VprDeviceAnnotation& device_annotation) {
  VTR_ASSERT(true == phy_pb.empty());

  rec_alloc_physical_pb_from_pb_graph(phy_pb, pb_graph_head, device_annotation);
  rec_build_physical_pb_children_from_pb_graph(phy_pb, pb_graph_head, device_annotation);
}
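
/* Typical usage (as in repack_cluster() in repack.cpp): allocate the physical pb
 * skeleton first, then copy the operating-mode mapping results into it.
 *   PhysicalPb phy_pb;
 *   alloc_physical_pb_from_pb_graph(phy_pb, pb_graph_head, device_annotation);
 *   rec_update_physical_pb_from_operating_pb(phy_pb, op_pb, op_pb->pb_route,
 *                                            atom_ctx, device_annotation);
 * Here op_pb is assumed to be the root t_pb of the clustered block.
 */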

/************************************************************************
 * Update a mapping net from a pin of an operating primitive pb to a
 * physical pb database
 ***********************************************************************/
static
void update_primitive_physical_pb_pin_atom_net(PhysicalPb& phy_pb,
                                               const PhysicalPbId& primitive_pb,
                                               const t_pb_graph_pin* pb_graph_pin,
                                               const t_pb_routes& pb_route,
                                               const VprDeviceAnnotation& device_annotation) {
  int node_index = pb_graph_pin->pin_count_in_cluster;
  if (pb_route.count(node_index)) {
    /* The pin is mapped to a net, find the original pin in the atom netlist */
    AtomNetId atom_net = pb_route[node_index].atom_net_id;

    VTR_ASSERT(atom_net);

    /* Find the physical pb_graph_pin */
    t_pb_graph_pin* physical_pb_graph_pin = device_annotation.physical_pb_graph_pin(pb_graph_pin);
    VTR_ASSERT(nullptr != physical_pb_graph_pin);

    /* Check if the pin has been mapped to a net.
     * If yes, the atom net must be the same
     */
    if (AtomNetId::INVALID() == phy_pb.pb_graph_pin_atom_net(primitive_pb, physical_pb_graph_pin)) {
      phy_pb.set_pb_graph_pin_atom_net(primitive_pb, physical_pb_graph_pin, atom_net);
    } else {
      VTR_ASSERT(atom_net == phy_pb.pb_graph_pin_atom_net(primitive_pb, physical_pb_graph_pin));
    }
  }
}

/************************************************************************
 * Synchronize mapping nets from an operating primitive pb to a physical pb
 ***********************************************************************/
static
void synchronize_primitive_physical_pb_atom_nets(PhysicalPb& phy_pb,
                                                 const PhysicalPbId& primitive_pb,
                                                 const t_pb_graph_node* pb_graph_node,
                                                 const t_pb_routes& pb_route,
                                                 const AtomContext& atom_ctx,
                                                 const AtomBlockId& atom_blk,
                                                 const VprDeviceAnnotation& device_annotation) {
  /* Iterate over all the ports: input, output and clock */
  for (int iport = 0; iport < pb_graph_node->num_input_ports; ++iport) {
    for (int ipin = 0; ipin < pb_graph_node->num_input_pins[iport]; ++ipin) {
      /* Port exists (some LUTs may have no input and hence no port in the atom netlist) */
      t_model_ports* model_port = pb_graph_node->input_pins[iport][ipin].port->model_port;
      if (nullptr == model_port) {
        continue;
      }

      AtomPortId atom_port = atom_ctx.nlist.find_atom_port(atom_blk, model_port);
      if (!atom_port) {
        continue;
      }
      /* Find the atom nets mapped to the pin
       * Note that some inputs may not be used, we set them to be open by default
       */
      update_primitive_physical_pb_pin_atom_net(phy_pb, primitive_pb,
                                                &(pb_graph_node->input_pins[iport][ipin]),
                                                pb_route, device_annotation);
    }
  }

  for (int iport = 0; iport < pb_graph_node->num_output_ports; ++iport) {
    for (int ipin = 0; ipin < pb_graph_node->num_output_pins[iport]; ++ipin) {
      /* Port exists (some LUTs may have no input and hence no port in the atom netlist) */
      t_model_ports* model_port = pb_graph_node->output_pins[iport][ipin].port->model_port;
      if (nullptr == model_port) {
        continue;
      }

      AtomPortId atom_port = atom_ctx.nlist.find_atom_port(atom_blk, model_port);
      if (!atom_port) {
        continue;
      }
      /* Find the atom nets mapped to the pin
       * Note that some inputs may not be used, we set them to be open by default
       */
      update_primitive_physical_pb_pin_atom_net(phy_pb, primitive_pb,
                                                &(pb_graph_node->output_pins[iport][ipin]),
                                                pb_route, device_annotation);
    }
  }

  for (int iport = 0; iport < pb_graph_node->num_clock_ports; ++iport) {
    for (int ipin = 0; ipin < pb_graph_node->num_clock_pins[iport]; ++ipin) {
      /* Port exists (some LUTs may have no input and hence no port in the atom netlist) */
      t_model_ports* model_port = pb_graph_node->clock_pins[iport][ipin].port->model_port;
      if (nullptr == model_port) {
        continue;
      }

      AtomPortId atom_port = atom_ctx.nlist.find_atom_port(atom_blk, model_port);
      if (!atom_port) {
        continue;
      }
      /* Find the atom nets mapped to the pin
       * Note that some inputs may not be used, we set them to be open by default
       */
      update_primitive_physical_pb_pin_atom_net(phy_pb, primitive_pb,
                                                &(pb_graph_node->clock_pins[iport][ipin]),
                                                pb_route, device_annotation);
    }
  }
}

/************************************************************************
 * Synchronize mapping results from an operating pb to a physical pb
 ***********************************************************************/
void rec_update_physical_pb_from_operating_pb(PhysicalPb& phy_pb,
                                              const t_pb* op_pb,
                                              const t_pb_routes& pb_route,
                                              const AtomContext& atom_ctx,
                                              const VprDeviceAnnotation& device_annotation) {
  t_pb_graph_node* pb_graph_node = op_pb->pb_graph_node;
  t_pb_type* pb_type = pb_graph_node->pb_type;

  if (true == is_primitive_pb_type(pb_type)) {
    t_pb_graph_node* physical_pb_graph_node = device_annotation.physical_pb_graph_node(pb_graph_node);
    VTR_ASSERT(nullptr != physical_pb_graph_node);
    /* Find the physical pb */
    const PhysicalPbId& physical_pb = phy_pb.find_pb(physical_pb_graph_node);
    VTR_ASSERT(true == phy_pb.valid_pb_id(physical_pb));

    /* Set the mode bits */
    phy_pb.set_mode_bits(physical_pb, device_annotation.pb_type_mode_bits(physical_pb_graph_node->pb_type));

    /* Find the mapped atom block and add it to this physical pb */
    AtomBlockId atom_blk = atom_ctx.nlist.find_block(op_pb->name);
    VTR_ASSERT(atom_blk);

    phy_pb.add_atom_block(physical_pb, atom_blk);

    /* TODO: Iterate over ports and annotate the atom pins */
    synchronize_primitive_physical_pb_atom_nets(phy_pb, physical_pb,
                                                pb_graph_node,
                                                pb_route,
                                                atom_ctx, atom_blk,
                                                device_annotation);
    return;
  }

  /* Walk through the pb recursively but only visit the mapped modes and child pbs */
  t_mode* mapped_mode = &(pb_graph_node->pb_type->modes[op_pb->mode]);
  for (int ipb = 0; ipb < mapped_mode->num_pb_type_children; ++ipb) {
    /* Each child may exist multiple times in the hierarchy */
    for (int jpb = 0; jpb < mapped_mode->pb_type_children[ipb].num_pb; ++jpb) {
      if ((nullptr != op_pb->child_pbs[ipb]) && (nullptr != op_pb->child_pbs[ipb][jpb].name)) {
        rec_update_physical_pb_from_operating_pb(phy_pb,
                                                 &(op_pb->child_pbs[ipb][jpb]),
                                                 pb_route,
                                                 atom_ctx,
                                                 device_annotation);
      }
    }
  }
}

} /* end namespace openfpga */


@@ -0,0 +1,35 @@
/********************************************************************
 * Header file for physical_pb_utils.cpp
 *******************************************************************/
#ifndef PHYSICAL_PB_UTILS_H
#define PHYSICAL_PB_UTILS_H

/********************************************************************
 * Include header files that are required by function declaration
 *******************************************************************/
#include <vector>
#include "physical_types.h"
#include "vpr_device_annotation.h"
#include "vpr_context.h"
#include "physical_pb.h"

/********************************************************************
 * Function declaration
 *******************************************************************/

/* begin namespace openfpga */
namespace openfpga {

void alloc_physical_pb_from_pb_graph(PhysicalPb& phy_pb,
                                     const t_pb_graph_node* pb_graph_head,
                                     const VprDeviceAnnotation& device_annotation);

void rec_update_physical_pb_from_operating_pb(PhysicalPb& phy_pb,
                                              const t_pb* op_pb,
                                              const t_pb_routes& pb_route,
                                              const AtomContext& atom_ctx,
                                              const VprDeviceAnnotation& device_annotation);

} /* end namespace openfpga */

#endif


@@ -226,7 +226,7 @@
        <port name="in" physical_mode_port="in[0:5]"/>
        <port name="out" physical_mode_port="lut6_out"/>
      </pb_type>
      <pb_type name="clb.fle[n1_lut6].ble6.ff" physical_pb_type_name="clb.fle[physical].fabric.ff" physical_pb_type_index_factor="2" physical_pb_type_index_offset="1"/>
      <pb_type name="clb.fle[n1_lut6].ble6.ff" physical_pb_type_name="clb.fle[physical].fabric.ff" physical_pb_type_index_factor="2" physical_pb_type_index_offset="0"/>
      <!-- End physical pb_type binding in complex block IO -->
    </pb_type_annotations>
</openfpga_architecture>


@@ -4,6 +4,9 @@ vpr ./test_vpr_arch/k6_frac_N10_40nm.xml ./test_blif/s298.blif --write_rr_graph
# Read OpenFPGA architecture definition
read_openfpga_arch -f ./test_openfpga_arch/k6_frac_N10_40nm_openfpga.xml

# Write out the architecture XML as a proof
#write_openfpga_arch -f ./arch_echo.xml

# Annotate the OpenFPGA architecture to VPR data base
link_openfpga_arch --verbose

@@ -11,7 +14,7 @@ link_openfpga_arch --verbose
check_netlist_naming_conflict --fix --report ./netlist_renaming.xml

# Apply fix-up to clustering nets based on routing results
pb_pin_fixup #--verbose
pb_pin_fixup --verbose

# Apply fix-up to Look-Up Table truth tables based on packing results
lut_truth_table_fixup #--verbose

@@ -19,7 +22,12 @@ lut_truth_table_fixup #--verbose
# Build the module graph
# - Enabled compression on routing architecture modules
# - Enable pin duplication on grid modules
build_fabric --compress_routing --duplicate_grid_pin --verbose
build_fabric --compress_routing --duplicate_grid_pin #--verbose

# Repack the netlist to physical pbs
# This must be done before bitstream generator and testbench generation
# Strongly recommend it is done after all the fix-ups have been applied
repack --verbose

# Write the Verilog netlist for FPGA fabric
# - Enable the use of explicit port mapping in Verilog netlist


@@ -236,8 +236,10 @@
          <output name="lut6_out" num_pins="1"/>
        </pb_type>
        <interconnect>
          <direct name="direct1" input="frac_lut6.lut5_out[0]" output="frac_logic.out[0]"/>
          <mux name="mux1" input="frac_lut6.lut6_out frac_lut6.lut5_out[1]" output="frac_logic.out[1]"/>
          <direct name="direct1" input="frac_logic.in" output="frac_lut6.in"/>
          <direct name="direct2" input="frac_lut6.lut5_out[1]" output="frac_logic.out[1]"/>
          <!-- Xifan Tang: I use out[0] because the output of lut6 in lut6 mode is wired to the out[0] -->
          <mux name="mux1" input="frac_lut6.lut6_out frac_lut6.lut5_out[0]" output="frac_logic.out[0]"/>
        </interconnect>
      </pb_type>
      <!-- Define flip-flop -->

@@ -250,6 +252,7 @@
        </pb_type>
        <interconnect>
          <direct name="direct1" input="fabric.in" output="frac_logic.in"/>
          <direct name="direct2" input="frac_logic.out[1:0]" output="ff[1:0].D"/>
          <complete name="direct3" input="fabric.clk" output="ff[1:0].clk"/>
          <mux name="mux1" input="ff[0].Q frac_logic.out[0]" output="fabric.out[0]">
            <!-- LUT to output is faster than FF to output on a Stratix IV -->


@@ -11,6 +11,7 @@
#include <vector>

#include "arch_types.h"
#include "vpr_types.h"
#include "atom_netlist_fwd.h"

#include "lb_rr_graph_types.h"