Basic implementation of High Fanout Net Synthesis.
To perform HFNS, recursively build clusters of the closest sinks, each limited to fewer than 30 sinks, with a control of the half-perimeter at every clustering level. Clearly needs lots of improvements, but the backbone of the feature works. Makes use of the pool buffers, as the clock tree does. Clustering is done with a degenerate Kruskal algorithm. This is roughly based on the article: "Buffered Steiner Trees for Difficult Instances", Charles J. Alpert, IEEE TCAD, Vol. 21, No. 1, January 2002.
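As an illustration of the clustering scheme described above (names here are hypothetical, not the committed API; the actual implementation is the Cluster/BufferTree code added in plugins/alpha/block/hfns.py below), one bottom-up pass could look like this: build all short-enough edges between sinks, process them in increasing length, and merge two clusters only while the sink count stays at or below 30 and the merged bounding-box half-perimeter stays under the length limit.

# Minimal, self-contained sketch of one "degenerate Kruskal" clustering pass.
# Sinks are (x,y) points; distances are Manhattan; limits are illustrative.
class Cluster ( object ):
    def __init__ ( self, x, y ):
        self.parent = None
        self.size   = 1
        self.xmin = self.xmax = x
        self.ymin = self.ymax = y
    def getRoot ( self ):
        node = self
        while node.parent is not None: node = node.parent
        return node
    def hpwlWith ( self, other ):
        w = max(self.xmax,other.xmax) - min(self.xmin,other.xmin)
        h = max(self.ymax,other.ymax) - min(self.ymin,other.ymin)
        return (w + h) / 2.0
    def merge ( self, other ):
        self.size += other.size
        self.xmin = min(self.xmin,other.xmin); self.xmax = max(self.xmax,other.xmax)
        self.ymin = min(self.ymin,other.ymin); self.ymax = max(self.ymax,other.ymax)
        other.parent = self

def kruskalPass ( sinks, edgeLimit, maxSinks=30 ):
    clusters = [ Cluster(x,y) for x, y in sinks ]
    edges    = []
    for i in range(len(clusters)):
        for j in range(i+1,len(clusters)):
            d = abs(sinks[i][0]-sinks[j][0]) + abs(sinks[i][1]-sinks[j][1])
            if d < edgeLimit: edges.append( (d, clusters[i], clusters[j]) )
    edges.sort( key=lambda e: e[0] )
    for d, a, b in edges:
        ra, rb = a.getRoot(), b.getRoot()
        if ra is rb:                      continue  # already in the same cluster
        if ra.size + rb.size > maxSinks:  continue  # sink count control
        if ra.hpwlWith(rb) > 2*edgeLimit: continue  # half-perimeter control
        ra.merge( rb )
    return [ c for c in clusters if c.parent is None ]  # roots of this pass

The pass is then repeated on the resulting clusters (clusters of clusters) until a single root remains, each root receiving a buffer from the spare pool.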
This commit is contained in:
parent
42d11792a1
commit
0c1a6def56
|
@ -51,6 +51,7 @@
|
|||
${CMAKE_CURRENT_SOURCE_DIR}/plugins/alpha/block/spares.py
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/plugins/alpha/block/block.py
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/plugins/alpha/block/clocktree.py
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/plugins/alpha/block/hfns.py
|
||||
)
|
||||
|
||||
install ( FILES ${pySources} DESTINATION ${PYTHON_SITE_PACKAGES}/cumulus )
|
||||
|
|
|
@ -50,6 +50,7 @@ import plugins.rsave
|
|||
from plugins import getParameter
|
||||
from alpha.block.spares import Spares
|
||||
from alpha.block.clocktree import ClockTree
|
||||
from alpha.block.hfns import BufferTree
|
||||
from alpha.block.configuration import IoPin
|
||||
from alpha.block.configuration import BlockState
|
||||
|
||||
|
@ -78,6 +79,13 @@ class Side ( object ):
|
|||
elif self.side & IoPin.SOUTH: return Pin.Direction.SOUTH
|
||||
else: return Pin.Direction.NORTH
|
||||
|
||||
def destroy ( self ):
|
||||
#with UpdateSession():
|
||||
# for pinsAtPos in self.pins.values():
|
||||
# for pin in pinsAtPos:
|
||||
# pin.destroy()
|
||||
self.pins = {}
|
||||
|
||||
def setupAb ( self ):
|
||||
"""
|
||||
Initialise the side coordinate from the block abutment box.
|
||||
|
@ -307,6 +315,7 @@ class Block ( object ):
|
|||
self.state = state
|
||||
self.spares = Spares( self )
|
||||
self.clockTrees = []
|
||||
self.hfnTrees = []
|
||||
self.blockInstances = []
|
||||
self.sides = { IoPin.WEST : Side( self, IoPin.WEST )
|
||||
, IoPin.EAST : Side( self, IoPin.EAST )
|
||||
|
@ -376,7 +385,7 @@ class Block ( object ):
|
|||
|
||||
def addClockTrees ( self ):
|
||||
"""Create the trunk of all the clock trees (recursive H-Tree)."""
|
||||
print( ' o Buildding clock tree(s).' )
|
||||
print( ' o Building clock tree(s).' )
|
||||
af = CRL.AllianceFramework.get()
|
||||
clockNets = []
|
||||
for net in self.state.cell.getNets():
|
||||
|
@ -402,6 +411,35 @@ class Block ( object ):
|
|||
for clockTree in self.clockTrees:
|
||||
clockTree.splitClock()
|
||||
|
||||
def findHfnTrees ( self ):
|
||||
"""Create the trunk of all the high fanout nets."""
|
||||
print( '  o  Building high fanout net trees.' )
|
||||
if self.spares:
|
||||
with UpdateSession():
|
||||
for net in self.state.cell.getNets():
|
||||
sinksCount = 0
|
||||
for rp in net.getRoutingPads(): sinksCount += 1
|
||||
if sinksCount > 30:
|
||||
trace( 550, '\tBlock.findHfnTrees(): Found high fanout net "{}" ({} sinks).\n' \
|
||||
.format(net.getName(),sinksCount) )
|
||||
#if not net.getName().startswith('alu_m_muls_b(1)'): continue
|
||||
#if not net.getName().startswith('abc_75177_new_n12236'): continue
|
||||
sys.stderr.flush()
|
||||
print( ' - "{}", {} sinks.'.format(net.getName(),sinksCount) )
|
||||
sys.stdout.flush()
|
||||
self.hfnTrees.append( BufferTree( self.spares, net ) )
|
||||
self.hfnTrees[-1].buildBTree()
|
||||
self.hfnTrees[-1].rcreateBuffer()
|
||||
self.hfnTrees[-1].splitNet()
|
||||
self.spares.rshowPoolUse()
|
||||
else:
|
||||
print( ' (No spares buffers, disabled)' )
|
||||
return len(self.hfnTrees)
|
||||
|
||||
def addHfnBuffers ( self ):
|
||||
for hfnTree in self.hfnTrees:
|
||||
hfnTree.rcreateBuffer()
|
||||
|
||||
def placeIoPins ( self ):
|
||||
"""
|
||||
Place the Pins on all the sides. Raise an exception in case of failure.
|
||||
|
@ -510,14 +548,21 @@ class Block ( object ):
|
|||
blockInstance.block.build()
|
||||
if editor: editor.setCell( self.state.cell )
|
||||
self.state.cfg.apply()
|
||||
self.setupAb()
|
||||
self.placeIoPins()
|
||||
self.checkIoPins()
|
||||
self.spares.build()
|
||||
if editor: editor.fit()
|
||||
if self.state.useClockTree: self.addClockTrees()
|
||||
#Breakpoint.stop( 0, 'Clock tree(s) done.' )
|
||||
self.place()
|
||||
iteration = -1
|
||||
while True:
|
||||
iteration += 1
|
||||
if iteration > 0: break
|
||||
self.setupAb()
|
||||
self.placeIoPins()
|
||||
self.checkIoPins()
|
||||
self.spares.build()
|
||||
if self.state.useClockTree: self.addClockTrees()
|
||||
self.addHfnBuffers()
|
||||
if editor: editor.fit()
|
||||
#Breakpoint.stop( 0, 'Clock tree(s) done.' )
|
||||
self.place()
|
||||
self.findHfnTrees()
|
||||
break
|
||||
if self.state.useClockTree: self.splitClocks()
|
||||
status = self.route()
|
||||
self.addBlockages()
|
||||
|
|
|
@ -59,10 +59,25 @@ class ClockTree ( object ):
|
|||
self.spares = spares
|
||||
self.clockNet = clockNet
|
||||
self.clockIndex = index
|
||||
self.subNets = []
|
||||
if not self.clockNet.isClock():
|
||||
print( WarningMessage( 'ClockTree.__init__(): Net "{}" is not of CLOCK type.' \
|
||||
.format(self.clockNet.getName()) ))
|
||||
|
||||
def destroy ( self ):
|
||||
trace( 550, ',+', '\tClockTree.destroy() "{}"\n'.format(self.clockNet.getName()) )
|
||||
with UpdateSession():
|
||||
for subNet in self.subNets + [ self.clockNet ]:
|
||||
components = []
|
||||
for comp in subNet.getComponents():
|
||||
if isinstance(comp,RoutingPad): components.append( comp )
|
||||
if isinstance(comp,Pin ): components.append( comp )
|
||||
for comp in components:
|
||||
comp.destroy()
|
||||
if subNet != self.clockNet:
|
||||
subNet.destroy()
|
||||
trace( 550, '-' )
|
||||
|
||||
def _rconnectHTree ( self, qt ):
|
||||
if qt.isLeaf(): return False
|
||||
qt.rconnectBuffer()
|
||||
|
@ -85,6 +100,7 @@ class ClockTree ( object ):
|
|||
gaugeConf = self.spares.state.gaugeConf
|
||||
bufferConf = self.spares.state.bufferConf
|
||||
ckNet = qt.bOutputPlug.getNet()
|
||||
self.subNets.append( ckNet )
|
||||
|
||||
leftSourceContact = gaugeConf.rpAccessByPlugName( qt.buffer , bufferConf.output, ckNet , GaugeConf.HAccess|GaugeConf.OffsetBottom1 )
|
||||
rightSourceContact = gaugeConf.rpAccessByPlugName( qt.buffer , bufferConf.output, ckNet , GaugeConf.HAccess|GaugeConf.OffsetBottom1 )
|
||||
|
@ -175,7 +191,7 @@ class ClockTree ( object ):
|
|||
"""
|
||||
quadTree = self.spares.quadTree
|
||||
quadTree.bufferTag = self.clockNet.getName()
|
||||
|
||||
quadTree.rselectBuffer( self.clockIndex, self.clockIndex, 0 )
|
||||
with UpdateSession():
|
||||
hyperClock = HyperNet.create( Occurrence(self.clockNet) )
|
||||
for plugOccurrence in hyperClock.getTerminalNetlistPlugOccurrences():
|
||||
|
|
|
@ -415,6 +415,9 @@ class BufferInterface ( object ):
|
|||
self.count += 1
|
||||
return instance
|
||||
|
||||
def resetBufferCount ( self ):
|
||||
self.count = 0
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Class : "configuration.IoPin".
|
||||
|
@ -636,3 +639,6 @@ class BlockState ( object ):
|
|||
def getIoPinsCounts ( self, net ):
|
||||
if not self.ioPinsCounts.has_key(net): return 0
|
||||
return self.ioPinsCounts[net]
|
||||
|
||||
def resetBufferCount ( self ):
|
||||
self.bufferConf.resetBufferCount()
|
||||
|
|
|
@ -0,0 +1,718 @@
|
|||
#
|
||||
# This file is part of the Coriolis Software.
|
||||
# Copyright (c) SU 2020-2020, All Rights Reserved
|
||||
#
|
||||
# +-----------------------------------------------------------------+
|
||||
# | C O R I O L I S |
|
||||
# | C u m u l u s - P y t h o n T o o l s |
|
||||
# | |
|
||||
# | Author : Jean-Paul CHAPUT |
|
||||
# | E-mail : Jean-Paul.Chaput@lip6.fr |
|
||||
# | =============================================================== |
|
||||
# | Python : "./plugins/block/hfns.py" |
|
||||
# +-----------------------------------------------------------------+
|
||||
|
||||
"""
|
||||
Manage High Fanout Net Synthesis (HFNS).
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
import sys
|
||||
import os.path
|
||||
import re
|
||||
from operator import itemgetter, attrgetter, methodcaller
|
||||
import Cfg
|
||||
from Hurricane import Breakpoint
|
||||
from Hurricane import DbU
|
||||
from Hurricane import Box
|
||||
from Hurricane import Transformation
|
||||
from Hurricane import Box
|
||||
from Hurricane import Path
|
||||
from Hurricane import Layer
|
||||
from Hurricane import Occurrence
|
||||
from Hurricane import Net
|
||||
from Hurricane import HyperNet
|
||||
from Hurricane import RoutingPad
|
||||
from Hurricane import Horizontal
|
||||
from Hurricane import Vertical
|
||||
from Hurricane import Contact
|
||||
from Hurricane import Pin
|
||||
from Hurricane import Plug
|
||||
from Hurricane import Instance
|
||||
import CRL
|
||||
from CRL import RoutingLayerGauge
|
||||
from helpers import trace, l, u, n
|
||||
from helpers.io import ErrorMessage
|
||||
from helpers.io import WarningMessage
|
||||
from helpers.io import catch
|
||||
from helpers.overlay import UpdateSession
|
||||
from plugins import getParameter
|
||||
from plugins import utils
|
||||
from plugins.alpha.block.configuration import GaugeConf
|
||||
from plugins.alpha.block.spares import Spares
|
||||
|
||||
|
||||
af = CRL.AllianceFramework.get()
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Class : "hfns.SlicedArea".
|
||||
|
||||
class SlicedArea ( object ):
|
||||
"""
|
||||
Perform the buffer creation and insertion for a Cluster. It can request
|
||||
a free buffer from the spare set or insert a new one directly in the
|
||||
design. The second option is still available but now unused, kept as
|
||||
code example (may be needed in the future).
|
||||
"""
|
||||
|
||||
REPLACE = 0x0001
|
||||
WEDGE = 0x0002
|
||||
|
||||
def __init__ ( self, cluster ):
|
||||
"""
|
||||
Create the sliced area and perform an immediate buffer allocation
|
||||
from the spare set. Hint for a position inside the cluster's area
|
||||
but closest to the parent's center area (so, ideally, on the cluster's
|
||||
edge).
|
||||
"""
|
||||
state = cluster.bufferTree.spares.state
|
||||
self.cluster = cluster
|
||||
self.rows = {}
|
||||
self.iposition = None
|
||||
if cluster.parent is None:
|
||||
attractor = cluster.getCenter()
|
||||
else:
|
||||
attractor = cluster.parent.area.getCenter()
|
||||
self.instBuffer = cluster.bufferTree.spares.getFreeBufferUnder( cluster.area, attractor )
|
||||
|
||||
@property
|
||||
def buffer ( self ):
|
||||
"""The buffer instance."""
|
||||
return self.instBuffer
|
||||
|
||||
@property
|
||||
def bInputPlug ( self ):
|
||||
"""The input Plug of the buffer."""
|
||||
return utils.getPlugByName( self.buffer, self.cluster.bufferTree.spares.state.bufferConf.input )
|
||||
|
||||
@property
|
||||
def bOutputPlug ( self ):
|
||||
"""The output Plug of the buffer."""
|
||||
return utils.getPlugByName( self.buffer, self.cluster.bufferTree.spares.state.bufferConf.output )
|
||||
|
||||
def buildSlicesUnder ( self ):
|
||||
"""
|
||||
UNUSED. Kept as reference example.
|
||||
Rebuild the slice structure under a specific area (must be small).
|
||||
"""
|
||||
state = self.cluster.bufferTree.spares.state
|
||||
sliceHeight = state.gaugeConf.sliceHeight
|
||||
cell = state.cell
|
||||
cellAb = cell.getAbutmentBox()
|
||||
insertArea = Box( self.cluster.getCenter() )
|
||||
insertArea.inflate( l(50.0*3), 2*l(50.0) )
|
||||
for occurrence in cell.getTerminalNetlistInstanceOccurrencesUnder( insertArea ):
|
||||
instance = occurrence.getEntity()
|
||||
masterCell = instance.getMasterCell()
|
||||
ab = masterCell.getAbutmentBox()
|
||||
transf = instance.getTransformation()
|
||||
occurrence.getPath().getTransformation().applyOn( transf )
|
||||
transf.applyOn( ab )
|
||||
y = (ab.getYMin() - cellAb.getYMin()) / sliceHeight
|
||||
if (ab.getYMin() - cellAb.getYMin()) % sliceHeight:
|
||||
print( ErrorMessage( 1, 'SlicedArea.__init__(): Misaligned {}.'.format(occurrence) ))
|
||||
continue
|
||||
if not self.rows.has_key(y):
|
||||
self.rows[y] = []
|
||||
row = self.rows[ y ]
|
||||
row.append( (occurrence,ab) )
|
||||
for row in self.rows.values():
|
||||
row.sort( key=lambda v: v[1].getXMin() )
|
||||
|
||||
def findBufferSite ( self ):
|
||||
"""
|
||||
UNUSED. Kept as reference example.
|
||||
Analyse the slices for spaces and holes into which to insert a buffer.
|
||||
Look for a hole big enough (REPLACE case) or decide whether we need to shift
|
||||
the cells to create one (WEDGE case).
|
||||
"""
|
||||
global af
|
||||
catalog = af.getCatalog()
|
||||
bufferLength = self.cluster.bufferTree.spares.state.bufferConf.width
|
||||
for key in self.rows.keys():
|
||||
row = self.rows[ key ]
|
||||
trace( 550, '\t+ Row:\n' )
|
||||
tieLength = 0
|
||||
holeLength = 0
|
||||
biggestHoleLength = 0
|
||||
contiguousTie = False
|
||||
ihole = 0
|
||||
for i in range(len(row)):
|
||||
occurrence, ab = row[i]
|
||||
masterCell = occurrence.getEntity().getMasterCell()
|
||||
catalogState = catalog.getState( masterCell.getName() )
|
||||
if catalogState.isFeed():
|
||||
trace( 550, '\t| Feed:{}\n'.format(occurrence) )
|
||||
cellLength = masterCell.getAbutmentBox().getWidth()
|
||||
tieLength += cellLength
|
||||
holeLength += cellLength
|
||||
contiguousTie = True
|
||||
if holeLength > biggestHoleLength:
|
||||
biggestHoleLength = holeLength
|
||||
if holeLength == cellLength:
|
||||
ihole = i
|
||||
else:
|
||||
holeLength = 0
|
||||
contiguousTie = False
|
||||
if bufferLength <= tieLength:
|
||||
trace( 550, '\tbufferLength:{} tieLength:{} biggestHole:{}\n' \
|
||||
.format( DbU.getValueString(bufferLength)
|
||||
, DbU.getValueString(tieLength)
|
||||
, DbU.getValueString(biggestHoleLength) ))
|
||||
if bufferLength <= biggestHoleLength:
|
||||
trace( 550, '\tHole is big enough, REPLACE\n' )
|
||||
self.iposition = (key, ihole, SlicedArea.REPLACE)
|
||||
else:
|
||||
trace( 550, '\tNeeds wedging, WEDGE\n' )
|
||||
self.iposition = (key, ihole, SlicedArea.WEDGE)
|
||||
return True
|
||||
return False
|
||||
|
||||
def insertBuffer ( self ):
|
||||
"""
|
||||
UNUSED. Kept as reference example.
|
||||
Insert a new buffer instance inside the slice area. Uses the information
|
||||
gathered by ``findBufferSite()`` (where to REPLACE or WEDGE).
|
||||
"""
|
||||
if self.iposition is None:
|
||||
raise ErrorMessage( 2, 'SlicedArea.insertBuffer(): No position defined to wedge the buffer.' )
|
||||
state = self.cluster.bufferTree.spares.state
|
||||
catalog = af.getCatalog()
|
||||
bufferLength = self.cluster.bufferTree.spares.state.bufferConf.width
|
||||
tieLength = 0
|
||||
transf = None
|
||||
if self.iposition[2] & SlicedArea.REPLACE:
|
||||
row = self.rows[ self.iposition[0] ]
|
||||
for i in range(self.iposition[1],len(row)):
|
||||
occurrence, ab = row[i]
|
||||
tieLength += ab.getWidth()
|
||||
tieInstance = occurrence.getEntity()
|
||||
masterCell = tieInstance.getMasterCell()
|
||||
catalogState = catalog.getState( masterCell.getName() )
|
||||
if not catalogState.isFeed():
|
||||
raise ErrorMessage( 2, 'SlicedArea.insertBuffer(): Not a feed cell under wedge position.' )
|
||||
if transf is None:
|
||||
transf = tieInstance.getTransformation()
|
||||
tieInstance.destroy()
|
||||
if tieLength >= bufferLength:
|
||||
break
|
||||
self.instBuffer = state.createBuffer()
|
||||
self.instBuffer.setTransformation( transf )
|
||||
self.instBuffer.setPlacementStatus( Instance.PlacementStatus.FIXED )
|
||||
trace( 550, '\tWedged: {} @{}\n'.format(self.instBuffer,transf) )
|
||||
|
||||
def display ( self ):
|
||||
"""Display the orderded instances under the sliced area."""
|
||||
for key in self.rows.keys():
|
||||
print( 'Row @{}:'.format(key) )
|
||||
row = self.rows[ key ]
|
||||
for occurrence, ab in row:
|
||||
print( '| {} <- {}'.format(ab,occurrence) )
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Class : "hfns.Cluster".
|
||||
|
||||
class Cluster ( object ):
|
||||
"""
|
||||
Implementation of a cluster of RoutingPads. This is a disjoint-set data
|
||||
structure (ref. https://en.wikipedia.org/wiki/Disjoint-set_data_structure).
|
||||
|
||||
We manage two kinds of trees; do not mistake them:
|
||||
|
||||
1. The cluster's own tree, that is, the union set. ``self.root`` and
|
||||
``self.parent`` belong to that structure.
|
||||
2. The tree *of* Clusters. Recursive functions like ``rsplitNet()``
|
||||
and ``rcreateBuffer()`` belong to that super-tree.
|
||||
|
||||
The ``snapshot()`` and ``rrebindRp()`` are kept just in case. They allow
|
||||
us to keep the cluster partitioning between iterations of the placer by
|
||||
replacing RoutingPads (which get erased) with Plug occurrences, which are
|
||||
stable.
|
||||
"""
|
||||
|
||||
def __init__ ( self, bufferTree, anchor, depth ):
|
||||
self.depth = depth
|
||||
self.bufferTree = bufferTree
|
||||
self.anchor = anchor
|
||||
self.mergedAnchors = [ anchor ]
|
||||
self.parent = None
|
||||
self.rank = 0
|
||||
self.size = 1
|
||||
self.area = Box( anchor.getCenter() )
|
||||
|
||||
def __str__ ( self ):
|
||||
parentId = 'None' if self.parent is None else str(self.parent.id)
|
||||
s = '<Cluster d:{} par:{} id:{} sz:{} area:{}x{}>' \
|
||||
.format( self.depth
|
||||
, parentId
|
||||
, self.id
|
||||
, self.size
|
||||
, DbU.getValueString(self.area.getWidth())
|
||||
, DbU.getValueString(self.area.getHeight()) )
|
||||
return s
|
||||
|
||||
def __cmp__ ( self, other ):
|
||||
if other is None: return 1
|
||||
if self.id < other.id: return -1
|
||||
if self.id > other.id: return 1
|
||||
return 0
|
||||
|
||||
@property
|
||||
def buffer ( self ):
|
||||
"""The buffer instance (proxy to slicedArea)."""
|
||||
return self.slicedArea.buffer
|
||||
|
||||
@property
|
||||
def bInputPlug ( self ):
|
||||
"""The input Plug of the buffer (proxy to slicedArea)."""
|
||||
return self.slicedArea.bInputPlug
|
||||
|
||||
@property
|
||||
def bOutputPlug ( self ):
|
||||
"""The output Plug of the buffer (proxy to slicedArea)."""
|
||||
return self.slicedArea.bOutputPlug
|
||||
|
||||
@property
|
||||
def id ( self ):
|
||||
if self.anchor is None: return 0
|
||||
return self.anchor.getId()
|
||||
|
||||
def getId ( self ):
|
||||
return self.id
|
||||
|
||||
def isRoot ( self ): return self.parent is None
|
||||
|
||||
def getCenter ( self ):
|
||||
return self.area.getCenter()
|
||||
|
||||
def getRoot ( self ):
|
||||
"""Find the root, performing simple path compression as it goes."""
|
||||
#trace( 550, ',+', '\tCluster.getRoot() of id:{}\n'.format(self.id) )
|
||||
root = self
|
||||
#trace( 550, '\t+ Finding root:\n' )
|
||||
while root.parent is not None:
|
||||
root = root.parent
|
||||
#trace( 550, '\t| id:{}\n'.format(root.id) )
|
||||
node = self
|
||||
#trace( 550, '\t+ Compressing path:\n' )
|
||||
while node.parent is not None:
|
||||
pnode = node.parent
|
||||
node.parent = root
|
||||
node = pnode
|
||||
#trace( 550, '\t| id:{}\n'.format(node.id) )
|
||||
#trace( 550, ',-', '\t> Root of id:{} is id:{}\n'.format(self.id,root.id) )
|
||||
return root
|
||||
|
||||
def merge ( self, other ):
|
||||
"""Union by rank."""
|
||||
#trace( 550, ',+', '\tCluster.merge() id:{} with id:{}\n' \
|
||||
# .format(self.id,other.id) )
|
||||
root1 = self.getRoot()
|
||||
root2 = other.getRoot()
|
||||
if root1 != root2:
|
||||
if root1.rank < root2.rank:
|
||||
root1, root2 = root2, root1
|
||||
if root1.rank != root2.rank:
|
||||
root1.rank += 1
|
||||
root1.area.merge( root2.area )
|
||||
root1.size += root2.size
|
||||
root1.mergedAnchors += root2.mergedAnchors
|
||||
root2.parent = root1
|
||||
#trace( 550, ',-', '\tMerge id:{} <= id:{} done\n' \
|
||||
# .format(root1.id,root2.id) )
|
||||
else:
|
||||
pass
|
||||
#trace( 550, ',-', '\tMerge id:{} and id:{} already done\n' \
|
||||
# .format(root1.id,root2.id) )
|
||||
return root1
|
||||
|
||||
def snapshot ( self ):
|
||||
"""
|
||||
UNUSED. Kept as reference example.
|
||||
Replace the RoutingPads by their plug occurrences (in place).
|
||||
This operation is needed to save the cluster information between
|
||||
two runs, as RoutingPads will get destroyed/re-created. However,
|
||||
the Plug occurrences will remain valid (and stable).
|
||||
"""
|
||||
if isinstance(self.anchor,RoutingPad):
|
||||
self.anchor = self.anchor.getPlugOccurrence()
|
||||
mergedAnchors = []
|
||||
for anchor in self.mergedAnchors:
|
||||
if isinstance(anchor,RoutingPad):
|
||||
mergedAnchors.append( anchor.getPlugOccurrence() )
|
||||
trace( 550, '\t| snapshot:{}\n'.format(anchor.getPlugOccurrence()) )
|
||||
else:
|
||||
mergedAnchors.append( anchor )
|
||||
anchor.snapshot()
|
||||
self.mergedAnchors = mergedAnchors
|
||||
|
||||
def rrebindRp ( self, rp ):
|
||||
"""
|
||||
UNUSED. Kept as reference example.
|
||||
Associate a RoutingPad ``rp`` to a cluster. This is done by
|
||||
matching the plug occurrence of the RoutingPad. The ``snapshot()``
|
||||
method must have been called before.
|
||||
"""
|
||||
trace( 550, ',+', '\tCluster.rrebindRp() {}\n'.format(rp.getPlugOccurrence()) )
|
||||
bound = False
|
||||
plugOcc = rp.getPlugOccurrence()
|
||||
if plugOcc == self.anchor:
|
||||
bound = True
|
||||
self.anchor = rp
|
||||
self.mergedAnchors[0] = rp
|
||||
trace( 550, '\t> Bound to: {}\n'.format(self) )
|
||||
else:
|
||||
trace( 550, '\t+ mergedAnchor: {}\n'.format(len(self.mergedAnchors)) )
|
||||
if len(self.mergedAnchors):
|
||||
for i in range(len(self.mergedAnchors)):
|
||||
trace( 550, '\t| compare:[{:2}] {}\n'.format(i,self.mergedAnchors[i]) )
|
||||
if plugOcc == self.mergedAnchors[i]:
|
||||
self.mergedAnchors[i] = rp
|
||||
bound = True
|
||||
trace( 550, '\t> Bound to: {}\n'.format(self) )
|
||||
break
|
||||
if not bound and self.mergedAnchors is not None:
|
||||
for cluster in self.mergedAnchors:
|
||||
if isinstance(cluster,Cluster):
|
||||
bound = cluster.rrebindRp(rp)
|
||||
if bound: break
|
||||
trace( 550, '-' )
|
||||
return bound
|
||||
|
||||
def createBufInputRp ( self, net ):
|
||||
"""Create a RoutingPad for the buffer input Plug (terminal)."""
|
||||
return RoutingPad.create( net, Occurrence(self.bInputPlug), RoutingPad.BiggestArea )
|
||||
|
||||
def createBufOutputRp ( self, net ):
|
||||
"""Create a RoutingPad for the buffer output Plug (terminal)."""
|
||||
return RoutingPad.create( net, Occurrence(self.bOutputPlug), RoutingPad.BiggestArea )
|
||||
|
||||
def setRootDriver ( self, net ):
|
||||
"""Connect the top-level buffer input to the original signal."""
|
||||
if not self.isRoot():
|
||||
raise ErrorMessage( 2, 'Cluster.setRootDriver(): Must be called only on the top root cluster.' )
|
||||
self.createBufInputRp( net )
|
||||
|
||||
def createBuffer ( self ):
|
||||
"""Create the SlicedArea which will create/insert the buffer of the cluster."""
|
||||
if not self.isRoot():
|
||||
raise ErrorMessage( 2, 'Cluster.createBuffer(): Only the root cluster should have a buffer.' )
|
||||
self.slicedArea = SlicedArea( self )
|
||||
|
||||
def rcreateBuffer ( self ):
|
||||
"""Recursively call ``createBuffer()`` on the whole cluster hierarchy."""
|
||||
self.createBuffer()
|
||||
for anchor in self.mergedAnchors:
|
||||
if isinstance(anchor,Cluster):
|
||||
anchor.rcreateBuffer()
|
||||
|
||||
def rsplitNet ( self ):
|
||||
"""
|
||||
Perform the actual splitting of the net into subnets. This is a
|
||||
recursive function. One driver net will be created per cluster.
|
||||
"""
|
||||
if not self.isRoot():
|
||||
raise ErrorMessage( 2, 'Cluster.rsplitNet(): Only the root cluster should be connected.' )
|
||||
spares = self.bufferTree.spares
|
||||
netBuff = self.bufferTree.createSubNet()
|
||||
self.bOutputPlug.setNet( netBuff )
|
||||
trace( 550, ',+', '\tCluster.rsplitNet(), size:{} depth:{} driver:{}\n' \
|
||||
.format(self.size,self.depth,netBuff.getName()) )
|
||||
if len(self.mergedAnchors) > 30:
|
||||
print( 'Top cluster of "{}" still has {} sinks.' \
|
||||
.format(netBuff.getName(),len(self.mergedAnchors)) )
|
||||
for anchor in self.mergedAnchors:
|
||||
if isinstance(anchor,Cluster):
|
||||
trace( 550, '\tcluster input: "{}"\n'.format(netBuff) )
|
||||
anchor.bInputPlug.setNet( netBuff )
|
||||
anchor.rsplitNet()
|
||||
else:
|
||||
plug = anchor.getPlugOccurrence()
|
||||
deepPlug = spares.raddTransNet( netBuff, plug.getPath() )
|
||||
deepNetBuff = deepPlug.getMasterNet() if deepPlug else netBuff
|
||||
trace( 550, '\tdeepNetBuf: "{}"\n'.format(deepNetBuff) )
|
||||
if isinstance(plug.getEntity(),Pin):
|
||||
print( 'PIN, SKIPPED for {}'.format(deepNetBuff.getName()) )
|
||||
continue
|
||||
plug.getEntity().setNet( deepNetBuff )
|
||||
anchor.destroy()
|
||||
if netBuff.getName().startswith('abc_75177_new_n3292'):
|
||||
trace( 550, '\tNet {}:\n'.format(netBuff.getName()) )
|
||||
count = 0
|
||||
for component in netBuff.getComponents():
|
||||
trace( 550, '\t[{:2}] {}\n'.format(count,component) )
|
||||
count += 1
|
||||
trace( 550, ',-' )
|
||||
|
||||
def show ( self ):
|
||||
"""Select the RoutingPad of the cluster in the editor."""
|
||||
editor = self.bufferTree.spares.state.editor
|
||||
if not editor: return False
|
||||
editor.unselectAll()
|
||||
editor.setCumulativeSelection( True )
|
||||
editor.setShowSelection( True )
|
||||
area = Box( self.area )
|
||||
area.inflate( l(10.0) )
|
||||
editor.reframe( area, False )
|
||||
#editor.select( self.anchor.getOccurrence() )
|
||||
for anchor in self.mergedAnchors:
|
||||
if isinstance(anchor,Cluster):
|
||||
continue
|
||||
else:
|
||||
editor.select( anchor.getOccurrence() )
|
||||
return True
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Class : "hfns.Edge".
|
||||
|
||||
class Edge ( object ):
|
||||
"""
|
||||
Define an Edge between two Clusters. The length of the Edge is the
|
||||
Manhattan distance between the centers of the two clusters' areas.
|
||||
So, as Clusters grow, the area and the length of the
|
||||
edge change over time. To work on a stable value, the initial
|
||||
distance is cached in the ``length`` attribute.
|
||||
"""
|
||||
|
||||
def __init__ ( self, source, target ):
|
||||
self.source = source
|
||||
self.target = target
|
||||
self.length = self.clusterLength
|
||||
|
||||
@property
|
||||
def clusterLength ( self ):
|
||||
"""
|
||||
Manhattan distance, cluster center to cluster center.
|
||||
The actual one, not the ``length`` initial cached value.
|
||||
"""
|
||||
sourceCenter = self.source.getCenter()
|
||||
targetCenter = self.target.getCenter()
|
||||
return targetCenter.manhattanDistance( sourceCenter )
|
||||
|
||||
def __cmp__ ( self, other ):
|
||||
"""Comparison over the cached initial length value."""
|
||||
if self.length < other.length: return -1
|
||||
if self.length > other.length: return 1
|
||||
return 0
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Class : "hfns.BufferTree".
|
||||
|
||||
class BufferTree ( object ):
|
||||
"""
|
||||
Build a buffer tree for a high fanout net. Recursively build clusters
|
||||
using Kruskal algorithm (https://en.wikipedia.org/wiki/Kruskal's_algorithm).
|
||||
All subnets are created at the design top level (as for the clock tree),
|
||||
so they are not ``DeepNet``; the driver itself is pulled up to the top
|
||||
level if need be.
|
||||
"""
|
||||
|
||||
patVhdlVector = re.compile( r'(?P<name>.*)\((?P<index>\d+)\)' )
|
||||
|
||||
def __init__ ( self, spares, net ):
|
||||
trace( 550, '\tBufferTree.__init__() on "{}".\n'.format(net.getName()) )
|
||||
self.spares = spares
|
||||
self.net = net
|
||||
self.isDeepNet = True
|
||||
self.clusterDepth = 0
|
||||
self.clusters = [ [] ]
|
||||
self.netCount = 0
|
||||
self.netName = self.net.getName()
|
||||
self.netIndex = None
|
||||
m = BufferTree.patVhdlVector.match( self.net.getName() )
|
||||
if m:
|
||||
self.netName = m.group('name')
|
||||
self.netIndex = m.group('index')
|
||||
|
||||
@property
|
||||
def root ( self ):
|
||||
"""The root cluster of the tree (must be unique...)"""
|
||||
if len(self.clusters[-1]) != 1:
|
||||
raise ErrorMessage( 2, 'BufferTree.root: No root, or multiple roots, for "{}".' \
|
||||
.format(self.net.getName()) )
|
||||
return self.clusters[-1][0]
|
||||
|
||||
@property
|
||||
def edgeLimit ( self ):
|
||||
"""
|
||||
Maximum length of Edge to consider. Edges above this threshold will be
|
||||
pruned from the set given to Kruskal.
|
||||
"""
|
||||
levelFactor = 1
|
||||
if self.clusterDepth == 0: pass
|
||||
else: levelFactor = 4*self.clusterDepth
|
||||
return levelFactor*l(700)
|
||||
|
||||
def createSubNet ( self ):
|
||||
"""
|
||||
Create a new sub-net for a buffer driver. If the signal is a bit
|
||||
from a vector, unvectorize it but keep a ``bitX`` tag in the name. For example,
|
||||
the third (i.e. index 2) auxiliary signal for ``my_vector(3)`` will give
|
||||
``my_vector_bit3_hfns_2``.
|
||||
"""
|
||||
if self.netIndex is None:
|
||||
subNetName = '{}_hfns_{}'.format( self.netName, self.netCount )
|
||||
else:
|
||||
subNetName = '{}_bit{}_hfns_{}'.format( self.netName, self.netIndex, self.netCount )
|
||||
net = Net.create( self.spares.state.cell, subNetName )
|
||||
self.netCount += 1
|
||||
return net
|
||||
|
||||
def canMerge ( self, clusterA, clusterB ):
|
||||
"""
|
||||
Control the merge criterion between two clusters. For now we check
|
||||
that the number of sinks does not exceed 30 and the half-perimeter is not
|
||||
too great (see ``edgeLimit``).
|
||||
"""
|
||||
if clusterA.size + clusterB.size > 30:
|
||||
trace( 550, '\t> Reject merge, over size threshold of 30.\n' )
|
||||
return False
|
||||
area = Box( clusterA.area )
|
||||
area.merge( clusterB.area )
|
||||
hpwl = (area.getWidth() + area.getHeight()) / 2
|
||||
if hpwl > 2*self.edgeLimit:
|
||||
trace( 550, '\t> Reject merge, over HPWL threshold of 2*{}.\n' \
|
||||
.format(DbU.getValueString(self.edgeLimit)))
|
||||
return False
|
||||
else:
|
||||
trace( 550, '\t> Accepted merge, future area is {}x{}.\n' \
|
||||
.format( DbU.getValueString(area.getWidth ())
|
||||
, DbU.getValueString(area.getHeight()) ))
|
||||
return True
|
||||
|
||||
def doKruskal ( self ):
|
||||
"""
|
||||
Run the Kruskal algorithm. We do not perform a complete Kruskal as
|
||||
*too long* edges are pruned and we do not keep track of edges;
|
||||
we just want clusters of close RoutingPads, not a minimum
|
||||
spanning tree.
|
||||
"""
|
||||
trace( 550, ',+', '\tBufferTree.doKruskal()\n' )
|
||||
trace( 550, '\tBuilding edges, max length:{} ...\n'.format(DbU.getValueString(self.edgeLimit)) )
|
||||
clusters = self.clusters[-1]
|
||||
edges = []
|
||||
for i in range(len(clusters)):
|
||||
for j in range(i+1,len(clusters)):
|
||||
edge = Edge( clusters[i], clusters[j] )
|
||||
if edge.length < self.edgeLimit:
|
||||
edges.append( edge )
|
||||
trace( 550, '\tSorting {} edges ...\n'.format(len(edges)) )
|
||||
edges.sort( key=attrgetter('length') )
|
||||
trace( 550, '\tProcessing edges ...\n' )
|
||||
clustersCount = len(clusters)
|
||||
for i in range(len(edges)):
|
||||
edge = edges[i]
|
||||
trace( 550, '\t| Process [{:3d}], length:{} clusterLength:{}\n' \
|
||||
.format( i, DbU.getValueString(edge.length)
|
||||
, DbU.getValueString(edge.clusterLength)) )
|
||||
sourceRoot = edge.source.getRoot()
|
||||
targetRoot = edge.target.getRoot()
|
||||
if sourceRoot == targetRoot:
|
||||
continue
|
||||
if not self.canMerge(sourceRoot,targetRoot):
|
||||
continue
|
||||
sourceRoot.merge( targetRoot )
|
||||
trace( 550, '\t> Merged cluster: {}\n'.format(sourceRoot.getRoot()) )
|
||||
clustersCount -= 1
|
||||
trace( 550, '\tClusters count: {}\n'.format(clustersCount) )
|
||||
for cluster in clusters:
|
||||
if cluster.isRoot():
|
||||
trace( 550, '\t | Cluster, size:{}, area:{} x {} {}\n' \
|
||||
.format( cluster.size
|
||||
, DbU.getValueString(cluster.area.getWidth())
|
||||
, DbU.getValueString(cluster.area.getHeight())
|
||||
, cluster.area ) )
|
||||
trace( 550, '-' )
|
||||
return clustersCount
|
||||
|
||||
def buildBTree ( self ):
|
||||
"""
|
||||
Recursively performs the Kruskal algorithm until only *one* root
|
||||
cluster remains. The first level is made of clusters of RoutingPads, then
|
||||
clusters of clusters.
|
||||
"""
|
||||
trace( 550, ',+', '\tBufferTree.buildBTree() on "{}" ...\n'.format(self.net.getName()) )
|
||||
self.rpDriver = None
|
||||
pinRp = None
|
||||
for rp in self.net.getRoutingPads():
|
||||
rpOccurrence = rp.getPlugOccurrence()
|
||||
entity = rpOccurrence.getEntity()
|
||||
if rpOccurrence.getPath().isEmpty():
|
||||
self.isDeepNet = False
|
||||
if isinstance(entity,Pin):
|
||||
pinRp = rp
|
||||
continue
|
||||
masterNet = entity.getMasterNet()
|
||||
if masterNet.getDirection() & Net.Direction.DirIn:
|
||||
self.clusters[0].append( Cluster(self,rp,self.clusterDepth) )
|
||||
else:
|
||||
trace( 550, '\tDriver:{}.\n'.format(rp) )
|
||||
self.rpDriver = rp
|
||||
if pinRp:
|
||||
if self.rpDriver is None:
|
||||
trace( 550, '\tDriver (external pin):{}.\n'.format(pinRp) )
|
||||
self.rpDriver = pinRp
|
||||
else:
|
||||
self.clusters[0].append( Cluster(self,pinRp,self.clusterDepth) )
|
||||
while len(self.clusters[-1]) > 1:
|
||||
self.doKruskal()
|
||||
self.clusters.append( [] )
|
||||
for cluster in self.clusters[-2]:
|
||||
if cluster.isRoot():
|
||||
self.clusters[-1].append( Cluster(self,cluster,self.clusterDepth+1) )
|
||||
#if cluster.show():
|
||||
# Breakpoint.stop( 0, 'Showing cluster of {} RoutingPads'.format(cluster.size) )
|
||||
editor = self.spares.state.editor
|
||||
if editor:
|
||||
editor.unselectAll()
|
||||
editor.setCumulativeSelection( False )
|
||||
editor.setShowSelection( False )
|
||||
self.clusterDepth += 1
|
||||
trace( 550, '-' )
|
||||
|
||||
def snapshot ( self ):
|
||||
"""UNUSED. Kept as reference example. See ``Cluster.snapshot()``."""
|
||||
if not self.root:
|
||||
raise ErrorMessage( 2, 'BufferTree.snapshot(): Clusters must be built first.' )
|
||||
self.root.snapshot()
|
||||
|
||||
def rcreateBuffer ( self ):
|
||||
"""Proxy to ``Cluster.rcreateBuffer()``."""
|
||||
if not self.root:
|
||||
raise ErrorMessage( 2, 'BufferTree.rcreateBuffer(): Clusters must be built first.' )
|
||||
self.root.rcreateBuffer()
|
||||
|
||||
def splitNet ( self ):
|
||||
"""
|
||||
Perform the actual splitting of the net into sub-trees. Mostly calls
|
||||
``Cluster.rsplitNet()``, then connects the top cluster root to the original
|
||||
signal.
|
||||
"""
|
||||
if not self.root:
|
||||
raise ErrorMessage( 2, 'BufferTree.splitNet(): Clusters must be built first.' )
|
||||
self.root.rsplitNet()
|
||||
if self.isDeepNet:
|
||||
# Must convert from a DeepNet into a real top Net to be saved.
|
||||
driverRpOcc = self.rpDriver.getPlugOccurrence()
|
||||
topNetName = self.net.getName()
|
||||
self.net.destroy()
|
||||
self.net = Net.create( self.spares.state.cell, topNetName )
|
||||
deepPlug = self.spares.raddTransNet( self.net, driverRpOcc.getPath() )
|
||||
deepDriverNet = deepPlug.getMasterNet()
|
||||
driverRpOcc.getEntity().setNet( deepDriverNet )
|
||||
RoutingPad.create( self.net, driverRpOcc, RoutingPad.BiggestArea )
|
||||
self.root.setRootDriver( self.net )
|
||||
trace( 550, '\tRoot input: {}\n'.format(self.root.bInputPlug) )
|
|
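To make the pruning thresholds above concrete (a worked reading of ``BufferTree.edgeLimit`` and ``canMerge``, values in lambda units as returned by ``l()``): at clustering depth 0, edges longer than l(700) are discarded and a merge is rejected once the merged half-perimeter exceeds 2*l(700); at depth 1 the limit becomes 4*l(700), at depth 2 it becomes 8*l(700), and so on.

# Hypothetical helper mirroring BufferTree.edgeLimit (lambda units, base of 700).
def edgeLimit ( depth, base=700 ):
    levelFactor = 1 if depth == 0 else 4*depth
    return levelFactor*base

assert [ edgeLimit(d) for d in range(4) ] == [ 700, 2800, 5600, 8400 ]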
@ -16,6 +16,7 @@
|
|||
import sys
|
||||
import os.path
|
||||
import Cfg
|
||||
from operator import itemgetter
|
||||
from Hurricane import Breakpoint
|
||||
from Hurricane import DbU
|
||||
from Hurricane import Box
|
||||
|
@ -59,6 +60,7 @@ class BufferPool ( object ):
|
|||
self.quadTree = quadTree
|
||||
self.columns = quadTree.spares.state.bColumns
|
||||
self.rows = quadTree.spares.state.bRows
|
||||
self.area = Box()
|
||||
self.buffers = []
|
||||
self.selectedIndex = None
|
||||
for i in range(self.rows*self.columns):
|
||||
|
@ -119,10 +121,24 @@ class BufferPool ( object ):
|
|||
selectedBuffer[0] |= Spares.USED
|
||||
|
||||
def selectFree ( self ):
|
||||
"""Select the first free buffer available."""
|
||||
"""
|
||||
Select the first free buffer available. Marks the buffer as used
|
||||
and returns its instance. If all buffers in the pool are taken,
|
||||
returns ``None``.
|
||||
"""
|
||||
for i in range(self.rows*self.columns):
|
||||
if not (self.buffers[i][0] & Spares.USED):
|
||||
self._select( i, Spares.MARK_USED )
|
||||
trace( 550, '\tUse buffer from pool {}\n'.format(self.quadTree) )
|
||||
return self.buffers[i][1]
|
||||
return None
|
||||
|
||||
def hasFree ( self ):
|
||||
"""Tells if the pool has a free buffer available."""
|
||||
for i in range(self.rows*self.columns):
|
||||
if not (self.buffers[i][0] & Spares.USED):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _createBuffers ( self ):
|
||||
"""Create the matrix of instances buffer."""
|
||||
|
@ -152,8 +168,30 @@ class BufferPool ( object ):
|
|||
instance.setPlacementStatus( Instance.PlacementStatus.FIXED )
|
||||
self.buffers[ index ][1] = instance
|
||||
trace( 550, '\tBuffer[{}]: {} @{}\n'.format(index,self.buffers[index],transf) )
|
||||
blBufAb = self.buffers[ 0][1].getAbutmentBox()
|
||||
trBufAb = self.buffers[-1][1].getAbutmentBox()
|
||||
self.area = Box( blBufAb.getXMin(), blBufAb.getYMin()
|
||||
, trBufAb.getXMax(), trBufAb.getYMax() )
|
||||
trace( 550, '-' )
|
||||
|
||||
|
||||
def _destroyBuffers ( self ):
|
||||
"""Destroy all the buffer instances of the pool."""
|
||||
for flags, buffer in self.buffers:
|
||||
buffer.destroy()
|
||||
|
||||
def showUse ( self, depth ):
|
||||
"""Display the pool occupancy."""
|
||||
count = 0
|
||||
for i in range(self.rows*self.columns):
|
||||
if self.buffers[i][0] & Spares.USED:
|
||||
count += 1
|
||||
#header = '| ' if self.quadTree.isLeaf() else '+ '
|
||||
#print( ' {}{}Pool {}, usage:{}/{}.'.format( ' '*depth
|
||||
# , header
|
||||
# , self.quadTree
|
||||
# , count
|
||||
# , self.rows*self.columns) )
|
||||
return count, self.rows*self.columns
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
|
@ -179,6 +217,7 @@ class QuadTree ( object ):
|
|||
self.xcut = None
|
||||
self.ycut = None
|
||||
self.parent = parent
|
||||
self.depth = parent.depth+1 if parent else 0
|
||||
self.bl = None
|
||||
self.br = None
|
||||
self.tl = None
|
||||
|
@ -192,6 +231,13 @@ class QuadTree ( object ):
|
|||
else:
|
||||
self.rtag = rtag
|
||||
|
||||
def destroy ( self ):
|
||||
if self.bl: self.bl.destroy()
|
||||
if self.br: self.br.destroy()
|
||||
if self.tl: self.tl.destroy()
|
||||
if self.tr: self.tr.destroy()
|
||||
self.pool._destroyBuffers()
|
||||
|
||||
def __str__ ( self ):
|
||||
s = '<QuadTree [{},{} {},{}] "{}">'.format( DbU.getValueString(self.area.getXMin())
|
||||
, DbU.getValueString(self.area.getYMin())
|
||||
|
@ -200,6 +246,27 @@ class QuadTree ( object ):
|
|||
, self.rtag )
|
||||
return s
|
||||
|
||||
def __eq__ ( self, other ):
|
||||
return self.rtag == other.rtag
|
||||
|
||||
def rshowPoolUse ( self ):
|
||||
rused = 0
|
||||
rtotal = 0
|
||||
if not self.depth:
|
||||
print( ' o Detailed use of spare buffers.' )
|
||||
used, total = self.pool.showUse( self.depth )
|
||||
rused += used
|
||||
rtotal += total
|
||||
for leaf in self.leafs:
|
||||
used, total = leaf.rshowPoolUse()
|
||||
rused += used
|
||||
rtotal += total
|
||||
if not self.depth:
|
||||
if rtotal:
|
||||
print( '     - Used: {}, total: {} ({:.1%}).' \
|
||||
.format(rused,rtotal,float(rused)/float(rtotal)) )
|
||||
return rused, rtotal
|
||||
|
||||
@property
|
||||
def leafs ( self ):
|
||||
activeLeafs = []
|
||||
|
@ -225,6 +292,20 @@ class QuadTree ( object ):
|
|||
if leaf is not None: return False
|
||||
return True
|
||||
|
||||
def getParentAt ( self, depth ):
|
||||
"""
|
||||
Return the parent at the given ``depth``. The depth increases starting
|
||||
from the root which is labeled 0. So requesting the parent at a depth
|
||||
greater than or equal to that of the node is an error...
|
||||
"""
|
||||
if self.depth <= depth:
|
||||
raise ErrorMessage( 2, 'QuadTree.getParentAt(): Parent depth must be lower than current depth ({} vs. {})' \
|
||||
.format(depth,self.depth) )
|
||||
parent = self.parent
|
||||
while parent.depth > depth:
|
||||
parent = parent.parent
|
||||
return parent
|
||||
|
||||
@property
|
||||
def buffer ( self ):
|
||||
"""The the currently selected buffer instance in the pool."""
|
||||
|
@ -250,6 +331,13 @@ class QuadTree ( object ):
|
|||
modulo = (x - self.area.getXMin()) % self.spares.state.gaugeConf.sliceStep
|
||||
return x - modulo
|
||||
|
||||
def selectFree ( self ):
|
||||
"""
|
||||
Returns the first free buffer *instance* in the pool or None if
|
||||
there isn't any left.
|
||||
"""
|
||||
return self.pool.selectFree()
|
||||
|
||||
def connectBuffer ( self, doLeaf=False ):
|
||||
"""
|
||||
Create output nets for the currently selected buffer, if they do not
|
||||
|
@ -392,7 +480,7 @@ class QuadTree ( object ):
|
|||
trace( 550, '-' )
|
||||
|
||||
def getLeafUnder ( self, position ):
|
||||
"""Find the QuadTree leaf under `position`."""
|
||||
"""Find the QuadTree leaf under ``position``."""
|
||||
if self.isLeaf(): return self
|
||||
if self.isHBipart():
|
||||
if position.getX() < self.xcut: return self.bl.getLeafUnder(position)
|
||||
|
@ -406,6 +494,29 @@ class QuadTree ( object ):
|
|||
if position.getY() < self.ycut: return self.br.getLeafUnder(position)
|
||||
return self.tr.getLeafUnder(position)
|
||||
|
||||
def getFreeLeafUnder ( self, area, attractor=None ):
|
||||
"""
|
||||
Find a free buffer under the given ``area``. The candidates are sorted
|
||||
according to their distance to the ``attractor`` point; the closest is
|
||||
returned. If no ``attractor`` is supplied, the center of the ``area``
|
||||
is used. This function is a frontend to ``_getFreeLeafUnder()``.
|
||||
"""
|
||||
candidates = []
|
||||
self._getFreeLeafUnder( area, candidates, attractor )
|
||||
if not len(candidates):
|
||||
return None
|
||||
candidates.sort( key=itemgetter(1) )
|
||||
return candidates[0][0]
|
||||
|
||||
def _getFreeLeafUnder ( self, area, candidates, attractor ):
|
||||
"""Find a free buffer under the given ``area``. See ``getFreeLeafUnder()``."""
|
||||
if self.pool.hasFree():
|
||||
point = area.getCenter() if attractor is None else attractor
|
||||
candidates.append( [ self, self.area.getCenter().manhattanDistance(point) ] )
|
||||
for leaf in self.leafs:
|
||||
if leaf.area.intersect(area):
|
||||
leaf._getFreeLeafUnder( area, candidates, attractor )
|
||||
|
||||
def attachToLeaf ( self, plugOccurrence ):
|
||||
"""Assign the plug occurrence to the QuadTree leaf it is under."""
|
||||
position = plugOccurrence.getBoundingBox().getCenter()
|
||||
|
@ -476,9 +587,19 @@ class Spares ( object ):
|
|||
MARK_USED = 0x00020000
|
||||
|
||||
def __init__ ( self, block ):
|
||||
self.state = block.state
|
||||
self.quadTree = None
|
||||
self.cloneds = []
|
||||
self.state = block.state
|
||||
self.quadTree = None
|
||||
self.cloneds = []
|
||||
self.strayBuffers = []
|
||||
|
||||
def reset ( self ):
|
||||
self.quadTree.destroy()
|
||||
for buffer in self.strayBuffers:
|
||||
buffer.destroy()
|
||||
self.quadTree = None
|
||||
self.cloneds = []
|
||||
self.strayBuffers = []
|
||||
self.state.resetBufferCount()
|
||||
|
||||
def getSpareSpaceMargin ( self ):
|
||||
"""
|
||||
|
@ -494,7 +615,7 @@ class Spares ( object ):
|
|||
bufferLength = self.state.bufferConf.width * self.state.bColumns * self.state.bRows
|
||||
if not areaLength:
|
||||
raise ErrorMessage( 3, 'Spares.getSpareSpaceMargin(): Spare leaf area is zero.' )
|
||||
return float(bufferLength) / float(areaLength)
|
||||
return (float(bufferLength) * 1.3) / float(areaLength)
|
||||
|
||||
def toXGCellGrid ( self, x ):
|
||||
"""Find the nearest X (inferior) on the Cell gauge grid (sliceStep)."""
|
||||
|
@ -513,6 +634,46 @@ class Spares ( object ):
|
|||
self.quadTree = QuadTree.create( self )
|
||||
trace( 550, '-' )
|
||||
|
||||
def rshowPoolUse ( self ):
|
||||
if self.quadTree:
|
||||
self.quadTree.rshowPoolUse()
|
||||
|
||||
def addStrayBuffer ( self, position ):
|
||||
"""Add a new stray buffer at ``position``."""
|
||||
trace( 550, ',+', '\tSpares.addStrayBuffer()\n' )
|
||||
|
||||
sliceHeight = self.state.gaugeConf.sliceHeight
|
||||
x = self.quadTree.onXPitch( position.getX() )
|
||||
y = self.quadTree.onYSlice( position.getY() )
|
||||
slice = y / sliceHeight
|
||||
orientation = Transformation.Orientation.ID
|
||||
y = slice * sliceHeight
|
||||
if slice % 2:
|
||||
orientation = Transformation.Orientation.MY
|
||||
y += sliceHeight
|
||||
transf = Transformation( x, y, orientation )
|
||||
instance = self.state.createBuffer()
|
||||
instance.setTransformation( transf )
|
||||
instance.setPlacementStatus( Instance.PlacementStatus.FIXED )
|
||||
unoverlapDx = self.quadTree.getUnoverlapDx( instance.getAbutmentBox() )
|
||||
if unoverlapDx:
|
||||
transf = Transformation( x+unoverlapDx, y, orientation )
|
||||
instance.setTransformation( transf )
|
||||
self.strayBuffers.append( instance )
|
||||
trace( 550, '\tBuffer: {} @{}\n'.format(self.strayBuffers[-1],transf) )
|
||||
trace( 550, '-' )
|
||||
return instance
|
||||
|
||||
def getFreeBufferNear ( self, position ):
|
||||
leaf = self.quadTree.getLeafUnder( position )
|
||||
return leaf.selectFree()
|
||||
|
||||
def getFreeBufferUnder ( self, area, attractor=None ):
|
||||
leaf = self.quadTree.getFreeLeafUnder( area, attractor )
|
||||
if leaf is None:
|
||||
raise ErrorMessage( 2, 'Spares.getFreeBufferUnder(): No more free buffers under {}.'.format(area) )
|
||||
return leaf.selectFree()
|
||||
|
||||
def addClonedCell ( self, masterCell ):
|
||||
if not masterCell in self.cloneds: self.cloneds.append( masterCell )
|
||||
return
|
||||
|
|
|
@ -17,6 +17,7 @@ from __future__ import print_function
|
|||
from Hurricane import Breakpoint
|
||||
from Hurricane import Box
|
||||
from Hurricane import Vertical
|
||||
from Hurricane import RoutingPad
|
||||
|
||||
|
||||
def breakpoint ( editor, level, message ):
|
||||
|
@ -81,3 +82,17 @@ def showNet ( cell, netName ):
|
|||
for component in net.getComponents():
|
||||
print( '| {} bb:{}'.format(component, component.getBoundingBox()) )
|
||||
return
|
||||
|
||||
|
||||
def hpathToName ( path ):
|
||||
"""
|
||||
Translate a hierarchical path into a string. This function is to be used
|
||||
when a flattened name is required. Should be VHDL compliant.
|
||||
"""
|
||||
s = ''
|
||||
while not path.isEmpty():
|
||||
head = path.getHeadInstance()
|
||||
path = path.getTailPath()
|
||||
if len(s): s += '_'
|
||||
s += head.getName()
|
||||
return s
|
||||
|
|
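For illustration of ``hpathToName()`` above (a sketch working on plain strings instead of a Hurricane ``Path``; names are hypothetical): the head instance names of the path are simply joined with underscores.

# Hypothetical string-based equivalent of hpathToName().
def hpathToNameSketch ( instanceNames ):
    s = ''
    for name in instanceNames:
        if len(s): s += '_'
        s += name
    return s

assert hpathToNameSketch( ['cpu','alu','adder'] ) == 'cpu_alu_adder'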